/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2015 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "real.h"
#include "tree.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfganal.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "inchash.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
#include "inchash.h"
#include "gimplify.h"
#include "tree-cfgcleanup.h"

/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op;  tree opnd; } unary;
    struct { enum tree_code op;  tree opnd0, opnd1; } binary;
    struct { enum tree_code op;  tree opnd0, opnd1, opnd2; } ternary;
    struct { gcall *fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};

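/* For example (an illustrative sketch, not code from this pass): the
   assignment "a_1 = b_2 + c_3" is recorded as an EXPR_BINARY with
   op == PLUS_EXPR and operands b_2 and c_3, while the call
   "x_4 = sqrt (y_5)" becomes an EXPR_CALL carrying the gcall and its
   argument vector.  */
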
/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;


/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};

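/* As an illustrative sketch: for "if (a_1 == 5)", the true edge gets
   lhs == a_1 and rhs == 5, and its cond_equivalences record that
   "a_1 == 5" is true (and, e.g., that "a_1 != 5" is false).  */
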
/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<std::pair<expr_hash_elt_t, expr_hash_elt_t> > avail_exprs_stack;

/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The virtual operand associated with the nearest dominating stmt
     loading from or storing to expr.  */
  tree vop;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};

/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
				   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt *value_type;
  typedef expr_hash_elt *compare_type;
  typedef int store_values_directly;
  static inline hashval_t hash (const value_type &);
  static inline bool equal (const value_type &, const compare_type &);
  static inline void remove (value_type &);
};

inline hashval_t
expr_elt_hasher::hash (const value_type &p)
{
  return p->hash;
}

inline bool
expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
{
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  if (p1->hash != p2->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    return true;

  return false;
}

/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type &element)
{
  free_expr_hash_elt (element);
}

/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table<expr_elt_hasher> *avail_exprs;

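/* For example (an illustrative sketch): after recording
   "a_1 = b_2 + c_3", a later statement "d_4 = b_2 + c_3" whose lookup
   hits this table is rewritten to "d_4 = a_1".  */
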
/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static vec<tree> const_and_copies_stack;

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;
static vec<gimple> need_noreturn_fixup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool, bool = true);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *,
			     const hash_table<expr_elt_hasher> &);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);


/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
	  expr->kind = EXPR_SINGLE;
	  expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
	  expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
	  break;
        case GIMPLE_UNARY_RHS:
	  expr->kind = EXPR_UNARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  if (CONVERT_EXPR_CODE_P (subcode))
	    subcode = NOP_EXPR;
	  expr->ops.unary.op = subcode;
	  expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
	  break;
        case GIMPLE_BINARY_RHS:
	  expr->kind = EXPR_BINARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  expr->ops.binary.op = subcode;
	  expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
	  expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
	  break;
        case GIMPLE_TERNARY_RHS:
	  expr->kind = EXPR_TERNARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  expr->ops.ternary.op = subcode;
	  expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
	  expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
	  expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
	  break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
    {
      size_t nargs = gimple_call_num_args (call_stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (call_stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (call_stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = call_stmt;

      if (gimple_call_flags (call_stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (call_stmt, i);
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    {
      expr->type = TREE_TYPE (gimple_switch_index (swtch_stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (swtch_stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->vop = gimple_vuse (stmt);
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}

/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->vop = NULL_TREE;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Compare two hashable_expr structures for equivalence.  They are
   considered equivalent when the expressions they denote must
   necessarily be equal.  The logic is intended to follow that of
   operand_equal_p in fold-const.c.  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                        const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If exactly one of the types is NULL, the expressions can't be
     equal; if both are NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If the types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
	  || TREE_CODE (type1) == ERROR_MARK
	  || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
	  || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
	  || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
	return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
			   expr1->ops.binary.opnd0, 0)
	  && operand_equal_p (expr0->ops.binary.opnd1,
			      expr1->ops.binary.opnd1, 0))
	return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
	      && operand_equal_p (expr0->ops.binary.opnd0,
				  expr1->ops.binary.opnd1, 0)
	      && operand_equal_p (expr0->ops.binary.opnd1,
				  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
	  || !operand_equal_p (expr0->ops.ternary.opnd2,
			       expr1->ops.ternary.opnd2, 0))
	return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
			   expr1->ops.ternary.opnd0, 0)
	  && operand_equal_p (expr0->ops.ternary.opnd1,
			      expr1->ops.ternary.opnd1, 0))
	return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
	      && operand_equal_p (expr0->ops.ternary.opnd0,
				  expr1->ops.ternary.opnd1, 0)
	      && operand_equal_p (expr0->ops.ternary.opnd1,
				  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

	if (stmt_could_throw_p (expr0->ops.call.fn_from))
	  {
	    int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
	    int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
	    if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
	      return false;
	  }

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}

/* Generate a hash value for a pair of expressions.  This can be used
   iteratively by passing a previous result in HSTATE.

   The same hash value is always returned for a given pair of expressions,
   regardless of the order in which they are presented.  This is useful in
   hashing the operands of commutative functions.  */

namespace inchash
{

static void
add_expr_commutative (const_tree t1, const_tree t2, hash &hstate)
{
  hash one, two;

  inchash::add_expr (t1, one);
  inchash::add_expr (t2, two);
  hstate.add_commutative (one, two);
}

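/* For instance (an illustrative sketch): hashing the operand pairs of
   "b_2 + c_3" and "c_3 + b_2" through add_expr_commutative yields the
   same value, so both spellings of the addition land in the same
   avail_exprs bucket and can be matched as redundant.  */
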
/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow inchash::add_expr in tree.c.  */

static void
add_hashable_expr (const struct hashable_expr *expr, hash &hstate)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      inchash::add_expr (expr->ops.single.rhs, hstate);
      break;

    case EXPR_UNARY:
      hstate.add_object (expr->ops.unary.op);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        hstate.add_int (TYPE_UNSIGNED (expr->type));

      inchash::add_expr (expr->ops.unary.opnd, hstate);
      break;

    case EXPR_BINARY:
      hstate.add_object (expr->ops.binary.op);
      if (commutative_tree_code (expr->ops.binary.op))
	inchash::add_expr_commutative (expr->ops.binary.opnd0,
					  expr->ops.binary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.binary.opnd0, hstate);
          inchash::add_expr (expr->ops.binary.opnd1, hstate);
        }
      break;

    case EXPR_TERNARY:
      hstate.add_object (expr->ops.ternary.op);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
	inchash::add_expr_commutative (expr->ops.ternary.opnd0,
					  expr->ops.ternary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.ternary.opnd0, hstate);
          inchash::add_expr (expr->ops.ternary.opnd1, hstate);
        }
      inchash::add_expr (expr->ops.ternary.opnd2, hstate);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gcall *fn_from;

        hstate.add_object (code);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          hstate.merge_hash ((hashval_t) gimple_call_internal_fn (fn_from));
        else
          inchash::add_expr (gimple_call_fn (fn_from), hstate);
        for (i = 0; i < expr->ops.call.nargs; i++)
          inchash::add_expr (expr->ops.call.args[i], hstate);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          inchash::add_expr (expr->ops.phi.args[i], hstate);
      }
      break;

    default:
      gcc_unreachable ();
    }
}

}

/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  fprintf (stream, "STMT ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
      case EXPR_SINGLE:
        print_generic_expr (stream, element->expr.ops.single.rhs, 0);
        break;

      case EXPR_UNARY:
	fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
        print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
        break;

      case EXPR_BINARY:
        print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
	fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
        print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
        break;

      case EXPR_TERNARY:
	fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
        print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
	fputs (", ", stream);
        print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
	fputs (", ", stream);
        print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
	fputs (">", stream);
        break;

      case EXPR_CALL:
        {
          size_t i;
          size_t nargs = element->expr.ops.call.nargs;
          gcall *fn_from;

          fn_from = element->expr.ops.call.fn_from;
          if (gimple_call_internal_p (fn_from))
            fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                   stream);
          else
            print_generic_expr (stream, gimple_call_fn (fn_from), 0);
          fprintf (stream, " (");
          for (i = 0; i < nargs; i++)
            {
              print_generic_expr (stream, element->expr.ops.call.args[i], 0);
              if (i + 1 < nargs)
                fprintf (stream, ", ");
            }
          fprintf (stream, ")");
        }
        break;

      case EXPR_PHI:
        {
          size_t i;
          size_t nargs = element->expr.ops.phi.nargs;

          fprintf (stream, "PHI <");
          for (i = 0; i < nargs; i++)
            {
              print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
              if (i + 1 < nargs)
                fprintf (stream, ", ");
            }
          fprintf (stream, ">");
        }
        break;
    }

  if (element->vop)
    {
      fprintf (stream, " with ");
      print_generic_expr (stream, element->vop, 0);
    }

  fprintf (stream, "\n");
}

/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
  free_expr_hash_elt_contents (element);
  free (element);
}

/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
	  struct edge_info *edge_info = (struct edge_info *) e->aux;

	  if (edge_info)
	    {
	      edge_info->cond_equivalences.release ();
	      free (edge_info);
	      e->aux = NULL;
	    }
	}
    }
}

class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gcond *m_dummy_cond;
};

/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

}; // class pass_dominator

unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack.create (20);
  const_and_copies_stack.create (20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);
  need_noreturn_fixup.create (0);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through a loop header to the loop body,
     assuming that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if the loop preheader is
     missing.  We should improve jump threading in the future; then
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
	for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	  update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
	 needing EH cleanup; the new successor of these blocks, which
	 has inherited from the original block, needs the cleanup.
	 Don't clear bits in the bitmap, as that can break the bitmap
	 iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
	{
	  basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
	  if (bb == NULL)
	    continue;
	  while (single_succ_p (bb)
		 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
	    bb = single_succ (bb);
	  if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
	    continue;
	  if ((unsigned) bb->index != i)
	    bitmap_set_bit (need_eh_cleanup, bb->index);
	}

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  /* Fixup stmts that became noreturn calls.  This may require splitting
     blocks and thus isn't possible during the dominator walk or before
     jump threading has finished.  Do this in reverse order so we don't
     inadvertently remove a stmt we want to fixup by visiting a dominating
     now-noreturn call first.  */
  while (!need_noreturn_fixup.is_empty ())
    {
      gimple stmt = need_noreturn_fixup.pop ();
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Fixing up noreturn call ");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	  fprintf (dump_file, "\n");
	}
      fixup_noreturn_call (stmt);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
			    opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
			    opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
			    opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);
  need_noreturn_fixup.release ();
  avail_exprs_stack.release ();
  const_and_copies_stack.release ();

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}


/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gcond *condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
	 and change the code.  */
      if (code == LT_EXPR
	  || code == GT_EXPR
	  || code == LE_EXPR
	  || code == GE_EXPR)
	{
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
	}
    }
}

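/* For example (an illustrative sketch): "if (5 > a_1)" is rewritten
   as "if (a_1 < 5)", so equivalent conditions written in either
   operand order hash and compare the same way.  */
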
/* Unwind AVAIL_EXPRS back to its state at the start of the current
   block: pop entries off AVAIL_EXPRS_STACK and either remove them
   from the global hash table or restore the entry they had replaced,
   stopping when we reach the block's NULL marker.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      std::pair<expr_hash_elt_t, expr_hash_elt_t> victim
	= avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim.first == NULL)
	break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim.first);
        }

      slot = avail_exprs->find_slot (victim.first, NO_INSERT);
      gcc_assert (slot && *slot == victim.first);
      if (victim.second != NULL)
	{
	  free_expr_hash_elt (*slot);
	  *slot = victim.second;
	}
      else
	avail_exprs->clear_slot (slot);
    }
}

/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (const_and_copies_stack.length () > 0)
    {
      tree prev_value, dest;

      dest = const_and_copies_stack.pop ();

      if (dest == NULL)
	break;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "<<<< COPY ");
	  print_generic_expr (dump_file, dest, 0);
	  fprintf (dump_file, " = ");
	  print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
	  fprintf (dump_file, "\n");
	}

      prev_value = const_and_copies_stack.pop ();
      set_ssa_name_value (dest, prev_value);
    }
}

/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
				  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}

/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
	record_const_or_copy (lhs, rhs);

      /* If we have 0 = COND or 1 = COND equivalences, record them
	 into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
	record_cond (eq);
    }
}

/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push
    (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
  const_and_copies_stack.safe_push (NULL_TREE);

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
		        &const_and_copies_stack,
		        simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}

/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */

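/* For instance (an illustrative sketch):

     x_3 = PHI <y_2 (bb1), y_2 (bb2), x_3 (bb3)>

   The self-referential x_3 alternative is ignored and every remaining
   argument is y_2, so x_3 can be treated as equivalent to y_2.  */
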
static void
record_equivalences_from_phis (basic_block bb)
{
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
	{
	  tree t = gimple_phi_arg_def (phi, i);

	  /* Ignore alternatives which are the same as our LHS.  Since
	     LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
	     can simply compare pointers.  */
	  if (lhs == t)
	    continue;

	  /* If we have not processed an alternative yet, then set
	     RHS to this alternative.  */
	  if (rhs == NULL)
	    rhs = t;
	  /* If we have processed an alternative (stored in RHS), then
	     see if it is equal to this one.  If it isn't, then stop
	     the search.  */
	  else if (! operand_equal_for_phi_arg_p (rhs, t))
	    break;
	}

      /* If we had no interesting alternatives, then all the RHS alternatives
	 must have been the same as LHS.  */
      if (!rhs)
	rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
	 breaking out of the loop, then we have a PHI which may create
	 a useful equivalence.  We do not need to record unwind data for
	 this, since this is a true assignment and not an equivalence
	 inferred from a comparison.  All uses of this ssa name are dominated
	 by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi)
	  && may_propagate_copy (lhs, rhs))
	set_ssa_name_value (lhs, rhs);
    }
}

/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
	 the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
	continue;

      /* If we have already seen a non-loop edge, then we must have
	 multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
	return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
	 it.  */
      retval = e;
    }

  return retval;
}

/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
	{
	  tree lhs = edge_info->lhs;
	  tree rhs = edge_info->rhs;
	  cond_equivalence *eq;

	  if (lhs)
	    record_equality (lhs, rhs);

	  /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
	     set via a widening type conversion, then we may be able to record
	     additional equivalences.  */
	  if (lhs
	      && TREE_CODE (lhs) == SSA_NAME
	      && is_gimple_constant (rhs)
	      && TREE_CODE (rhs) == INTEGER_CST)
	    {
	      gimple defstmt = SSA_NAME_DEF_STMT (lhs);

	      if (defstmt
		  && is_gimple_assign (defstmt)
		  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
		{
		  tree old_rhs = gimple_assign_rhs1 (defstmt);

		  /* If the conversion widens the original value and
		     the constant is in the range of the type of OLD_RHS,
		     then convert the constant and record the equivalence.

		     Note that int_fits_type_p does not check the precision
		     if the upper and lower bounds are OK.  */
		  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
		      && (TYPE_PRECISION (TREE_TYPE (lhs))
			  > TYPE_PRECISION (TREE_TYPE (old_rhs)))
		      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
		    {
		      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
		      record_equality (old_rhs, newval);
		    }
		}
	    }

	  for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
	    record_cond (eq);
	}
    }
}

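/* As an illustrative sketch of the widening case above (all names
   hypothetical): given "x_2 = (int) y_1" where y_1 has type short,
   an edge carrying "x_2 == 5" also lets us record
   "y_1 == (short) 5", since the constant 5 fits in short.  */
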
/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
	   opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}


/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}


/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
	   (long) htab.size (),
	   (long) htab.elements (),
	   htab.collisions ());
}


1413 
1414 /* Enter condition equivalence into the expression hash table.
1415    This indicates that a conditional expression has a known
1416    boolean value.  */
1417 
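/* For example (an illustrative sketch): on the true edge of
   "if (a_1 < b_2)" the pass records "a_1 < b_2 == true" here, so a
   later dominated "if (a_1 < b_2)" can be resolved to true.  */
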
static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      avail_exprs_stack.safe_push
	(std::pair<expr_hash_elt_t, expr_hash_elt_t> (element, NULL));
    }
  else
    free_expr_hash_elt (element);
}

/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it onto the
   vector *P.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}

/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	  build_and_record_new_cond (LTGT_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
				  ? LE_EXPR : GE_EXPR),
				 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}
      build_and_record_new_cond (LE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
				  ? UNLE_EXPR : UNGE_EXPR),
				 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now push the original true and false conditions onto the
     vector as well.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}

/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  const_and_copies_stack.reserve (2);
  const_and_copies_stack.quick_push (prev_x);
  const_and_copies_stack.quick_push (x);
}

/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
	y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}

/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}

/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
	   /* ???  When threading over backedges the following is important
	      for correctness.  See PR61757.  */
	   || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (x)
      && (TREE_CODE (y) != REAL_CST
	  || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}

1697 /* Returns true when STMT is a simple iv increment.  It detects the
1698    following situation:
1699 
1700    i_1 = phi (..., i_2)
1701    i_2 = i_1 +/- ...  */
1702 
1703 bool
1704 simple_iv_increment_p (gimple stmt)
1705 {
1706   enum tree_code code;
1707   tree lhs, preinc;
1708   gimple phi;
1709   size_t i;
1710 
1711   if (gimple_code (stmt) != GIMPLE_ASSIGN)
1712     return false;
1713 
1714   lhs = gimple_assign_lhs (stmt);
1715   if (TREE_CODE (lhs) != SSA_NAME)
1716     return false;
1717 
1718   code = gimple_assign_rhs_code (stmt);
1719   if (code != PLUS_EXPR
1720       && code != MINUS_EXPR
1721       && code != POINTER_PLUS_EXPR)
1722     return false;
1723 
1724   preinc = gimple_assign_rhs1 (stmt);
1725   if (TREE_CODE (preinc) != SSA_NAME)
1726     return false;
1727 
1728   phi = SSA_NAME_DEF_STMT (preinc);
1729   if (gimple_code (phi) != GIMPLE_PHI)
1730     return false;
1731 
1732   for (i = 0; i < gimple_phi_num_args (phi); i++)
1733     if (gimple_phi_arg_def (phi, i) == lhs)
1734       return true;
1735 
1736   return false;
1737 }
1738 
1739 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
1740    known value for that SSA_NAME (or NULL if no value is known).
1741 
1742    Propagate values from CONST_AND_COPIES into the PHI nodes of the
1743    successors of BB.  */
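/* For example, if the table records x_3 = 7 (illustrative names) and a
   successor of BB contains  y_5 = PHI <x_3 (BB), ...>,  the argument
   x_3 on BB's incoming edge is replaced by 7.  */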
1744 
1745 static void
1746 cprop_into_successor_phis (basic_block bb)
1747 {
1748   edge e;
1749   edge_iterator ei;
1750 
1751   FOR_EACH_EDGE (e, ei, bb->succs)
1752     {
1753       int indx;
1754       gphi_iterator gsi;
1755 
1756       /* If this is an abnormal edge, then we do not want to copy propagate
1757 	 into the PHI alternative associated with this edge.  */
1758       if (e->flags & EDGE_ABNORMAL)
1759 	continue;
1760 
1761       gsi = gsi_start_phis (e->dest);
1762       if (gsi_end_p (gsi))
1763 	continue;
1764 
1765       /* We may have an equivalence associated with this edge.  While
1766 	 we cannot propagate it into non-dominated blocks, we can
1767 	 propagate it into PHIs in non-dominated blocks.  */
1768 
1769       /* Push the unwind marker so we can reset the const and copies
1770 	 table back to its original state after processing this edge.  */
1771       const_and_copies_stack.safe_push (NULL_TREE);
1772 
1773       /* Extract and record any simple NAME = VALUE equivalences.
1774 
1775 	 Don't bother with [01] = COND equivalences; they're not useful
1776 	 here.  */
1777       struct edge_info *edge_info = (struct edge_info *) e->aux;
1778       if (edge_info)
1779 	{
1780 	  tree lhs = edge_info->lhs;
1781 	  tree rhs = edge_info->rhs;
1782 
1783 	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
1784 	    record_const_or_copy (lhs, rhs);
1785 	}
1786 
1787       indx = e->dest_idx;
1788       for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
1789 	{
1790 	  tree new_val;
1791 	  use_operand_p orig_p;
1792 	  tree orig_val;
1793           gphi *phi = gsi.phi ();
1794 
1795 	  /* The alternative may be associated with a constant, so verify
1796 	     it is an SSA_NAME before doing anything with it.  */
1797 	  orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
1798 	  orig_val = get_use_from_ptr (orig_p);
1799 	  if (TREE_CODE (orig_val) != SSA_NAME)
1800 	    continue;
1801 
1802 	  /* If we have *ORIG_P in our constant/copy table, then replace
1803 	     ORIG_P with its value in our constant/copy table.  */
1804 	  new_val = SSA_NAME_VALUE (orig_val);
1805 	  if (new_val
1806 	      && new_val != orig_val
1807 	      && (TREE_CODE (new_val) == SSA_NAME
1808 		  || is_gimple_min_invariant (new_val))
1809 	      && may_propagate_copy (orig_val, new_val))
1810 	    propagate_value (orig_p, new_val);
1811 	}
1812 
1813       restore_vars_to_original_value ();
1814     }
1815 }
1816 
1817 /* We have finished optimizing BB, record any information implied by
1818    taking a specific outgoing edge from BB.  */
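/* For example, after  if (x_1 == 5)  (an illustrative test) we can
   record x_1 = 5 on the true edge, and for a switch we can record on
   each single-valued case edge that the index equals that case's
   constant.  */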
1819 
1820 static void
1821 record_edge_info (basic_block bb)
1822 {
1823   gimple_stmt_iterator gsi = gsi_last_bb (bb);
1824   struct edge_info *edge_info;
1825 
1826   if (! gsi_end_p (gsi))
1827     {
1828       gimple stmt = gsi_stmt (gsi);
1829       location_t loc = gimple_location (stmt);
1830 
1831       if (gimple_code (stmt) == GIMPLE_SWITCH)
1832 	{
1833 	  gswitch *switch_stmt = as_a <gswitch *> (stmt);
1834 	  tree index = gimple_switch_index (switch_stmt);
1835 
1836 	  if (TREE_CODE (index) == SSA_NAME)
1837 	    {
1838 	      int i;
1839               int n_labels = gimple_switch_num_labels (switch_stmt);
1840 	      tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
1841 	      edge e;
1842 	      edge_iterator ei;
1843 
1844 	      for (i = 0; i < n_labels; i++)
1845 		{
1846 		  tree label = gimple_switch_label (switch_stmt, i);
1847 		  basic_block target_bb = label_to_block (CASE_LABEL (label));
1848 		  if (CASE_HIGH (label)
1849 		      || !CASE_LOW (label)
1850 		      || info[target_bb->index])
1851 		    info[target_bb->index] = error_mark_node;
1852 		  else
1853 		    info[target_bb->index] = label;
1854 		}
1855 
1856 	      FOR_EACH_EDGE (e, ei, bb->succs)
1857 		{
1858 		  basic_block target_bb = e->dest;
1859 		  tree label = info[target_bb->index];
1860 
1861 		  if (label != NULL && label != error_mark_node)
1862 		    {
1863 		      tree x = fold_convert_loc (loc, TREE_TYPE (index),
1864 						 CASE_LOW (label));
1865 		      edge_info = allocate_edge_info (e);
1866 		      edge_info->lhs = index;
1867 		      edge_info->rhs = x;
1868 		    }
1869 		}
1870 	      free (info);
1871 	    }
1872 	}
1873 
1874       /* A COND_EXPR may create equivalences too.  */
1875       if (gimple_code (stmt) == GIMPLE_COND)
1876 	{
1877 	  edge true_edge;
1878 	  edge false_edge;
1879 
1880           tree op0 = gimple_cond_lhs (stmt);
1881           tree op1 = gimple_cond_rhs (stmt);
1882           enum tree_code code = gimple_cond_code (stmt);
1883 
1884 	  extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1885 
1886           /* Special case comparing booleans against a constant, as we
1887              know the value of OP0 on both arms of the branch; i.e., we
1888              can record an equivalence for OP0 rather than COND.  */
1889           if ((code == EQ_EXPR || code == NE_EXPR)
1890               && TREE_CODE (op0) == SSA_NAME
1891               && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
1892               && is_gimple_min_invariant (op1))
1893             {
1894               if (code == EQ_EXPR)
1895                 {
1896                   edge_info = allocate_edge_info (true_edge);
1897                   edge_info->lhs = op0;
1898                   edge_info->rhs = (integer_zerop (op1)
1899                                     ? boolean_false_node
1900                                     : boolean_true_node);
1901 
1902                   edge_info = allocate_edge_info (false_edge);
1903                   edge_info->lhs = op0;
1904                   edge_info->rhs = (integer_zerop (op1)
1905                                     ? boolean_true_node
1906                                     : boolean_false_node);
1907                 }
1908               else
1909                 {
1910                   edge_info = allocate_edge_info (true_edge);
1911                   edge_info->lhs = op0;
1912                   edge_info->rhs = (integer_zerop (op1)
1913                                     ? boolean_true_node
1914                                     : boolean_false_node);
1915 
1916                   edge_info = allocate_edge_info (false_edge);
1917                   edge_info->lhs = op0;
1918                   edge_info->rhs = (integer_zerop (op1)
1919                                     ? boolean_false_node
1920                                     : boolean_true_node);
1921                 }
1922             }
1923           else if (is_gimple_min_invariant (op0)
1924                    && (TREE_CODE (op1) == SSA_NAME
1925                        || is_gimple_min_invariant (op1)))
1926             {
1927               tree cond = build2 (code, boolean_type_node, op0, op1);
1928               tree inverted = invert_truthvalue_loc (loc, cond);
1929               bool can_infer_simple_equiv
1930                 = !(HONOR_SIGNED_ZEROS (op0)
1931                     && real_zerop (op0));
1932               struct edge_info *edge_info;
1933 
1934               edge_info = allocate_edge_info (true_edge);
1935               record_conditions (edge_info, cond, inverted);
1936 
1937               if (can_infer_simple_equiv && code == EQ_EXPR)
1938                 {
1939                   edge_info->lhs = op1;
1940                   edge_info->rhs = op0;
1941                 }
1942 
1943               edge_info = allocate_edge_info (false_edge);
1944               record_conditions (edge_info, inverted, cond);
1945 
1946               if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
1947                 {
1948                   edge_info->lhs = op1;
1949                   edge_info->rhs = op0;
1950                 }
1951             }
1952 
1953           else if (TREE_CODE (op0) == SSA_NAME
1954                    && (TREE_CODE (op1) == SSA_NAME
1955                        || is_gimple_min_invariant (op1)))
1956             {
1957               tree cond = build2 (code, boolean_type_node, op0, op1);
1958               tree inverted = invert_truthvalue_loc (loc, cond);
1959               bool can_infer_simple_equiv
1960                 = !(HONOR_SIGNED_ZEROS (op1)
1961                     && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
1962               struct edge_info *edge_info;
1963 
1964               edge_info = allocate_edge_info (true_edge);
1965               record_conditions (edge_info, cond, inverted);
1966 
1967               if (can_infer_simple_equiv && code == EQ_EXPR)
1968                 {
1969                   edge_info->lhs = op0;
1970                   edge_info->rhs = op1;
1971                 }
1972 
1973               edge_info = allocate_edge_info (false_edge);
1974               record_conditions (edge_info, inverted, cond);
1975 
1976               if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
1977                 {
1978                   edge_info->lhs = op0;
1979                   edge_info->rhs = op1;
1980                 }
1981             }
1982         }
1983 
1984       /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
1985     }
1986 }
1987 
1988 void
1989 dom_opt_dom_walker::before_dom_children (basic_block bb)
1990 {
1991   gimple_stmt_iterator gsi;
1992 
1993   if (dump_file && (dump_flags & TDF_DETAILS))
1994     fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1995 
1996   /* Push a marker on the stacks of local information so that we know how
1997      far to unwind when we finalize this block.  */
1998   avail_exprs_stack.safe_push
1999     (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
2000   const_and_copies_stack.safe_push (NULL_TREE);
2001 
2002   record_equivalences_from_incoming_edge (bb);
2003 
2004   /* PHI nodes can create equivalences too.  */
2005   record_equivalences_from_phis (bb);
2006 
2007   /* Create equivalences from redundant PHIs.  PHIs are only truly
2008      redundant when they exist in the same block, so push another
2009      marker and unwind right afterwards.  */
2010   avail_exprs_stack.safe_push
2011     (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
2012   for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2013     eliminate_redundant_computations (&gsi);
2014   remove_local_expressions_from_table ();
2015 
2016   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2017     optimize_stmt (bb, gsi);
2018 
2019   /* Now prepare to process dominated blocks.  */
2020   record_edge_info (bb);
2021   cprop_into_successor_phis (bb);
2022 }
2023 
2024 /* We have finished processing the dominator children of BB, perform
2025    any finalization actions in preparation for leaving this node in
2026    the dominator tree.  */
2027 
2028 void
2029 dom_opt_dom_walker::after_dom_children (basic_block bb)
2030 {
2031   gimple last;
2032 
2033   /* If we have an outgoing edge to a block with multiple incoming and
2034      outgoing edges, then we may be able to thread the edge, i.e., we
2035      may be able to statically determine which of the outgoing edges
2036      will be traversed when the incoming edge from BB is traversed.  */
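  /* For instance, if BB establishes x_5 = 0 (an illustrative name) and
     its lone successor ends in  if (x_5 == 0),  the branch outcome is
     known on the path through BB and the jump may be threaded directly
     to the true destination.  */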
2037   if (single_succ_p (bb)
2038       && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
2039       && potentially_threadable_block (single_succ (bb)))
2040     {
2041       thread_across_edge (single_succ_edge (bb));
2042     }
2043   else if ((last = last_stmt (bb))
2044 	   && gimple_code (last) == GIMPLE_COND
2045 	   && EDGE_COUNT (bb->succs) == 2
2046 	   && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
2047 	   && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
2048     {
2049       edge true_edge, false_edge;
2050 
2051       extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
2052 
2053       /* Only try to thread the edge if it reaches a target block with
2054 	 more than one predecessor and more than one successor.  */
2055       if (potentially_threadable_block (true_edge->dest))
2056 	thread_across_edge (true_edge);
2057 
2058       /* Similarly for the ELSE arm.  */
2059       if (potentially_threadable_block (false_edge->dest))
2060 	thread_across_edge (false_edge);
2061 
2062     }
2063 
2064   /* These remove expressions local to BB from the tables.  */
2065   remove_local_expressions_from_table ();
2066   restore_vars_to_original_value ();
2067 }
2068 
2069 /* Search for redundant computations in STMT.  If any are found, then
2070    replace them with the variable holding the result of the computation.
2071 
2072    If safe, record this expression into the available expression hash
2073    table.  */
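/* For example, if  a_1 = b_2 + c_3  dominates a later  d_4 = b_2 + c_3
   (illustrative names), the second computation is replaced by the copy
   d_4 = a_1.  */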
2074 
2075 static void
2076 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2077 {
2078   tree expr_type;
2079   tree cached_lhs;
2080   tree def;
2081   bool insert = true;
2082   bool assigns_var_p = false;
2083 
2084   gimple stmt = gsi_stmt (*gsi);
2085 
2086   if (gimple_code (stmt) == GIMPLE_PHI)
2087     def = gimple_phi_result (stmt);
2088   else
2089     def = gimple_get_lhs (stmt);
2090 
2091   /* Certain expressions on the RHS can be optimized away, but cannot
2092      themselves be entered into the hash tables.  */
2093   if (! def
2094       || TREE_CODE (def) != SSA_NAME
2095       || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2096       || gimple_vdef (stmt)
2097       /* Do not record equivalences for increments of ivs.  This would create
2098 	 overlapping live ranges for a very questionable gain.  */
2099       || simple_iv_increment_p (stmt))
2100     insert = false;
2101 
2102   /* Check if the expression has been computed before.  */
2103   cached_lhs = lookup_avail_expr (stmt, insert);
2104 
2105   opt_stats.num_exprs_considered++;
2106 
2107   /* Get the type of the expression we are trying to optimize.  */
2108   if (is_gimple_assign (stmt))
2109     {
2110       expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2111       assigns_var_p = true;
2112     }
2113   else if (gimple_code (stmt) == GIMPLE_COND)
2114     expr_type = boolean_type_node;
2115   else if (is_gimple_call (stmt))
2116     {
2117       gcc_assert (gimple_call_lhs (stmt));
2118       expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2119       assigns_var_p = true;
2120     }
2121   else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2122     expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
2123   else if (gimple_code (stmt) == GIMPLE_PHI)
2124     /* We can't propagate into a phi, so the logic below doesn't apply.
2125        Instead record an equivalence between the cached LHS and the
2126        PHI result of this statement, provided they are in the same block.
2127        This should be sufficient to kill the redundant phi.  */
2128     {
2129       if (def && cached_lhs)
2130 	record_const_or_copy (def, cached_lhs);
2131       return;
2132     }
2133   else
2134     gcc_unreachable ();
2135 
2136   if (!cached_lhs)
2137     return;
2138 
2139   /* It is safe to ignore types here since we have already done
2140      type checking in the hashing and equality routines.  In fact
2141      type checking here merely gets in the way of constant
2142      propagation.  Also, make sure that it is safe to propagate
2143      CACHED_LHS into the expression in STMT.  */
2144   if ((TREE_CODE (cached_lhs) != SSA_NAME
2145        && (assigns_var_p
2146            || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2147       || may_propagate_copy_into_stmt (stmt, cached_lhs))
2148     {
2149       gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2150 			   || is_gimple_min_invariant (cached_lhs));
2151 
2152       if (dump_file && (dump_flags & TDF_DETAILS))
2153 	{
2154 	  fprintf (dump_file, "  Replaced redundant expr '");
2155 	  print_gimple_expr (dump_file, stmt, 0, dump_flags);
2156 	  fprintf (dump_file, "' with '");
2157 	  print_generic_expr (dump_file, cached_lhs, dump_flags);
2158           fprintf (dump_file, "'\n");
2159 	}
2160 
2161       opt_stats.num_re++;
2162 
2163       if (assigns_var_p
2164 	  && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2165 	cached_lhs = fold_convert (expr_type, cached_lhs);
2166 
2167       propagate_tree_value_into_stmt (gsi, cached_lhs);
2168 
2169       /* Since it is always necessary to mark the result as modified,
2170          perhaps we should move this into propagate_tree_value_into_stmt
2171          itself.  */
2172       gimple_set_modified (gsi_stmt (*gsi), true);
2173     }
2174 }
2175 
2176 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2177    the available expressions table or the const_and_copies table.
2178    Detect and record those equivalences.  */
2179 /* We handle only very simple copy equivalences here.  The heavy
2180    lifting is done by eliminate_redundant_computations.  */
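/* For example, the store  *p_1 = x_2  (illustrative names) lets us
   enter  x_2 = *p_1  into the expression table, so that a dominated
   load from *p_1 can be replaced by x_2.  */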
2181 
2182 static void
2183 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2184 {
2185   tree lhs;
2186   enum tree_code lhs_code;
2187 
2188   gcc_assert (is_gimple_assign (stmt));
2189 
2190   lhs = gimple_assign_lhs (stmt);
2191   lhs_code = TREE_CODE (lhs);
2192 
2193   if (lhs_code == SSA_NAME
2194       && gimple_assign_single_p (stmt))
2195     {
2196       tree rhs = gimple_assign_rhs1 (stmt);
2197 
2198       /* If the RHS of the assignment is a constant or another variable that
2199 	 may be propagated, register it in the CONST_AND_COPIES table.  We
2200 	 do not need to record unwind data for this, since this is a true
2201 	 assignment and not an equivalence inferred from a comparison.  All
2202 	 uses of this ssa name are dominated by this assignment, so unwinding
2203 	 just costs time and space.  */
2204       if (may_optimize_p
2205 	  && (TREE_CODE (rhs) == SSA_NAME
2206 	      || is_gimple_min_invariant (rhs)))
2207       {
2208 	if (dump_file && (dump_flags & TDF_DETAILS))
2209 	  {
2210 	    fprintf (dump_file, "==== ASGN ");
2211 	    print_generic_expr (dump_file, lhs, 0);
2212 	    fprintf (dump_file, " = ");
2213 	    print_generic_expr (dump_file, rhs, 0);
2214 	    fprintf (dump_file, "\n");
2215 	  }
2216 
2217 	set_ssa_name_value (lhs, rhs);
2218       }
2219     }
2220 
2221   /* Make sure we can propagate &x + CST.  */
2222   if (lhs_code == SSA_NAME
2223       && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
2224       && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
2225       && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
2226     {
2227       tree op0 = gimple_assign_rhs1 (stmt);
2228       tree op1 = gimple_assign_rhs2 (stmt);
2229       tree new_rhs
2230 	= build_fold_addr_expr (fold_build2 (MEM_REF,
2231 					     TREE_TYPE (TREE_TYPE (op0)),
2232 					     unshare_expr (op0),
2233 					     fold_convert (ptr_type_node,
2234 							   op1)));
2235       if (dump_file && (dump_flags & TDF_DETAILS))
2236 	{
2237 	  fprintf (dump_file, "==== ASGN ");
2238 	  print_generic_expr (dump_file, lhs, 0);
2239 	  fprintf (dump_file, " = ");
2240 	  print_generic_expr (dump_file, new_rhs, 0);
2241 	  fprintf (dump_file, "\n");
2242 	}
2243 
2244       set_ssa_name_value (lhs, new_rhs);
2245     }
2246 
2247   /* A memory store, even an aliased store, creates a useful
2248      equivalence.  By exchanging the LHS and RHS, creating suitable
2249      vops and recording the result in the available expression table,
2250      we may be able to expose more redundant loads.  */
2251   if (!gimple_has_volatile_ops (stmt)
2252       && gimple_references_memory_p (stmt)
2253       && gimple_assign_single_p (stmt)
2254       && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2255 	  || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2256       && !is_gimple_reg (lhs))
2257     {
2258       tree rhs = gimple_assign_rhs1 (stmt);
2259       gassign *new_stmt;
2260 
2261       /* Build a new statement with the RHS and LHS exchanged.  */
2262       if (TREE_CODE (rhs) == SSA_NAME)
2263         {
2264           /* NOTE tuples.  The call to gimple_build_assign below replaced
2265              a call to build_gimple_modify_stmt, which did not set the
2266              SSA_NAME_DEF_STMT on the LHS of the assignment.  Doing so
2267              may cause an SSA validation failure, as the LHS may be a
2268              default-initialized name and should have no definition.  I'm
2269              a bit dubious of this, as the artificial statement that we
2270              generate here may in fact be ill-formed, but it is simply
2271              used as an internal device in this pass, and never becomes
2272              part of the CFG.  */
2273           gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2274           new_stmt = gimple_build_assign (rhs, lhs);
2275           SSA_NAME_DEF_STMT (rhs) = defstmt;
2276         }
2277       else
2278         new_stmt = gimple_build_assign (rhs, lhs);
2279 
2280       gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2281 
2282       /* Finally enter the statement into the available expression
2283 	 table.  */
2284       lookup_avail_expr (new_stmt, true);
2285     }
2286 }
2287 
2288 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2289    CONST_AND_COPIES.  */
2290 
2291 static void
2292 cprop_operand (gimple stmt, use_operand_p op_p)
2293 {
2294   tree val;
2295   tree op = USE_FROM_PTR (op_p);
2296 
2297   /* If the operand has a known constant value or it is known to be a
2298      copy of some other variable, use the value or copy stored in
2299      CONST_AND_COPIES.  */
2300   val = SSA_NAME_VALUE (op);
2301   if (val && val != op)
2302     {
2303       /* Do not replace hard register operands in asm statements.  */
2304       if (gimple_code (stmt) == GIMPLE_ASM
2305 	  && !may_propagate_copy_into_asm (op))
2306 	return;
2307 
2308       /* Certain operands are not allowed to be copy propagated due
2309 	 to their interaction with exception handling and some GCC
2310 	 extensions.  */
2311       if (!may_propagate_copy (op, val))
2312 	return;
2313 
2314       /* Do not propagate copies into BIVs.
2315          See PR23821 and PR62217 for how this can disturb IV and
2316 	 number of iteration analysis.  */
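      /* E.g., if OP is i_1, the result of a loop-header PHI, and VAL is
	 the copy j_5 (illustrative names), rewriting the IV increment
	 in terms of j_5 would hide the induction variable from later
	 analysis.  */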
2317       if (TREE_CODE (val) != INTEGER_CST)
2318 	{
2319 	  gimple def = SSA_NAME_DEF_STMT (op);
2320 	  if (gimple_code (def) == GIMPLE_PHI
2321 	      && gimple_bb (def)->loop_father->header == gimple_bb (def))
2322 	    return;
2323 	}
2324 
2325       /* Dump details.  */
2326       if (dump_file && (dump_flags & TDF_DETAILS))
2327 	{
2328 	  fprintf (dump_file, "  Replaced '");
2329 	  print_generic_expr (dump_file, op, dump_flags);
2330 	  fprintf (dump_file, "' with %s '",
2331 		   (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2332 	  print_generic_expr (dump_file, val, dump_flags);
2333 	  fprintf (dump_file, "'\n");
2334 	}
2335 
2336       if (TREE_CODE (val) != SSA_NAME)
2337 	opt_stats.num_const_prop++;
2338       else
2339 	opt_stats.num_copy_prop++;
2340 
2341       propagate_value (op_p, val);
2342 
2343       /* And note that we modified this statement.  This is now
2344 	 safe, even if we changed virtual operands since we will
2345 	 rescan the statement and rewrite its operands again.  */
2346       gimple_set_modified (stmt, true);
2347     }
2348 }
2349 
2350 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2351    known value for that SSA_NAME (or NULL if no value is known).
2352 
2353    Propagate values from CONST_AND_COPIES into the real uses of
2354    STMT.  */
2355 
2356 static void
2357 cprop_into_stmt (gimple stmt)
2358 {
2359   use_operand_p op_p;
2360   ssa_op_iter iter;
2361 
2362   FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2363     cprop_operand (stmt, op_p);
2364 }
2365 
2366 /* Optimize the statement pointed to by iterator SI.
2367 
2368    We try to perform some simplistic global redundancy elimination and
2369    constant propagation:
2370 
2371    1- To detect global redundancy, we keep track of expressions that have
2372       been computed in this block and its dominators.  If we find that the
2373       same expression is computed more than once, we eliminate repeated
2374       computations by using the target of the first one.
2375 
2376    2- Constant values and copy assignments.  This is used to do very
2377       simplistic constant and copy propagation.  When a constant or copy
2378       assignment is found, we map the value on the RHS of the assignment to
2379       the variable in the LHS in the CONST_AND_COPIES table.  */
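/* For example (with illustrative names):

     x_1 = 4;
     y_2 = x_1 + 5;

   records x_1 = 4 in CONST_AND_COPIES; the use of x_1 in the second
   statement is then replaced by 4 and the addition folds to y_2 = 9.  */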
2380 
2381 static void
2382 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2383 {
2384   gimple stmt, old_stmt;
2385   bool may_optimize_p;
2386   bool modified_p = false;
2387   bool was_noreturn;
2388 
2389   old_stmt = stmt = gsi_stmt (si);
2390   was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);
2391 
2392   if (dump_file && (dump_flags & TDF_DETAILS))
2393     {
2394       fprintf (dump_file, "Optimizing statement ");
2395       print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2396     }
2397 
2398   if (gimple_code (stmt) == GIMPLE_COND)
2399     canonicalize_comparison (as_a <gcond *> (stmt));
2400 
2401   update_stmt_if_modified (stmt);
2402   opt_stats.num_stmts++;
2403 
2404   /* Const/copy propagate into the uses of STMT.  */
2405   cprop_into_stmt (stmt);
2406 
2407   /* If the statement has been modified with constant replacements,
2408      fold its RHS before checking for redundant computations.  */
2409   if (gimple_modified_p (stmt))
2410     {
2411       tree rhs = NULL;
2412 
2413       /* Try to fold the statement making sure that STMT is kept
2414 	 up to date.  */
2415       if (fold_stmt (&si))
2416 	{
2417 	  stmt = gsi_stmt (si);
2418 	  gimple_set_modified (stmt, true);
2419 
2420 	  if (dump_file && (dump_flags & TDF_DETAILS))
2421 	    {
2422 	      fprintf (dump_file, "  Folded to: ");
2423 	      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2424 	    }
2425 	}
2426 
2427       /* We only need to consider cases that can yield a gimple operand.  */
2428       if (gimple_assign_single_p (stmt))
2429         rhs = gimple_assign_rhs1 (stmt);
2430       else if (gimple_code (stmt) == GIMPLE_GOTO)
2431         rhs = gimple_goto_dest (stmt);
2432       else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2433         /* This should never be an ADDR_EXPR.  */
2434         rhs = gimple_switch_index (swtch_stmt);
2435 
2436       if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2437         recompute_tree_invariant_for_addr_expr (rhs);
2438 
2439       /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2440 	 even if fold_stmt updated the stmt already and thus cleared
2441 	 gimple_modified_p flag on it.  */
2442       modified_p = true;
2443     }
2444 
2445   /* Check for redundant computations.  Do this only for side-effect-free
2446      assignments, calls with a LHS, conditionals and switches.  */
2447   may_optimize_p = (!gimple_has_side_effects (stmt)
2448                     && (is_gimple_assign (stmt)
2449                         || (is_gimple_call (stmt)
2450                             && gimple_call_lhs (stmt) != NULL_TREE)
2451                         || gimple_code (stmt) == GIMPLE_COND
2452                         || gimple_code (stmt) == GIMPLE_SWITCH));
2453 
2454   if (may_optimize_p)
2455     {
2456       if (gimple_code (stmt) == GIMPLE_CALL)
2457 	{
2458 	  /* Resolve __builtin_constant_p.  If it hasn't been
2459 	     folded to integer_one_node by now, it's fairly
2460 	     certain that the value simply isn't constant.  */
2461 	  tree callee = gimple_call_fndecl (stmt);
2462 	  if (callee
2463 	      && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2464 	      && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2465 	    {
2466 	      propagate_tree_value_into_stmt (&si, integer_zero_node);
2467 	      stmt = gsi_stmt (si);
2468 	    }
2469 	}
2470 
2471       update_stmt_if_modified (stmt);
2472       eliminate_redundant_computations (&si);
2473       stmt = gsi_stmt (si);
2474 
2475       /* Perform simple redundant store elimination.  */
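      /* E.g., given  x_1 = *p_2;  ...  *p_2 = x_1;  (illustrative
	 names), the store writes back the value the location already
	 holds and can be removed.  */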
2476       if (gimple_assign_single_p (stmt)
2477 	  && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2478 	{
2479 	  tree lhs = gimple_assign_lhs (stmt);
2480 	  tree rhs = gimple_assign_rhs1 (stmt);
2481 	  tree cached_lhs;
2482 	  gassign *new_stmt;
2483 	  if (TREE_CODE (rhs) == SSA_NAME)
2484 	    {
2485 	      tree tem = SSA_NAME_VALUE (rhs);
2486 	      if (tem)
2487 		rhs = tem;
2488 	    }
2489 	  /* Build a new statement with the RHS and LHS exchanged.  */
2490 	  if (TREE_CODE (rhs) == SSA_NAME)
2491 	    {
2492 	      gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2493 	      new_stmt = gimple_build_assign (rhs, lhs);
2494 	      SSA_NAME_DEF_STMT (rhs) = defstmt;
2495 	    }
2496 	  else
2497 	    new_stmt = gimple_build_assign (rhs, lhs);
2498 	  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2499 	  cached_lhs = lookup_avail_expr (new_stmt, false, false);
2500 	  if (cached_lhs
2501 	      && rhs == cached_lhs)
2502 	    {
2503 	      basic_block bb = gimple_bb (stmt);
2504 	      unlink_stmt_vdef (stmt);
2505 	      if (gsi_remove (&si, true))
2506 		{
2507 		  bitmap_set_bit (need_eh_cleanup, bb->index);
2508 		  if (dump_file && (dump_flags & TDF_DETAILS))
2509 		    fprintf (dump_file, "  Flagged to clear EH edges.\n");
2510 		}
2511 	      release_defs (stmt);
2512 	      return;
2513 	    }
2514 	}
2515     }
2516 
2517   /* Record any additional equivalences created by this statement.  */
2518   if (is_gimple_assign (stmt))
2519     record_equivalences_from_stmt (stmt, may_optimize_p);
2520 
2521   /* If STMT is a COND_EXPR and it was modified, then we may know
2522      where it goes.  If that is the case, then mark the CFG as altered.
2523 
2524      This will cause us to later call remove_unreachable_blocks and
2525      cleanup_tree_cfg when it is safe to do so.  It is not safe to
2526      clean things up here since removal of edges and such can trigger
2527      the removal of PHI nodes, which in turn can release SSA_NAMEs to
2528      the manager.
2529 
2530      That's all fine and good, except that once SSA_NAMEs are released
2531      to the manager, we must not call create_ssa_name until all references
2532      to released SSA_NAMEs have been eliminated.
2533 
2534      References to the deleted SSA_NAMEs cannot be eliminated until
2535      we remove unreachable blocks.
2536 
2537      We cannot remove unreachable blocks until after we have completed
2538      any queued jump threading.
2539 
2540      We cannot complete any queued jump threads until we have taken
2541      appropriate variables out of SSA form.  Taking variables out of
2542      SSA form can call create_ssa_name and thus we lose.
2543 
2544      Ultimately I suspect we're going to need to change the interface
2545      into the SSA_NAME manager.  */
2546   if (gimple_modified_p (stmt) || modified_p)
2547     {
2548       tree val = NULL;
2549 
2550       update_stmt_if_modified (stmt);
2551 
2552       if (gimple_code (stmt) == GIMPLE_COND)
2553         val = fold_binary_loc (gimple_location (stmt),
2554 			   gimple_cond_code (stmt), boolean_type_node,
2555                            gimple_cond_lhs (stmt),  gimple_cond_rhs (stmt));
2556       else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2557 	val = gimple_switch_index (swtch_stmt);
2558 
2559       if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2560 	cfg_altered = true;
2561 
2562       /* If we simplified a statement in such a way as to be shown that it
2563 	 cannot trap, update the eh information and the cfg to match.  */
2564       if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2565 	{
2566 	  bitmap_set_bit (need_eh_cleanup, bb->index);
2567 	  if (dump_file && (dump_flags & TDF_DETAILS))
2568 	    fprintf (dump_file, "  Flagged to clear EH edges.\n");
2569 	}
2570 
2571       if (!was_noreturn
2572 	  && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
2573 	need_noreturn_fixup.safe_push (stmt);
2574     }
2575 }
2576 
2577 /* Helper for walk_non_aliased_vuses.  Determine if we arrived at
2578    the desired memory state.  */
2579 
2580 static void *
2581 vuse_eq (ao_ref *, tree vuse1, unsigned int cnt, void *data)
2582 {
2583   tree vuse2 = (tree) data;
2584   if (vuse1 == vuse2)
2585     return data;
2586 
2587   /* This bounds the stmt walks we perform on reference lookups
2588      to O(1) instead of O(N) where N is the number of dominating
2589      stores leading to a candidate.  We re-use the SCCVN param
2590      for this as it is basically the same complexity.  */
2591   if (cnt > (unsigned) PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS))
2592     return (void *)-1;
2593 
2594   return NULL;
2595 }
2596 
2597 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2598    If found, return its LHS. Otherwise insert STMT in the table and
2599    return NULL_TREE.
2600 
2601    Also, when an expression is first inserted in the table, it is
2602    added to AVAIL_EXPRS_STACK, so that it can be removed when
2603    we finish processing this block and its children.  */
2604 
2605 static tree
2606 lookup_avail_expr (gimple stmt, bool insert, bool tbaa_p)
2607 {
2608   expr_hash_elt **slot;
2609   tree lhs;
2610   tree temp;
2611   struct expr_hash_elt element;
2612 
2613   /* Get LHS of phi, assignment, or call; else NULL_TREE.  */
2614   if (gimple_code (stmt) == GIMPLE_PHI)
2615     lhs = gimple_phi_result (stmt);
2616   else
2617     lhs = gimple_get_lhs (stmt);
2618 
2619   initialize_hash_element (stmt, lhs, &element);
2620 
2621   if (dump_file && (dump_flags & TDF_DETAILS))
2622     {
2623       fprintf (dump_file, "LKUP ");
2624       print_expr_hash_elt (dump_file, &element);
2625     }
2626 
2627   /* Don't bother remembering constant assignments and copy operations.
2628      Constants and copy operations are handled by the constant/copy propagator
2629      in optimize_stmt.  */
2630   if (element.expr.kind == EXPR_SINGLE
2631       && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2632           || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2633     return NULL_TREE;
2634 
2635   /* Finally try to find the expression in the main expression hash table.  */
2636   slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
2637   if (slot == NULL)
2638     {
2639       free_expr_hash_elt_contents (&element);
2640       return NULL_TREE;
2641     }
2642   else if (*slot == NULL)
2643     {
2644       struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2645       *element2 = element;
2646       element2->stamp = element2;
2647       *slot = element2;
2648 
2649       if (dump_file && (dump_flags & TDF_DETAILS))
2650         {
2651           fprintf (dump_file, "2>>> ");
2652           print_expr_hash_elt (dump_file, element2);
2653         }
2654 
2655       avail_exprs_stack.safe_push
2656 	(std::pair<expr_hash_elt_t, expr_hash_elt_t> (element2, NULL));
2657       return NULL_TREE;
2658     }
2659 
2660   /* If we found a redundant memory operation do an alias walk to
2661      check if we can re-use it.  */
2662   if (gimple_vuse (stmt) != (*slot)->vop)
2663     {
2664       tree vuse1 = (*slot)->vop;
2665       tree vuse2 = gimple_vuse (stmt);
2666       /* If we have a load of a register and a candidate in the
2667 	 hash with vuse1 then try to reach its stmt by walking
2668 	 up the virtual use-def chain using walk_non_aliased_vuses.
2669 	 But don't do this when removing expressions from the hash.  */
2670       ao_ref ref;
2671       if (!(vuse1 && vuse2
2672 	    && gimple_assign_single_p (stmt)
2673 	    && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
2674 	    && (ao_ref_init (&ref, gimple_assign_rhs1 (stmt)),
2675 		ref.base_alias_set = ref.ref_alias_set = tbaa_p ? -1 : 0, true)
2676 	    && walk_non_aliased_vuses (&ref, vuse2,
2677 				       vuse_eq, NULL, NULL, vuse1) != NULL))
2678 	{
2679 	  if (insert)
2680 	    {
2681 	      struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2682 	      *element2 = element;
2683 	      element2->stamp = element2;
2684 
2685 	      /* Insert the expr into the hash by replacing the current
2686 		 entry and recording the value to restore in the
2687 		 avail_exprs_stack.  */
2688 	      avail_exprs_stack.safe_push (std::make_pair (element2, *slot));
2689 	      *slot = element2;
2690 	      if (dump_file && (dump_flags & TDF_DETAILS))
2691 		{
2692 		  fprintf (dump_file, "2>>> ");
2693 		  print_expr_hash_elt (dump_file, *slot);
2694 		}
2695 	    }
2696 	  return NULL_TREE;
2697 	}
2698     }
2699 
2700   free_expr_hash_elt_contents (&element);
2701 
2702   /* Extract the LHS of the assignment so that it can be used as the current
2703      definition of another variable.  */
2704   lhs = (*slot)->lhs;
2705 
2706   /* See if the LHS appears in the CONST_AND_COPIES table.  If it does, then
2707      use the value from the const_and_copies table.  */
2708   if (TREE_CODE (lhs) == SSA_NAME)
2709     {
2710       temp = SSA_NAME_VALUE (lhs);
2711       if (temp)
2712 	lhs = temp;
2713     }
2714 
2715   if (dump_file && (dump_flags & TDF_DETAILS))
2716     {
2717       fprintf (dump_file, "FIND: ");
2718       print_generic_expr (dump_file, lhs, 0);
2719       fprintf (dump_file, "\n");
2720     }
2721 
2722   return lhs;
2723 }
2724 
2725 /* Hashing and equality functions for AVAIL_EXPRS.  We compute a value number
2726    for expressions using the code of the expression and the SSA numbers of
2727    its operands.  */
2728 
2729 static hashval_t
2730 avail_expr_hash (const void *p)
2731 {
2732   const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2733   inchash::hash hstate;
2734 
2735   inchash::add_hashable_expr (expr, hstate);
2736 
2737   return hstate.end ();
2738 }
2739 
2740 /* PHI-ONLY copy and constant propagation.  This pass is meant to clean
2741    up degenerate PHIs created by or exposed by jump threading.  */
2742 
2743 /* Given a statement STMT, which is either a PHI node or an assignment,
2744    remove it from the IL.  */
2745 
2746 static void
2747 remove_stmt_or_phi (gimple stmt)
2748 {
2749   gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2750 
2751   if (gimple_code (stmt) == GIMPLE_PHI)
2752     remove_phi_node (&gsi, true);
2753   else
2754     {
2755       gsi_remove (&gsi, true);
2756       release_defs (stmt);
2757     }
2758 }
2759 
2760 /* Given a statement STMT, which is either a PHI node or an assignment,
2761    return the "rhs" of the node.  In the case of a non-degenerate
2762    PHI, NULL is returned.  */
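/* A PHI is degenerate when its arguments are all the same value,
   ignoring arguments equal to the PHI result itself; e.g.
   x_2 = PHI <x_1, x_1>  (illustrative names) yields x_1.  */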
2763 
2764 static tree
2765 get_rhs_or_phi_arg (gimple stmt)
2766 {
2767   if (gimple_code (stmt) == GIMPLE_PHI)
2768     return degenerate_phi_result (as_a <gphi *> (stmt));
2769   else if (gimple_assign_single_p (stmt))
2770     return gimple_assign_rhs1 (stmt);
2771   else
2772     gcc_unreachable ();
2773 }
2774 
2775 
2776 /* Given a statement STMT, which is either a PHI node or an assignment,
2777    return the "lhs" of the node.  */
2778 
2779 static tree
2780 get_lhs_or_phi_result (gimple stmt)
2781 {
2782   if (gimple_code (stmt) == GIMPLE_PHI)
2783     return gimple_phi_result (stmt);
2784   else if (is_gimple_assign (stmt))
2785     return gimple_assign_lhs (stmt);
2786   else
2787     gcc_unreachable ();
2788 }
2789 
2790 /* Propagate RHS into all uses of LHS (when possible).
2791 
2792    RHS and LHS are derived from STMT, which is passed in solely so
2793    that we can remove it if propagation is successful.
2794 
2795    When propagating into a PHI node or into a statement which turns
2796    into a trivial copy or constant initialization, set the
2797    appropriate bit in INTERESTING_NAMEs so that we will visit those
2798    nodes as well in an effort to pick up secondary optimization
2799    opportunities.  */
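/* For example, propagating 0 for LHS into a use  if (lhs_1 == 0)
   (illustrative names) makes the condition constant; the unexecutable
   outgoing edge is removed and PHIs at its former destination may
   become degenerate, so their results are marked interesting.  */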
2800 
2801 static void
2802 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2803 {
2804   /* First verify that propagation is valid.  */
2805   if (may_propagate_copy (lhs, rhs))
2806     {
2807       use_operand_p use_p;
2808       imm_use_iterator iter;
2809       gimple use_stmt;
2810       bool all = true;
2811 
2812       /* Dump details.  */
2813       if (dump_file && (dump_flags & TDF_DETAILS))
2814 	{
2815 	  fprintf (dump_file, "  Replacing '");
2816 	  print_generic_expr (dump_file, lhs, dump_flags);
2817 	  fprintf (dump_file, "' with %s '",
2818 	           (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2819 	  print_generic_expr (dump_file, rhs, dump_flags);
2820 	  fprintf (dump_file, "'\n");
2821 	}
2822 
2823       /* Walk over every use of LHS and try to replace the use with RHS.
2824 	 At this point the only reason why such a propagation would not
2825 	 be successful would be if the use occurs in an ASM_EXPR.  */
2826       FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2827 	{
2828 	  /* Leave debug stmts alone.  If we succeed in propagating
2829 	     all non-debug uses, we'll drop the DEF, and propagation
2830 	     into debug stmts will occur then.  */
2831 	  if (gimple_debug_bind_p (use_stmt))
2832 	    continue;
2833 
2834 	  /* It's not always safe to propagate into an ASM_EXPR.  */
2835 	  if (gimple_code (use_stmt) == GIMPLE_ASM
2836               && ! may_propagate_copy_into_asm (lhs))
2837 	    {
2838 	      all = false;
2839 	      continue;
2840 	    }
2841 
2842 	  /* It's not ok to propagate into the definition stmt of RHS.
2843 		<bb 9>:
2844 		  # prephitmp.12_36 = PHI <g_67.1_6(9)>
2845 		  g_67.1_6 = prephitmp.12_36;
2846 		  goto <bb 9>;
2847 	     While this is strictly all dead code, we do not want to
2848 	     deal with it here.  */
2849 	  if (TREE_CODE (rhs) == SSA_NAME
2850 	      && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2851 	    {
2852 	      all = false;
2853 	      continue;
2854 	    }
2855 
2856 	  /* Dump details.  */
2857 	  if (dump_file && (dump_flags & TDF_DETAILS))
2858 	    {
2859 	      fprintf (dump_file, "    Original statement:");
2860 	      print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2861 	    }
2862 
2863 	  /* Propagate the RHS into this use of the LHS.  */
2864 	  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2865 	    propagate_value (use_p, rhs);
2866 
2867 	  /* Special cases to avoid useless calls into the folding
2868 	     routines, operand scanning, etc.
2869 
2870 	     Propagation into a PHI may cause the PHI to become
2871 	     a degenerate, so mark the PHI as interesting.  No other
2872 	     actions are necessary.  */
2873 	  if (gimple_code (use_stmt) == GIMPLE_PHI)
2874 	    {
2875 	      tree result;
2876 
2877 	      /* Dump details.  */
2878 	      if (dump_file && (dump_flags & TDF_DETAILS))
2879 		{
2880 		  fprintf (dump_file, "    Updated statement:");
2881 		  print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2882 		}
2883 
2884 	      result = get_lhs_or_phi_result (use_stmt);
2885 	      bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2886 	      continue;
2887 	    }
2888 
2889 	  /* From this point onward we are propagating into a
2890 	     real statement.  Folding may (or may not) be possible,
2891 	     we may expose new operands, expose dead EH edges,
2892 	     etc.  */
2893           /* NOTE tuples. In the tuples world, fold_stmt_inplace
2894              cannot fold a call that simplifies to a constant,
2895              because the GIMPLE_CALL must be replaced by a
2896              GIMPLE_ASSIGN, and there is no way to effect such a
2897              transformation in-place.  We might want to consider
2898              using the more general fold_stmt here.  */
2899 	    {
2900 	      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2901 	      fold_stmt_inplace (&gsi);
2902 	    }
2903 
2904 	  /* Sometimes propagation can expose new operands to the
2905 	     renamer.  */
2906 	  update_stmt (use_stmt);
2907 
2908 	  /* Dump details.  */
2909 	  if (dump_file && (dump_flags & TDF_DETAILS))
2910 	    {
2911 	      fprintf (dump_file, "    Updated statement:");
2912 	      print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2913 	    }
2914 
2915 	  /* If we replaced a variable index with a constant, then
2916 	     we would need to update the invariant flag for ADDR_EXPRs.  */
2917           if (gimple_assign_single_p (use_stmt)
2918               && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2919 	    recompute_tree_invariant_for_addr_expr
2920                 (gimple_assign_rhs1 (use_stmt));
2921 
2922 	  /* If we cleaned up EH information from the statement,
2923 	     mark its containing block as needing EH cleanups.  */
2924 	  if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2925 	    {
2926 	      bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2927 	      if (dump_file && (dump_flags & TDF_DETAILS))
2928 		fprintf (dump_file, "  Flagged to clear EH edges.\n");
2929 	    }
2930 
2931 	  /* Propagation may expose new trivial copy/constant propagation
2932 	     opportunities.  */
2933           if (gimple_assign_single_p (use_stmt)
2934               && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2935               && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2936                   || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2937             {
2938 	      tree result = get_lhs_or_phi_result (use_stmt);
2939 	      bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2940 	    }
2941 
2942 	  /* Propagation into these nodes may make certain edges in
2943 	     the CFG unexecutable.  We want to identify them, as PHI nodes
2944 	     at the destination of those unexecutable edges may become
2945 	     degenerate.  */
2946 	  else if (gimple_code (use_stmt) == GIMPLE_COND
2947 		   || gimple_code (use_stmt) == GIMPLE_SWITCH
2948 		   || gimple_code (use_stmt) == GIMPLE_GOTO)
2949             {
2950 	      tree val;
2951 
2952 	      if (gimple_code (use_stmt) == GIMPLE_COND)
2953                 val = fold_binary_loc (gimple_location (use_stmt),
2954 				   gimple_cond_code (use_stmt),
2955                                    boolean_type_node,
2956                                    gimple_cond_lhs (use_stmt),
2957                                    gimple_cond_rhs (use_stmt));
2958               else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2959 		val = gimple_switch_index (as_a <gswitch *> (use_stmt));
2960 	      else
2961 		val = gimple_goto_dest (use_stmt);
2962 
2963 	      if (val && is_gimple_min_invariant (val))
2964 		{
2965 		  basic_block bb = gimple_bb (use_stmt);
2966 		  edge te = find_taken_edge (bb, val);
2967 		  if (!te)
2968 		    continue;
2969 
2970 		  edge_iterator ei;
2971 		  edge e;
2972 		  gimple_stmt_iterator gsi;
2973 		  gphi_iterator psi;
2974 
2975 		  /* Remove all outgoing edges except TE.  */
2976 		  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2977 		    {
2978 		      if (e != te)
2979 			{
2980 			  /* Mark all the PHI nodes at the destination of
2981 			     the unexecutable edge as interesting.  */
2982                           for (psi = gsi_start_phis (e->dest);
2983                                !gsi_end_p (psi);
2984                                gsi_next (&psi))
2985                             {
2986                               gphi *phi = psi.phi ();
2987 
2988 			      tree result = gimple_phi_result (phi);
2989 			      int version = SSA_NAME_VERSION (result);
2990 
2991 			      bitmap_set_bit (interesting_names, version);
2992 			    }
2993 
2994 			  te->probability += e->probability;
2995 
2996 			  te->count += e->count;
2997 			  remove_edge (e);
2998 			  cfg_altered = true;
2999 			}
3000 		      else
3001 			ei_next (&ei);
3002 		    }
3003 
3004 		  gsi = gsi_last_bb (gimple_bb (use_stmt));
3005 		  gsi_remove (&gsi, true);
3006 
3007 		  /* And fixup the flags on the single remaining edge.  */
3008 		  te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
3009 		  te->flags &= ~EDGE_ABNORMAL;
3010 		  te->flags |= EDGE_FALLTHRU;
3011 		  if (te->probability > REG_BR_PROB_BASE)
3012 		    te->probability = REG_BR_PROB_BASE;
3013 	        }
3014 	    }
3015 	}
3016 
3017       /* Ensure there is nothing else to do. */
3018       gcc_assert (!all || has_zero_uses (lhs));
3019 
3020       /* If we were able to propagate away all uses of LHS, then
3021 	 we can remove STMT.  */
3022       if (all)
3023 	remove_stmt_or_phi (stmt);
3024     }
3025 }
3026 
3027 /* STMT is either a PHI node (potentially a degenerate PHI node) or
3028    a statement that is a trivial copy or constant initialization.
3029 
3030    Attempt to eliminate STMT by propagating its RHS into all uses of
3031    its LHS.  This may in turn set new bits in INTERESTING_NAMES
3032    for nodes we want to revisit later.
3033 
3034    All exit paths should clear INTERESTING_NAMES for the result
3035    of STMT.  */
3036 
3037 static void
3038 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
3039 {
3040   tree lhs = get_lhs_or_phi_result (stmt);
3041   tree rhs;
3042   int version = SSA_NAME_VERSION (lhs);
3043 
3044   /* If the LHS of this statement or PHI has no uses, then we can
3045      just eliminate it.  This can occur if, for example, the PHI
3046      was created by block duplication due to threading and its only
3047      use was in the conditional at the end of the block which was
3048      deleted.  */
3049   if (has_zero_uses (lhs))
3050     {
3051       bitmap_clear_bit (interesting_names, version);
3052       remove_stmt_or_phi (stmt);
3053       return;
3054     }
3055 
3056   /* Get the RHS of the assignment or PHI node if the PHI is a
3057      degenerate.  */
3058   rhs = get_rhs_or_phi_arg (stmt);
3059   if (!rhs)
3060     {
3061       bitmap_clear_bit (interesting_names, version);
3062       return;
3063     }
3064 
3065   if (!virtual_operand_p (lhs))
3066     propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
3067   else
3068     {
3069       gimple use_stmt;
3070       imm_use_iterator iter;
3071       use_operand_p use_p;
3072       /* For virtual operands we have to propagate into all uses as
3073          otherwise we will create overlapping live ranges.  */
3074       FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3075 	FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3076 	  SET_USE (use_p, rhs);
3077       if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
3078 	SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
3079       remove_stmt_or_phi (stmt);
3080     }
3081 
3082   /* Note that STMT may well have been deleted by now, so do
3083      not access it; instead use the saved version # to clear
3084      the LHS's entry in the worklist.  */
3085   bitmap_clear_bit (interesting_names, version);
3086 }
3087 
3088 /* The first phase in degenerate PHI elimination.
3089 
3090    Eliminate the degenerate PHIs in BB, then recurse on the
3091    dominator children of BB.  */
3092 
3093 static void
3094 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
3095 {
3096   gphi_iterator gsi;
3097   basic_block son;
3098 
3099   for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3100     {
3101       gphi *phi = gsi.phi ();
3102 
3103       eliminate_const_or_copy (phi, interesting_names);
3104     }
3105 
3106   /* Recurse into the dominator children of BB.  */
3107   for (son = first_dom_son (CDI_DOMINATORS, bb);
3108        son;
3109        son = next_dom_son (CDI_DOMINATORS, son))
3110     eliminate_degenerate_phis_1 (son, interesting_names);
3111 }
3112 
3113 
3114 /* A very simple pass to eliminate degenerate PHI nodes from the
3115    IL.  This is meant to be fast enough to be able to be run several
3116    times in the optimization pipeline.
3117 
3118    Certain optimizations, particularly those which duplicate blocks
3119    or remove edges from the CFG can create or expose PHIs which are
3120    trivial copies or constant initializations.
3121 
3122    While we could pick up these optimizations in DOM or with the
3123    combination of copy-prop and CCP, those solutions are far too
3124    heavy-weight for our needs.
3125 
3126    This implementation has two phases so that we can efficiently
3127    eliminate the first order degenerate PHIs and second order
3128    degenerate PHIs.
3129 
3130    The first phase performs a dominator walk to identify and eliminate
3131    the vast majority of the degenerate PHIs.  When a degenerate PHI
3132    is identified and eliminated, any affected statements or PHIs
3133    are put on a worklist.
3134 
3135    The second phase eliminates degenerate PHIs and trivial copies
3136    or constant initializations using the worklist.  This is how we
3137    pick up the secondary optimization opportunities with minimal
3138    cost.  */
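/* As an illustration, eliminating the first order degenerate PHI
   x_2 = PHI <5, 5>  (illustrative names) replaces uses of x_2 with 5,
   which may turn a consumer such as  y_3 = PHI <x_2, 5>  into a second
   order degenerate PHI that the worklist phase then eliminates.  */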
3139 
3140 namespace {
3141 
3142 const pass_data pass_data_phi_only_cprop =
3143 {
3144   GIMPLE_PASS, /* type */
3145   "phicprop", /* name */
3146   OPTGROUP_NONE, /* optinfo_flags */
3147   TV_TREE_PHI_CPROP, /* tv_id */
3148   ( PROP_cfg | PROP_ssa ), /* properties_required */
3149   0, /* properties_provided */
3150   0, /* properties_destroyed */
3151   0, /* todo_flags_start */
3152   ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
3153 };
3154 
3155 class pass_phi_only_cprop : public gimple_opt_pass
3156 {
3157 public:
3158   pass_phi_only_cprop (gcc::context *ctxt)
3159     : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3160   {}
3161 
3162   /* opt_pass methods: */
3163   opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3164   virtual bool gate (function *) { return flag_tree_dom != 0; }
3165   virtual unsigned int execute (function *);
3166 
3167 }; // class pass_phi_only_cprop
3168 
3169 unsigned int
3170 pass_phi_only_cprop::execute (function *fun)
3171 {
3172   bitmap interesting_names;
3173   bitmap interesting_names1;
3174 
3175   /* Bitmap of blocks which need EH information updated.  We cannot
3176      update it on the fly, as doing so invalidates the dominator tree.  */
3177   need_eh_cleanup = BITMAP_ALLOC (NULL);
3178 
3179   /* INTERESTING_NAMES is effectively our worklist, indexed by
3180      SSA_NAME_VERSION.
3181 
3182      A set bit indicates that the statement or PHI node which
3183      defines the SSA_NAME should be (re)examined to determine if
3184      it has become a degenerate PHI or trivial const/copy propagation
3185      opportunity.
3186 
3187      Experiments have shown we generally get better compile-time
3188      behavior with bitmaps than with sbitmaps.  */
3189   interesting_names = BITMAP_ALLOC (NULL);
3190   interesting_names1 = BITMAP_ALLOC (NULL);
3191 
3192   calculate_dominance_info (CDI_DOMINATORS);
3193   cfg_altered = false;
3194 
3195   /* First phase.  Eliminate degenerate PHIs via a dominator
3196      walk of the CFG.
3197 
3198      Experiments have indicated that we generally get better
3199      compile-time behavior by visiting blocks in the first
3200      phase in dominator order.  Presumably this is because walking
3201      in dominator order leaves fewer PHIs for later examination
3202      by the worklist phase.  */
3203   eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
3204 			       interesting_names);
3205 
3206   /* Second phase.  Eliminate second order degenerate PHIs as well
3207      as trivial copies or constant initializations identified by
3208      the first phase or this phase.  Basically we keep iterating
3209      until our set of INTERESTING_NAMEs is empty.   */
3210   while (!bitmap_empty_p (interesting_names))
3211     {
3212       unsigned int i;
3213       bitmap_iterator bi;
3214 
3215       /* EXECUTE_IF_SET_IN_BITMAP does not like having its bitmap
3216 	 changed during the loop.  Copy it to another bitmap and
3217 	 use that.  */
3218       bitmap_copy (interesting_names1, interesting_names);
3219 
3220       EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3221 	{
3222 	  tree name = ssa_name (i);
3223 
3224 	  /* Ignore SSA_NAMEs that have been released because
3225 	     their defining statement was deleted (unreachable).  */
3226 	  if (name)
3227 	    eliminate_const_or_copy (SSA_NAME_DEF_STMT (name),
3228 				     interesting_names);
3229 	}
3230     }
3231 
3232   if (cfg_altered)
3233     {
3234       free_dominance_info (CDI_DOMINATORS);
3235       /* If we changed the CFG schedule loops for fixup by cfgcleanup.  */
3236       loops_state_set (LOOPS_NEED_FIXUP);
3237     }
3238 
3239   /* Propagation of const and copies may make some EH edges dead.  Purge
3240      such edges from the CFG as needed.  */
3241   if (!bitmap_empty_p (need_eh_cleanup))
3242     {
3243       gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3244       BITMAP_FREE (need_eh_cleanup);
3245     }
3246 
3247   BITMAP_FREE (interesting_names);
3248   BITMAP_FREE (interesting_names1);
3249   return 0;
3250 }
3251 
3252 } // anon namespace
3253 
3254 gimple_opt_pass *
3255 make_pass_phi_only_cprop (gcc::context *ctxt)
3256 {
3257   return new pass_phi_only_cprop (ctxt);
3258 }
3259