1 /* SSA Dominator optimizations for trees
2    Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
3    Free Software Foundation, Inc.
4    Contributed by Diego Novillo <dnovillo@redhat.com>
5 
6 This file is part of GCC.
7 
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12 
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 GNU General Public License for more details.
17 
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3.  If not see
20 <http://www.gnu.org/licenses/>.  */
21 
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "tree.h"
27 #include "flags.h"
28 #include "rtl.h"
29 #include "tm_p.h"
30 #include "ggc.h"
31 #include "basic-block.h"
32 #include "cfgloop.h"
33 #include "output.h"
34 #include "expr.h"
35 #include "function.h"
36 #include "diagnostic.h"
37 #include "timevar.h"
38 #include "tree-dump.h"
39 #include "tree-flow.h"
40 #include "domwalk.h"
41 #include "real.h"
42 #include "tree-pass.h"
43 #include "tree-ssa-propagate.h"
44 #include "langhooks.h"
45 #include "params.h"
46 
47 /* This file implements optimizations driven by a walk of the dominator tree.  */
48 
49 /* Representation of a "naked" right-hand-side expression, to be used
50    in recording available expressions in the expression hash table.  */
51 
52 enum expr_kind
53 {
54   EXPR_SINGLE,
55   EXPR_UNARY,
56   EXPR_BINARY,
57   EXPR_CALL
58 };
59 
60 struct hashable_expr
61 {
62   tree type;
63   enum expr_kind kind;
64   union {
65     struct { tree rhs; } single;
66     struct { enum tree_code op;  tree opnd; } unary;
67     struct { enum tree_code op;  tree opnd0; tree opnd1; } binary;
68     struct { tree fn; bool pure; size_t nargs; tree *args; } call;
69   } ops;
70 };
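
/* For illustration (names hypothetical): the RHS of "x_5 = y_6 * z_7"
   is captured as kind EXPR_BINARY with ops.binary.op == MULT_EXPR and
   operands y_6 and z_7, while "t_8 = foo (a_1)" becomes an EXPR_CALL
   with nargs == 1.  */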
71 
72 /* Structure for recording known values of a conditional expression
73    at the exits from its block.  */
74 
75 struct cond_equivalence
76 {
77   struct hashable_expr cond;
78   tree value;
79 };
80 
81 /* Structure for recording edge equivalences as well as any pending
82    edge redirections during the dominator optimizer.
83 
84    Computing and storing the edge equivalences instead of creating
85    them on-demand can save significant amounts of time, particularly
86    for pathological cases involving switch statements.
87 
88    These structures live for a single iteration of the dominator
89    optimizer in the edge's AUX field.  At the end of an iteration we
90    free each of these structures and update the AUX field to point
91    to any requested redirection target (the code for updating the
92    CFG and SSA graph for edge redirection expects redirection edge
93    targets to be in the AUX field for each edge).  */
94 
95 struct edge_info
96 {
97   /* If this edge creates a simple equivalence, the LHS and RHS of
98      the equivalence will be stored here.  */
99   tree lhs;
100   tree rhs;
101 
102   /* Traversing an edge may also indicate one or more particular conditions
103      are true or false.  The number of recorded conditions can vary, but
104      can be determined by the condition's code.  So we have an array
105      and its maximum index rather than using a varray.  */
106   struct cond_equivalence *cond_equivalences;
107   unsigned int max_cond_equivalences;
108 };
109 
110 /* Hash table with expressions made available during the renaming process.
111    When an assignment of the form X_i = EXPR is found, the statement is
112    stored in this table.  If the same expression EXPR is later found on the
113    RHS of another statement, it is replaced with X_i (thus performing
114    global redundancy elimination).  Similarly, as we pass through
115    conditionals, we record the conditional itself as having either a
116    true or false value in this table.  */
117 static htab_t avail_exprs;
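
/* Example (hypothetical SSA names): after "a_1 = b_2 + c_3" is entered
   into the table, a later statement "d_4 = b_2 + c_3" finds the entry
   and is replaced by the copy "d_4 = a_1".  */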
118 
119 /* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
120    expressions it enters into the hash table along with a marker entry
121    (null).  When we finish processing the block, we pop off entries and
122    remove the expressions from the global hash table until we hit the
123    marker.  */
124 typedef struct expr_hash_elt * expr_hash_elt_t;
125 DEF_VEC_P(expr_hash_elt_t);
126 DEF_VEC_ALLOC_P(expr_hash_elt_t,heap);
127 
128 static VEC(expr_hash_elt_t,heap) *avail_exprs_stack;
129 
130 /* Structure for entries in the expression hash table.  */
131 
132 struct expr_hash_elt
133 {
134   /* The value (lhs) of this expression.  */
135   tree lhs;
136 
137   /* The expression (rhs) we want to record.  */
138   struct hashable_expr expr;
139 
140   /* The stmt pointer if this element corresponds to a statement.  */
141   gimple stmt;
142 
143   /* The hash value for RHS.  */
144   hashval_t hash;
145 
146   /* A unique stamp, typically the address of the hash
147      element itself, used in removing entries from the table.  */
148   struct expr_hash_elt *stamp;
149 };
150 
151 /* Stack of dest,src pairs that need to be restored during finalization.
152 
153    A NULL entry marks the end of the pairs which need to be restored
154    for the current block.  */
155 static VEC(tree,heap) *const_and_copies_stack;
156 
157 /* Track whether or not we have changed the control flow graph.  */
158 static bool cfg_altered;
159 
160 /* Bitmap of blocks that have had EH statements cleaned.  We should
161    remove their dead edges eventually.  */
162 static bitmap need_eh_cleanup;
163 
164 /* Statistics for dominator optimizations.  */
165 struct opt_stats_d
166 {
167   long num_stmts;
168   long num_exprs_considered;
169   long num_re;
170   long num_const_prop;
171   long num_copy_prop;
172 };
173 
174 static struct opt_stats_d opt_stats;
175 
176 /* Local functions.  */
177 static void optimize_stmt (basic_block, gimple_stmt_iterator);
178 static tree lookup_avail_expr (gimple, bool);
179 static hashval_t avail_expr_hash (const void *);
180 static hashval_t real_avail_expr_hash (const void *);
181 static int avail_expr_eq (const void *, const void *);
182 static void htab_statistics (FILE *, htab_t);
183 static void record_cond (struct cond_equivalence *);
184 static void record_const_or_copy (tree, tree);
185 static void record_equality (tree, tree);
186 static void record_equivalences_from_phis (basic_block);
187 static void record_equivalences_from_incoming_edge (basic_block);
188 static void eliminate_redundant_computations (gimple_stmt_iterator *);
189 static void record_equivalences_from_stmt (gimple, int);
190 static void dom_thread_across_edge (struct dom_walk_data *, edge);
191 static void dom_opt_leave_block (struct dom_walk_data *, basic_block);
192 static void dom_opt_enter_block (struct dom_walk_data *, basic_block);
193 static void remove_local_expressions_from_table (void);
194 static void restore_vars_to_original_value (void);
195 static edge single_incoming_edge_ignoring_loop_edges (basic_block);
196 
197 
198 /* Given a statement STMT and a value LHS, initialize the hash table
199    element pointed to by ELEMENT.  */
200 
201 static void
202 initialize_hash_element (gimple stmt, tree lhs,
203                          struct expr_hash_elt *element)
204 {
205   enum gimple_code code = gimple_code (stmt);
206   struct hashable_expr *expr = &element->expr;
207 
208   if (code == GIMPLE_ASSIGN)
209     {
210       enum tree_code subcode = gimple_assign_rhs_code (stmt);
211 
212       switch (get_gimple_rhs_class (subcode))
213         {
214         case GIMPLE_SINGLE_RHS:
215           expr->kind = EXPR_SINGLE;
216 	  expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
217           expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
218           break;
219         case GIMPLE_UNARY_RHS:
220           expr->kind = EXPR_UNARY;
221 	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
222           expr->ops.unary.op = subcode;
223           expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
224           break;
225         case GIMPLE_BINARY_RHS:
226           expr->kind = EXPR_BINARY;
227 	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
228           expr->ops.binary.op = subcode;
229           expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
230           expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
231           break;
232         default:
233           gcc_unreachable ();
234         }
235     }
236   else if (code == GIMPLE_COND)
237     {
238       expr->type = boolean_type_node;
239       expr->kind = EXPR_BINARY;
240       expr->ops.binary.op = gimple_cond_code (stmt);
241       expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
242       expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
243     }
244   else if (code == GIMPLE_CALL)
245     {
246       size_t nargs = gimple_call_num_args (stmt);
247       size_t i;
248 
249       gcc_assert (gimple_call_lhs (stmt));
250 
251       expr->type = TREE_TYPE (gimple_call_lhs (stmt));
252       expr->kind = EXPR_CALL;
253       expr->ops.call.fn = gimple_call_fn (stmt);
254 
255       if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
256         expr->ops.call.pure = true;
257       else
258         expr->ops.call.pure = false;
259 
260       expr->ops.call.nargs = nargs;
261       expr->ops.call.args = (tree *) xcalloc (nargs, sizeof (tree));
262       for (i = 0; i < nargs; i++)
263         expr->ops.call.args[i] = gimple_call_arg (stmt, i);
264     }
265   else if (code == GIMPLE_SWITCH)
266     {
267       expr->type = TREE_TYPE (gimple_switch_index (stmt));
268       expr->kind = EXPR_SINGLE;
269       expr->ops.single.rhs = gimple_switch_index (stmt);
270     }
271   else if (code == GIMPLE_GOTO)
272     {
273       expr->type = TREE_TYPE (gimple_goto_dest (stmt));
274       expr->kind = EXPR_SINGLE;
275       expr->ops.single.rhs = gimple_goto_dest (stmt);
276     }
277   else
278     gcc_unreachable ();
279 
280   element->lhs = lhs;
281   element->stmt = stmt;
282   element->hash = avail_expr_hash (element);
283   element->stamp = element;
284 }
285 
286 /* Given a conditional expression COND as a tree, initialize
287    a hashable_expr expression EXPR.  The conditional must be a
288    comparison or logical negation.  A constant or a variable is
289    not permitted.  */
290 
291 static void
292 initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
293 {
294   expr->type = boolean_type_node;
295 
296   if (COMPARISON_CLASS_P (cond))
297     {
298       expr->kind = EXPR_BINARY;
299       expr->ops.binary.op = TREE_CODE (cond);
300       expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
301       expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
302     }
303   else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
304     {
305       expr->kind = EXPR_UNARY;
306       expr->ops.unary.op = TRUTH_NOT_EXPR;
307       expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
308     }
309   else
310     gcc_unreachable ();
311 }
312 
313 /* Given a hashable_expr expression EXPR and an LHS,
314    initialize the hash table element pointed to by ELEMENT.  */
315 
316 static void
317 initialize_hash_element_from_expr (struct hashable_expr *expr,
318                                    tree lhs,
319                                    struct expr_hash_elt *element)
320 {
321   element->expr = *expr;
322   element->lhs = lhs;
323   element->stmt = NULL;
324   element->hash = avail_expr_hash (element);
325   element->stamp = element;
326 }
327 
328 /* Compare two hashable_expr structures for equivalence.
329    They are considered equivalent when the expressions
330    they denote must necessarily be equal.  The logic is intended
331    to follow that of operand_equal_p in fold-const.c  */
332 
333 static bool
334 hashable_expr_equal_p (const struct hashable_expr *expr0,
335                         const struct hashable_expr *expr1)
336 {
337   tree type0 = expr0->type;
338   tree type1 = expr1->type;
339 
340   /* If exactly one of the types is NULL, the expressions cannot be equal.  */
341   if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
342     return false;
343 
344   /* If the types don't have the same signedness, precision, and mode,
345      then we can't consider them equal.  */
346   if (type0 != type1
347       && (TREE_CODE (type0) == ERROR_MARK
348 	  || TREE_CODE (type1) == ERROR_MARK
349 	  || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
350 	  || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
351 	  || TYPE_MODE (type0) != TYPE_MODE (type1)))
352     return false;
353 
354   if (expr0->kind != expr1->kind)
355     return false;
356 
357   switch (expr0->kind)
358     {
359     case EXPR_SINGLE:
360       return operand_equal_p (expr0->ops.single.rhs,
361                               expr1->ops.single.rhs, 0);
362 
363     case EXPR_UNARY:
364       if (expr0->ops.unary.op != expr1->ops.unary.op)
365         return false;
366 
367       if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
368            || expr0->ops.unary.op == NON_LVALUE_EXPR)
369           && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
370         return false;
371 
372       return operand_equal_p (expr0->ops.unary.opnd,
373                               expr1->ops.unary.opnd, 0);
374 
375     case EXPR_BINARY:
376       {
377         if (expr0->ops.binary.op != expr1->ops.binary.op)
378           return false;
379 
380         if (operand_equal_p (expr0->ops.binary.opnd0,
381                              expr1->ops.binary.opnd0, 0)
382             && operand_equal_p (expr0->ops.binary.opnd1,
383                                 expr1->ops.binary.opnd1, 0))
384           return true;
385 
386         /* For commutative ops, allow the other order.  */
387         return (commutative_tree_code (expr0->ops.binary.op)
388                 && operand_equal_p (expr0->ops.binary.opnd0,
389                                     expr1->ops.binary.opnd1, 0)
390                 && operand_equal_p (expr0->ops.binary.opnd1,
391                                     expr1->ops.binary.opnd0, 0));
392       }
393 
394     case EXPR_CALL:
395       {
396         size_t i;
397 
398         /* If the calls are to different functions, then they
399            clearly cannot be equal.  */
400         if (! operand_equal_p (expr0->ops.call.fn,
401                                expr1->ops.call.fn, 0))
402           return false;
403 
404         if (! expr0->ops.call.pure)
405           return false;
406 
407         if (expr0->ops.call.nargs != expr1->ops.call.nargs)
408           return false;
409 
410         for (i = 0; i < expr0->ops.call.nargs; i++)
411           if (! operand_equal_p (expr0->ops.call.args[i],
412                                  expr1->ops.call.args[i], 0))
413             return false;
414 
415         return true;
416       }
417 
418     default:
419       gcc_unreachable ();
420     }
421 }
422 
423 /* Compute a hash value for a hashable_expr value EXPR and a
424    previously accumulated hash value VAL.  If two hashable_expr
425    values compare equal with hashable_expr_equal_p, they must
426    hash to the same value, given an identical value of VAL.
427    The logic is intended to follow iterative_hash_expr in tree.c.  */
428 
429 static hashval_t
430 iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
431 {
432   switch (expr->kind)
433     {
434     case EXPR_SINGLE:
435       val = iterative_hash_expr (expr->ops.single.rhs, val);
436       break;
437 
438     case EXPR_UNARY:
439       val = iterative_hash_object (expr->ops.unary.op, val);
440 
441       /* Make sure to include signedness in the hash computation.
442          Don't hash the type itself; that can lead to nodes which
443          compare equal according to operand_equal_p, but which
444          have different hash codes.  */
445       if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
446           || expr->ops.unary.op == NON_LVALUE_EXPR)
447         val += TYPE_UNSIGNED (expr->type);
448 
449       val = iterative_hash_expr (expr->ops.unary.opnd, val);
450       break;
451 
452     case EXPR_BINARY:
453       val = iterative_hash_object (expr->ops.binary.op, val);
454       if (commutative_tree_code (expr->ops.binary.op))
455         val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
456                                                 expr->ops.binary.opnd1, val);
457       else
458         {
459           val = iterative_hash_expr (expr->ops.binary.opnd0, val);
460           val = iterative_hash_expr (expr->ops.binary.opnd1, val);
461         }
462       break;
463 
464     case EXPR_CALL:
465       {
466         size_t i;
467         enum tree_code code = CALL_EXPR;
468 
469         val = iterative_hash_object (code, val);
470         val = iterative_hash_expr (expr->ops.call.fn, val);
471         for (i = 0; i < expr->ops.call.nargs; i++)
472           val = iterative_hash_expr (expr->ops.call.args[i], val);
473       }
474       break;
475 
476     default:
477       gcc_unreachable ();
478     }
479 
480   return val;
481 }
482 
483 /* Print a diagnostic dump of an expression hash table entry.  */
484 
485 static void
486 print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
487 {
488   if (element->stmt)
489     fprintf (stream, "STMT ");
490   else
491     fprintf (stream, "COND ");
492 
493   if (element->lhs)
494     {
495       print_generic_expr (stream, element->lhs, 0);
496       fprintf (stream, " = ");
497     }
498 
499   switch (element->expr.kind)
500     {
501       case EXPR_SINGLE:
502         print_generic_expr (stream, element->expr.ops.single.rhs, 0);
503         break;
504 
505       case EXPR_UNARY:
506         fprintf (stream, "%s ", tree_code_name[element->expr.ops.unary.op]);
507         print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
508         break;
509 
510       case EXPR_BINARY:
511         print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
512         fprintf (stream, " %s ", tree_code_name[element->expr.ops.binary.op]);
513         print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
514         break;
515 
516       case EXPR_CALL:
517         {
518           size_t i;
519           size_t nargs = element->expr.ops.call.nargs;
520 
521           print_generic_expr (stream, element->expr.ops.call.fn, 0);
522           fprintf (stream, " (");
523           for (i = 0; i < nargs; i++)
524             {
525               print_generic_expr (stream, element->expr.ops.call.args[i], 0);
526               if (i + 1 < nargs)
527                 fprintf (stream, ", ");
528             }
529           fprintf (stream, ")");
530         }
531         break;
532     }
533   fprintf (stream, "\n");
534 
535   if (element->stmt)
536     {
537       fprintf (stream, "          ");
538       print_gimple_stmt (stream, element->stmt, 0, 0);
539     }
540 }
541 
542 /* Delete an expr_hash_elt and reclaim its storage.  */
543 
544 static void
545 free_expr_hash_elt (void *elt)
546 {
547   struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
548 
549   if (element->expr.kind == EXPR_CALL)
550     free (element->expr.ops.call.args);
551 
552   free (element);
553 }
554 
555 /* Allocate an EDGE_INFO for edge E and attach it to E.
556    Return the new EDGE_INFO structure.  */
557 
558 static struct edge_info *
559 allocate_edge_info (edge e)
560 {
561   struct edge_info *edge_info;
562 
563   edge_info = XCNEW (struct edge_info);
564 
565   e->aux = edge_info;
566   return edge_info;
567 }
568 
569 /* Free all EDGE_INFO structures associated with edges in the CFG.
570    If a particular edge can be threaded, copy the redirection
571    target from the EDGE_INFO structure into the edge's AUX field
572    as required by code to update the CFG and SSA graph for
573    jump threading.  */
574 
575 static void
576 free_all_edge_infos (void)
577 {
578   basic_block bb;
579   edge_iterator ei;
580   edge e;
581 
582   FOR_EACH_BB (bb)
583     {
584       FOR_EACH_EDGE (e, ei, bb->preds)
585         {
586 	  struct edge_info *edge_info = (struct edge_info *) e->aux;
587 
588 	  if (edge_info)
589 	    {
590 	      if (edge_info->cond_equivalences)
591 		free (edge_info->cond_equivalences);
592 	      free (edge_info);
593 	      e->aux = NULL;
594 	    }
595 	}
596     }
597 }
598 
599 /* Jump threading, redundancy elimination and const/copy propagation.
600 
601    This pass may expose new symbols that need to be renamed into SSA.  Any
602    such symbols are rewritten into SSA form via update_ssa before the
603    jump threading updates are applied.  */
604 
605 static unsigned int
606 tree_ssa_dominator_optimize (void)
607 {
608   struct dom_walk_data walk_data;
609 
610   memset (&opt_stats, 0, sizeof (opt_stats));
611 
612   /* Create our hash tables.  */
613   avail_exprs = htab_create (1024, real_avail_expr_hash, avail_expr_eq, free_expr_hash_elt);
614   avail_exprs_stack = VEC_alloc (expr_hash_elt_t, heap, 20);
615   const_and_copies_stack = VEC_alloc (tree, heap, 20);
616   need_eh_cleanup = BITMAP_ALLOC (NULL);
617 
618   /* Setup callbacks for the generic dominator tree walker.  */
619   walk_data.dom_direction = CDI_DOMINATORS;
620   walk_data.initialize_block_local_data = NULL;
621   walk_data.before_dom_children = dom_opt_enter_block;
622   walk_data.after_dom_children = dom_opt_leave_block;
623   /* Right now we only attach a dummy GIMPLE_COND to the global data pointer.
624      When we attach more stuff we'll need to fill this out with a real
625      structure.  */
626   walk_data.global_data = NULL;
627   walk_data.block_local_data_size = 0;
628 
629   /* Now initialize the dominator walker.  */
630   init_walk_dominator_tree (&walk_data);
631 
632   calculate_dominance_info (CDI_DOMINATORS);
633   cfg_altered = false;
634 
635   /* We need to know loop structures in order to avoid destroying them
636      in jump threading.  Note that we still can e.g. thread through loop
637      headers to an exit edge, or through loop header to the loop body, assuming
638      that we update the loop info.  */
639   loop_optimizer_init (LOOPS_HAVE_SIMPLE_LATCHES);
640 
641   /* Initialize the value-handle array.  */
642   threadedge_initialize_values ();
643 
644   /* We need accurate information regarding back edges in the CFG
645      for jump threading; this may include back edges that are not part of
646      a single loop.  */
647   mark_dfs_back_edges ();
648 
649   /* Recursively walk the dominator tree optimizing statements.  */
650   walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
651 
652   {
653     gimple_stmt_iterator gsi;
654     basic_block bb;
655     FOR_EACH_BB (bb)
656       {for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
657 	  update_stmt_if_modified (gsi_stmt (gsi));
658       }
659   }
660 
661   /* If we exposed any new variables, go ahead and put them into
662      SSA form now, before we handle jump threading.  This simplifies
663      interactions between rewriting of _DECL nodes into SSA form
664      and rewriting SSA_NAME nodes into SSA form after block
665      duplication and CFG manipulation.  */
666   update_ssa (TODO_update_ssa);
667 
668   free_all_edge_infos ();
669 
670   /* Thread jumps, creating duplicate blocks as needed.  */
671   cfg_altered |= thread_through_all_blocks (first_pass_instance);
672 
673   if (cfg_altered)
674     free_dominance_info (CDI_DOMINATORS);
675 
676   /* Removal of statements may make some EH edges dead.  Purge
677      such edges from the CFG as needed.  */
678   if (!bitmap_empty_p (need_eh_cleanup))
679     {
680       unsigned i;
681       bitmap_iterator bi;
682 
683       /* Jump threading may have created forwarder blocks from blocks
684 	 needing EH cleanup; the new successor of these blocks, which
685 	 has inherited from the original block, needs the cleanup.  */
686       EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
687 	{
688 	  basic_block bb = BASIC_BLOCK (i);
689 	  if (single_succ_p (bb)
690 	      && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
691 	    {
692 	      bitmap_clear_bit (need_eh_cleanup, i);
693 	      bitmap_set_bit (need_eh_cleanup, single_succ (bb)->index);
694 	    }
695 	}
696 
697       gimple_purge_all_dead_eh_edges (need_eh_cleanup);
698       bitmap_zero (need_eh_cleanup);
699     }
700 
701   statistics_counter_event (cfun, "Redundant expressions eliminated",
702 			    opt_stats.num_re);
703   statistics_counter_event (cfun, "Constants propagated",
704 			    opt_stats.num_const_prop);
705   statistics_counter_event (cfun, "Copies propagated",
706 			    opt_stats.num_copy_prop);
707 
708   /* Debugging dumps.  */
709   if (dump_file && (dump_flags & TDF_STATS))
710     dump_dominator_optimization_stats (dump_file);
711 
712   loop_optimizer_finalize ();
713 
714   /* Delete our main hashtable.  */
715   htab_delete (avail_exprs);
716 
717   /* And finalize the dominator walker.  */
718   fini_walk_dominator_tree (&walk_data);
719 
720   /* Free asserted bitmaps and stacks.  */
721   BITMAP_FREE (need_eh_cleanup);
722 
723   VEC_free (expr_hash_elt_t, heap, avail_exprs_stack);
724   VEC_free (tree, heap, const_and_copies_stack);
725 
726   /* Free the value-handle array.  */
727   threadedge_finalize_values ();
728   ssa_name_values = NULL;
729 
730   return 0;
731 }
732 
733 static bool
734 gate_dominator (void)
735 {
736   return flag_tree_dom != 0;
737 }
738 
739 struct gimple_opt_pass pass_dominator =
740 {
741  {
742   GIMPLE_PASS,
743   "dom",				/* name */
744   gate_dominator,			/* gate */
745   tree_ssa_dominator_optimize,		/* execute */
746   NULL,					/* sub */
747   NULL,					/* next */
748   0,					/* static_pass_number */
749   TV_TREE_SSA_DOMINATOR_OPTS,		/* tv_id */
750   PROP_cfg | PROP_ssa,			/* properties_required */
751   0,					/* properties_provided */
752   0,					/* properties_destroyed */
753   0,					/* todo_flags_start */
754   TODO_dump_func
755     | TODO_update_ssa
756     | TODO_cleanup_cfg
757     | TODO_verify_ssa			/* todo_flags_finish */
758  }
759 };
760 
761 
762 /* Given a conditional statement CONDSTMT, convert the
763    condition to a canonical form.  */
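
/* A sketch with hypothetical operands: "if (5 > b_2)" would be
   rewritten as "if (b_2 < 5)", since tree_swap_operands_p prefers the
   constant as the second operand and swap_tree_comparison turns
   GT_EXPR into LT_EXPR.  */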
764 
765 static void
766 canonicalize_comparison (gimple condstmt)
767 {
768   tree op0;
769   tree op1;
770   enum tree_code code;
771 
772   gcc_assert (gimple_code (condstmt) == GIMPLE_COND);
773 
774   op0 = gimple_cond_lhs (condstmt);
775   op1 = gimple_cond_rhs (condstmt);
776 
777   code = gimple_cond_code (condstmt);
778 
779   /* If it would be profitable to swap the operands, then do so to
780      canonicalize the statement, enabling better optimization.
781 
782      By placing canonicalization of such expressions here we
783      transparently keep statements in canonical form, even
784      when the statement is modified.  */
785   if (tree_swap_operands_p (op0, op1, false))
786     {
787       /* For relationals we need to swap the operands
788 	 and change the code.  */
789       if (code == LT_EXPR
790 	  || code == GT_EXPR
791 	  || code == LE_EXPR
792 	  || code == GE_EXPR)
793 	{
794           code = swap_tree_comparison (code);
795 
796           gimple_cond_set_code (condstmt, code);
797           gimple_cond_set_lhs (condstmt, op1);
798           gimple_cond_set_rhs (condstmt, op0);
799 
800           update_stmt (condstmt);
801 	}
802     }
803 }
804 
805 /* Remove the expressions entered into AVAIL_EXPRS by the current block.
806 
807    Entries are popped from AVAIL_EXPRS_STACK and removed from the hash
808    table until the NULL marker pushed at the start of the block is
809    reached.  */
811 
812 static void
813 remove_local_expressions_from_table (void)
814 {
815   /* Remove all the expressions made available in this block.  */
816   while (VEC_length (expr_hash_elt_t, avail_exprs_stack) > 0)
817     {
818       expr_hash_elt_t victim = VEC_pop (expr_hash_elt_t, avail_exprs_stack);
819       void **slot;
820 
821       if (victim == NULL)
822 	break;
823 
824       /* This must precede the actual removal from the hash table,
825          as ELEMENT and the table entry may share a call argument
826          vector which will be freed during removal.  */
827       if (dump_file && (dump_flags & TDF_DETAILS))
828         {
829           fprintf (dump_file, "<<<< ");
830           print_expr_hash_elt (dump_file, victim);
831         }
832 
833       slot = htab_find_slot_with_hash (avail_exprs,
834 				       victim, victim->hash, NO_INSERT);
835       gcc_assert (slot && *slot == (void *) victim);
836       htab_clear_slot (avail_exprs, slot);
837     }
838 }
839 
840 /* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
841    CONST_AND_COPIES to its original state, stopping when we hit a
842    NULL marker.  */
843 
844 static void
845 restore_vars_to_original_value (void)
846 {
847   while (VEC_length (tree, const_and_copies_stack) > 0)
848     {
849       tree prev_value, dest;
850 
851       dest = VEC_pop (tree, const_and_copies_stack);
852 
853       if (dest == NULL)
854 	break;
855 
856       if (dump_file && (dump_flags & TDF_DETAILS))
857 	{
858 	  fprintf (dump_file, "<<<< COPY ");
859 	  print_generic_expr (dump_file, dest, 0);
860 	  fprintf (dump_file, " = ");
861 	  print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
862 	  fprintf (dump_file, "\n");
863 	}
864 
865       prev_value = VEC_pop (tree, const_and_copies_stack);
866       set_ssa_name_value (dest, prev_value);
867     }
868 }
869 
870 /* A trivial wrapper so that we can present the generic jump
871    threading code with a simple API for simplifying statements.  */
872 static tree
873 simplify_stmt_for_jump_threading (gimple stmt,
874 				  gimple within_stmt ATTRIBUTE_UNUSED)
875 {
876   return lookup_avail_expr (stmt, false);
877 }
878 
879 /* Wrapper for common code to attempt to thread an edge.  For example,
880    it handles lazily building the dummy condition and the bookkeeping
881    when jump threading is successful.  */
882 
883 static void
884 dom_thread_across_edge (struct dom_walk_data *walk_data, edge e)
885 {
886   if (! walk_data->global_data)
887     {
888       gimple dummy_cond =
889           gimple_build_cond (NE_EXPR,
890                              integer_zero_node, integer_zero_node,
891                              NULL, NULL);
892       walk_data->global_data = dummy_cond;
893     }
894 
895   thread_across_edge ((gimple) walk_data->global_data, e, false,
896 		      &const_and_copies_stack,
897 		      simplify_stmt_for_jump_threading);
898 }
899 
900 /* PHI nodes can create equivalences too.
901 
902    Ignoring any alternatives which are the same as the result, if
903    all the alternatives are equal, then the PHI node creates an
904    equivalence.  */
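
/* For example (hypothetical PHI): x_3 = PHI <y_1, y_1, x_3> makes x_3
   equivalent to y_1; the self-referencing argument is ignored and the
   remaining arguments are all equal.  */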
905 
906 static void
907 record_equivalences_from_phis (basic_block bb)
908 {
909   gimple_stmt_iterator gsi;
910 
911   for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
912     {
913       gimple phi = gsi_stmt (gsi);
914 
915       tree lhs = gimple_phi_result (phi);
916       tree rhs = NULL;
917       size_t i;
918 
919       for (i = 0; i < gimple_phi_num_args (phi); i++)
920 	{
921 	  tree t = gimple_phi_arg_def (phi, i);
922 
923 	  /* Ignore alternatives which are the same as our LHS.  Since
924 	     LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
925 	     can simply compare pointers.  */
926 	  if (lhs == t)
927 	    continue;
928 
929 	  /* If we have not processed an alternative yet, then set
930 	     RHS to this alternative.  */
931 	  if (rhs == NULL)
932 	    rhs = t;
933 	  /* If we have processed an alternative (stored in RHS), then
934 	     see if it is equal to this one.  If it isn't, then stop
935 	     the search.  */
936 	  else if (! operand_equal_for_phi_arg_p (rhs, t))
937 	    break;
938 	}
939 
940       /* If we had no interesting alternatives, then all the RHS alternatives
941 	 must have been the same as LHS.  */
942       if (!rhs)
943 	rhs = lhs;
944 
945       /* If we managed to iterate through each PHI alternative without
946 	 breaking out of the loop, then we have a PHI which may create
947 	 a useful equivalence.  We do not need to record unwind data for
948 	 this, since this is a true assignment and not an equivalence
949 	 inferred from a comparison.  All uses of this ssa name are dominated
950 	 by this assignment, so unwinding just costs time and space.  */
951       if (i == gimple_phi_num_args (phi) && may_propagate_copy (lhs, rhs))
952 	set_ssa_name_value (lhs, rhs);
953     }
954 }
955 
956 /* Ignoring loop backedges, if BB has precisely one incoming edge then
957    return that edge.  Otherwise return NULL.  */
958 static edge
959 single_incoming_edge_ignoring_loop_edges (basic_block bb)
960 {
961   edge retval = NULL;
962   edge e;
963   edge_iterator ei;
964 
965   FOR_EACH_EDGE (e, ei, bb->preds)
966     {
967       /* A loop back edge can be identified by the destination of
968 	 the edge dominating the source of the edge.  */
969       if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
970 	continue;
971 
972       /* If we have already seen a non-loop edge, then we must have
973 	 multiple incoming non-loop edges and thus we return NULL.  */
974       if (retval)
975 	return NULL;
976 
977       /* This is the first non-loop incoming edge we have found.  Record
978 	 it.  */
979       retval = e;
980     }
981 
982   return retval;
983 }
984 
985 /* Record any equivalences created by the incoming edge to BB.  If BB
986    has more than one incoming edge, then no equivalence is created.  */
987 
988 static void
989 record_equivalences_from_incoming_edge (basic_block bb)
990 {
991   edge e;
992   basic_block parent;
993   struct edge_info *edge_info;
994 
995   /* If our parent block ended with a control statement, then we may be
996      able to record some equivalences based on which outgoing edge from
997      the parent was followed.  */
998   parent = get_immediate_dominator (CDI_DOMINATORS, bb);
999 
1000   e = single_incoming_edge_ignoring_loop_edges (bb);
1001 
1002   /* If we had a single incoming edge from our parent block, then enter
1003      any data associated with the edge into our tables.  */
1004   if (e && e->src == parent)
1005     {
1006       unsigned int i;
1007 
1008       edge_info = (struct edge_info *) e->aux;
1009 
1010       if (edge_info)
1011 	{
1012 	  tree lhs = edge_info->lhs;
1013 	  tree rhs = edge_info->rhs;
1014 	  struct cond_equivalence *cond_equivalences = edge_info->cond_equivalences;
1015 
1016 	  if (lhs)
1017 	    record_equality (lhs, rhs);
1018 
1019 	  if (cond_equivalences)
1020             for (i = 0; i < edge_info->max_cond_equivalences; i++)
1021               record_cond (&cond_equivalences[i]);
1022 	}
1023     }
1024 }
1025 
1026 /* Dump SSA statistics on FILE.  */
1027 
1028 void
1029 dump_dominator_optimization_stats (FILE *file)
1030 {
1031   fprintf (file, "Total number of statements:                   %6ld\n\n",
1032 	   opt_stats.num_stmts);
1033   fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
1034            opt_stats.num_exprs_considered);
1035 
1036   fprintf (file, "\nHash table statistics:\n");
1037 
1038   fprintf (file, "    avail_exprs: ");
1039   htab_statistics (file, avail_exprs);
1040 }
1041 
1042 
1043 /* Dump SSA statistics on stderr.  */
1044 
1045 void
1046 debug_dominator_optimization_stats (void)
1047 {
1048   dump_dominator_optimization_stats (stderr);
1049 }
1050 
1051 
1052 /* Dump statistics for the hash table HTAB.  */
1053 
1054 static void
1055 htab_statistics (FILE *file, htab_t htab)
1056 {
1057   fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
1058 	   (long) htab_size (htab),
1059 	   (long) htab_elements (htab),
1060 	   htab_collisions (htab));
1061 }
1062 
1063 
1064 /* Enter condition equivalence into the expression hash table.
1065    This indicates that a conditional expression has a known
1066    boolean value.  */
1067 
1068 static void
1069 record_cond (struct cond_equivalence *p)
1070 {
1071   struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
1072   void **slot;
1073 
1074   initialize_hash_element_from_expr (&p->cond, p->value, element);
1075 
1076   slot = htab_find_slot_with_hash (avail_exprs, (void *)element,
1077 				   element->hash, INSERT);
1078   if (*slot == NULL)
1079     {
1080       *slot = (void *) element;
1081 
1082       if (dump_file && (dump_flags & TDF_DETAILS))
1083         {
1084           fprintf (dump_file, "1>>> ");
1085           print_expr_hash_elt (dump_file, element);
1086         }
1087 
1088       VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, element);
1089     }
1090   else
1091     free (element);
1092 }
1093 
1094 /* Build a cond_equivalence record indicating that the comparison
1095    CODE holds between operands OP0 and OP1.  */
1096 
1097 static void
1098 build_and_record_new_cond (enum tree_code code,
1099                            tree op0, tree op1,
1100                            struct cond_equivalence *p)
1101 {
1102   struct hashable_expr *cond = &p->cond;
1103 
1104   gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);
1105 
1106   cond->type = boolean_type_node;
1107   cond->kind = EXPR_BINARY;
1108   cond->ops.binary.op = code;
1109   cond->ops.binary.opnd0 = op0;
1110   cond->ops.binary.opnd1 = op1;
1111 
1112   p->value = boolean_true_node;
1113 }
1114 
1115 /* Record that COND is true and INVERTED is false into the edge information
1116    structure.  Also record that any conditions dominated by COND are true
1117    as well.
1118 
1119    For example, if a < b is true, then a <= b must also be true.  */
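
/* For a < b in particular, the code below also records a != b, and for
   floating-point operands additionally ORDERED_EXPR (a, b) and
   LTGT_EXPR (a, b); see the LT_EXPR/GT_EXPR case.  */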
1120 
1121 static void
1122 record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
1123 {
1124   tree op0, op1;
1125 
1126   if (!COMPARISON_CLASS_P (cond))
1127     return;
1128 
1129   op0 = TREE_OPERAND (cond, 0);
1130   op1 = TREE_OPERAND (cond, 1);
1131 
1132   switch (TREE_CODE (cond))
1133     {
1134     case LT_EXPR:
1135     case GT_EXPR:
1136       if (FLOAT_TYPE_P (TREE_TYPE (op0)))
1137 	{
1138 	  edge_info->max_cond_equivalences = 6;
1139 	  edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 6);
1140 	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1141 				     &edge_info->cond_equivalences[4]);
1142 	  build_and_record_new_cond (LTGT_EXPR, op0, op1,
1143 				     &edge_info->cond_equivalences[5]);
1144 	}
1145       else
1146         {
1147           edge_info->max_cond_equivalences = 4;
1148 	  edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
1149 	}
1150 
1151       build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
1152 				  ? LE_EXPR : GE_EXPR),
1153 				 op0, op1, &edge_info->cond_equivalences[2]);
1154       build_and_record_new_cond (NE_EXPR, op0, op1,
1155 				 &edge_info->cond_equivalences[3]);
1156       break;
1157 
1158     case GE_EXPR:
1159     case LE_EXPR:
1160       if (FLOAT_TYPE_P (TREE_TYPE (op0)))
1161 	{
1162 	  edge_info->max_cond_equivalences = 3;
1163 	  edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 3);
1164 	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1165 				     &edge_info->cond_equivalences[2]);
1166 	}
1167       else
1168 	{
1169 	  edge_info->max_cond_equivalences = 2;
1170 	  edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 2);
1171 	}
1172       break;
1173 
1174     case EQ_EXPR:
1175       if (FLOAT_TYPE_P (TREE_TYPE (op0)))
1176 	{
1177 	  edge_info->max_cond_equivalences = 5;
1178 	  edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 5);
1179 	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1180 				     &edge_info->cond_equivalences[4]);
1181 	}
1182       else
1183 	{
1184 	  edge_info->max_cond_equivalences = 4;
1185 	  edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
1186 	}
1187       build_and_record_new_cond (LE_EXPR, op0, op1,
1188 				 &edge_info->cond_equivalences[2]);
1189       build_and_record_new_cond (GE_EXPR, op0, op1,
1190 				 &edge_info->cond_equivalences[3]);
1191       break;
1192 
1193     case UNORDERED_EXPR:
1194       edge_info->max_cond_equivalences = 8;
1195       edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 8);
1196       build_and_record_new_cond (NE_EXPR, op0, op1,
1197 				 &edge_info->cond_equivalences[2]);
1198       build_and_record_new_cond (UNLE_EXPR, op0, op1,
1199 				 &edge_info->cond_equivalences[3]);
1200       build_and_record_new_cond (UNGE_EXPR, op0, op1,
1201 				 &edge_info->cond_equivalences[4]);
1202       build_and_record_new_cond (UNEQ_EXPR, op0, op1,
1203 				 &edge_info->cond_equivalences[5]);
1204       build_and_record_new_cond (UNLT_EXPR, op0, op1,
1205 				 &edge_info->cond_equivalences[6]);
1206       build_and_record_new_cond (UNGT_EXPR, op0, op1,
1207 				 &edge_info->cond_equivalences[7]);
1208       break;
1209 
1210     case UNLT_EXPR:
1211     case UNGT_EXPR:
1212       edge_info->max_cond_equivalences = 4;
1213       edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
1214       build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
1215 				  ? UNLE_EXPR : UNGE_EXPR),
1216 				 op0, op1, &edge_info->cond_equivalences[2]);
1217       build_and_record_new_cond (NE_EXPR, op0, op1,
1218 				 &edge_info->cond_equivalences[3]);
1219       break;
1220 
1221     case UNEQ_EXPR:
1222       edge_info->max_cond_equivalences = 4;
1223       edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
1224       build_and_record_new_cond (UNLE_EXPR, op0, op1,
1225 				 &edge_info->cond_equivalences[2]);
1226       build_and_record_new_cond (UNGE_EXPR, op0, op1,
1227 				 &edge_info->cond_equivalences[3]);
1228       break;
1229 
1230     case LTGT_EXPR:
1231       edge_info->max_cond_equivalences = 4;
1232       edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 4);
1233       build_and_record_new_cond (NE_EXPR, op0, op1,
1234 				 &edge_info->cond_equivalences[2]);
1235       build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1236 				 &edge_info->cond_equivalences[3]);
1237       break;
1238 
1239     default:
1240       edge_info->max_cond_equivalences = 2;
1241       edge_info->cond_equivalences = XNEWVEC (struct cond_equivalence, 2);
1242       break;
1243     }
1244 
1245   /* Now store the original true and false conditions into the first
1246      two slots.  */
1247   initialize_expr_from_cond (cond, &edge_info->cond_equivalences[0].cond);
1248   edge_info->cond_equivalences[0].value = boolean_true_node;
1249 
1250   /* It is possible for INVERTED to be the negation of a comparison,
1251      and not a valid RHS or GIMPLE_COND condition.  This happens because
1252      invert_truthvalue may return such an expression when asked to invert
1253      a floating-point comparison.  These comparisons are not assumed to
1254      obey the trichotomy law.  */
1255   initialize_expr_from_cond (inverted, &edge_info->cond_equivalences[1].cond);
1256   edge_info->cond_equivalences[1].value = boolean_false_node;
1257 }
1258 
1259 /* A helper function for record_const_or_copy and record_equality.
1260    Do the work of recording the value and undo info.  */
1261 
1262 static void
1263 record_const_or_copy_1 (tree x, tree y, tree prev_x)
1264 {
1265   set_ssa_name_value (x, y);
1266 
1267   if (dump_file && (dump_flags & TDF_DETAILS))
1268     {
1269       fprintf (dump_file, "0>>> COPY ");
1270       print_generic_expr (dump_file, x, 0);
1271       fprintf (dump_file, " = ");
1272       print_generic_expr (dump_file, y, 0);
1273       fprintf (dump_file, "\n");
1274     }
1275 
1276   VEC_reserve (tree, heap, const_and_copies_stack, 2);
1277   VEC_quick_push (tree, const_and_copies_stack, prev_x);
1278   VEC_quick_push (tree, const_and_copies_stack, x);
1279 }
1280 
1281 /* Return the loop depth of the basic block of the defining statement of X.
1282    This number should not be treated as absolutely correct because the loop
1283    information may not be completely up-to-date when dom runs.  However, it
1284    will be relatively correct, and as more passes are taught to keep loop info
1285    up to date, the result will become more and more accurate.  */
1286 
1287 int
1288 loop_depth_of_name (tree x)
1289 {
1290   gimple defstmt;
1291   basic_block defbb;
1292 
1293   /* If it's not an SSA_NAME, we have no clue where the definition is.  */
1294   if (TREE_CODE (x) != SSA_NAME)
1295     return 0;
1296 
1297   /* Otherwise return the loop depth of the defining statement's bb.
1298      Note that there may not actually be a bb for this statement, if the
1299      ssa_name is live on entry.  */
1300   defstmt = SSA_NAME_DEF_STMT (x);
1301   defbb = gimple_bb (defstmt);
1302   if (!defbb)
1303     return 0;
1304 
1305   return defbb->loop_depth;
1306 }
1307 
1308 /* Record that X is equal to Y in const_and_copies.  Record undo
1309    information in the block-local vector.  */
1310 
1311 static void
1312 record_const_or_copy (tree x, tree y)
1313 {
1314   tree prev_x = SSA_NAME_VALUE (x);
1315 
1316   gcc_assert (TREE_CODE (x) == SSA_NAME);
1317 
1318   if (TREE_CODE (y) == SSA_NAME)
1319     {
1320       tree tmp = SSA_NAME_VALUE (y);
1321       if (tmp)
1322 	y = tmp;
1323     }
1324 
1325   record_const_or_copy_1 (x, y, prev_x);
1326 }
1327 
1328 /* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
1329    This constrains the cases in which we may treat this as an assignment.  */
1330 
1331 static void
1332 record_equality (tree x, tree y)
1333 {
1334   tree prev_x = NULL, prev_y = NULL;
1335 
1336   if (TREE_CODE (x) == SSA_NAME)
1337     prev_x = SSA_NAME_VALUE (x);
1338   if (TREE_CODE (y) == SSA_NAME)
1339     prev_y = SSA_NAME_VALUE (y);
1340 
1341   /* If one of the previous values is invariant, or invariant in more loops
1342      (by depth), then use that.
1343      Otherwise it doesn't matter which value we choose, as long as
1344      we canonicalize on one value.  */
1345   if (is_gimple_min_invariant (y))
1346     ;
1347   else if (is_gimple_min_invariant (x)
1348 	   || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
1349     prev_x = x, x = y, y = prev_x, prev_x = prev_y;
1350   else if (prev_x && is_gimple_min_invariant (prev_x))
1351     x = y, y = prev_x, prev_x = prev_y;
1352   else if (prev_y)
1353     y = prev_y;
1354 
1355   /* After the swapping, we must have one SSA_NAME.  */
1356   if (TREE_CODE (x) != SSA_NAME)
1357     return;
1358 
1359   /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
1360      variable compared against zero.  If we're honoring signed zeros,
1361      then we cannot record this value unless we know that the value is
1362      nonzero.  */
1363   if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
1364       && (TREE_CODE (y) != REAL_CST
1365 	  || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
1366     return;
1367 
1368   record_const_or_copy_1 (x, y, prev_x);
1369 }
1370 
1371 /* Returns true when STMT is a simple iv increment.  It detects the
1372    following situation:
1373 
1374    i_1 = phi (..., i_2)
1375    i_2 = i_1 +/- ...  */
1376 
1377 static bool
1378 simple_iv_increment_p (gimple stmt)
1379 {
1380   tree lhs, preinc;
1381   gimple phi;
1382   size_t i;
1383 
1384   if (gimple_code (stmt) != GIMPLE_ASSIGN)
1385     return false;
1386 
1387   lhs = gimple_assign_lhs (stmt);
1388   if (TREE_CODE (lhs) != SSA_NAME)
1389     return false;
1390 
1391   if (gimple_assign_rhs_code (stmt) != PLUS_EXPR
1392       && gimple_assign_rhs_code (stmt) != MINUS_EXPR)
1393     return false;
1394 
1395   preinc = gimple_assign_rhs1 (stmt);
1396 
1397   if (TREE_CODE (preinc) != SSA_NAME)
1398     return false;
1399 
1400   phi = SSA_NAME_DEF_STMT (preinc);
1401   if (gimple_code (phi) != GIMPLE_PHI)
1402     return false;
1403 
1404   for (i = 0; i < gimple_phi_num_args (phi); i++)
1405     if (gimple_phi_arg_def (phi, i) == lhs)
1406       return true;
1407 
1408   return false;
1409 }
1410 
1411 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
1412    known value for that SSA_NAME (or NULL if no value is known).
1413 
1414    Propagate values from CONST_AND_COPIES into the PHI nodes of the
1415    successors of BB.  */
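
/* For instance (values hypothetical), if CONST_AND_COPIES records
   x_1 == 7, a PHI argument x_1 on the edge from BB to a successor is
   replaced with 7, provided may_propagate_copy allows it.  */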
1416 
1417 static void
1418 cprop_into_successor_phis (basic_block bb)
1419 {
1420   edge e;
1421   edge_iterator ei;
1422 
1423   FOR_EACH_EDGE (e, ei, bb->succs)
1424     {
1425       int indx;
1426       gimple_stmt_iterator gsi;
1427 
1428       /* If this is an abnormal edge, then we do not want to copy propagate
1429 	 into the PHI alternative associated with this edge.  */
1430       if (e->flags & EDGE_ABNORMAL)
1431 	continue;
1432 
1433       gsi = gsi_start_phis (e->dest);
1434       if (gsi_end_p (gsi))
1435 	continue;
1436 
1437       indx = e->dest_idx;
1438       for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
1439 	{
1440 	  tree new_val;
1441 	  use_operand_p orig_p;
1442 	  tree orig_val;
1443           gimple phi = gsi_stmt (gsi);
1444 
1445 	  /* The alternative may be associated with a constant, so verify
1446 	     it is an SSA_NAME before doing anything with it.  */
1447 	  orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
1448 	  orig_val = get_use_from_ptr (orig_p);
1449 	  if (TREE_CODE (orig_val) != SSA_NAME)
1450 	    continue;
1451 
1452 	  /* If we have *ORIG_P in our constant/copy table, then replace
1453 	     ORIG_P with its value in our constant/copy table.  */
1454 	  new_val = SSA_NAME_VALUE (orig_val);
1455 	  if (new_val
1456 	      && new_val != orig_val
1457 	      && (TREE_CODE (new_val) == SSA_NAME
1458 		  || is_gimple_min_invariant (new_val))
1459 	      && may_propagate_copy (orig_val, new_val))
1460 	    propagate_value (orig_p, new_val);
1461 	}
1462     }
1463 }
1464 
1465 /* We have finished optimizing BB, record any information implied by
1466    taking a specific outgoing edge from BB.  */
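
/* Two illustrative cases (names hypothetical): for "switch (i_2)", the
   edge to a simple "case 5:" label implies i_2 == 5 along that edge;
   for "if (a_3 == 9)", the true edge records a_3 == 9 and the false
   edge records a_3 != 9.  */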
1467 
1468 static void
1469 record_edge_info (basic_block bb)
1470 {
1471   gimple_stmt_iterator gsi = gsi_last_bb (bb);
1472   struct edge_info *edge_info;
1473 
1474   if (! gsi_end_p (gsi))
1475     {
1476       gimple stmt = gsi_stmt (gsi);
1477       location_t loc = gimple_location (stmt);
1478 
1479       if (gimple_code (stmt) == GIMPLE_SWITCH)
1480 	{
1481 	  tree index = gimple_switch_index (stmt);
1482 
1483 	  if (TREE_CODE (index) == SSA_NAME)
1484 	    {
1485 	      int i;
1486               int n_labels = gimple_switch_num_labels (stmt);
1487 	      tree *info = XCNEWVEC (tree, last_basic_block);
1488 	      edge e;
1489 	      edge_iterator ei;
1490 
1491 	      for (i = 0; i < n_labels; i++)
1492 		{
1493 		  tree label = gimple_switch_label (stmt, i);
1494 		  basic_block target_bb = label_to_block (CASE_LABEL (label));
1495 		  if (CASE_HIGH (label)
1496 		      || !CASE_LOW (label)
1497 		      || info[target_bb->index])
1498 		    info[target_bb->index] = error_mark_node;
1499 		  else
1500 		    info[target_bb->index] = label;
1501 		}
1502 
1503 	      FOR_EACH_EDGE (e, ei, bb->succs)
1504 		{
1505 		  basic_block target_bb = e->dest;
1506 		  tree label = info[target_bb->index];
1507 
1508 		  if (label != NULL && label != error_mark_node)
1509 		    {
1510 		      tree x = fold_convert_loc (loc, TREE_TYPE (index),
1511 						 CASE_LOW (label));
1512 		      edge_info = allocate_edge_info (e);
1513 		      edge_info->lhs = index;
1514 		      edge_info->rhs = x;
1515 		    }
1516 		}
1517 	      free (info);
1518 	    }
1519 	}
1520 
1521       /* A COND_EXPR may create equivalences too.  */
1522       if (gimple_code (stmt) == GIMPLE_COND)
1523 	{
1524 	  edge true_edge;
1525 	  edge false_edge;
1526 
1527           tree op0 = gimple_cond_lhs (stmt);
1528           tree op1 = gimple_cond_rhs (stmt);
1529           enum tree_code code = gimple_cond_code (stmt);
1530 
1531 	  extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1532 
1533           /* Special case comparing booleans against a constant as we
1534              know the value of OP0 on both arms of the branch, i.e., we
1535              can record an equivalence for OP0 rather than COND.  */
1536           if ((code == EQ_EXPR || code == NE_EXPR)
1537               && TREE_CODE (op0) == SSA_NAME
1538               && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
1539               && is_gimple_min_invariant (op1))
1540             {
1541               if (code == EQ_EXPR)
1542                 {
1543                   edge_info = allocate_edge_info (true_edge);
1544                   edge_info->lhs = op0;
1545                   edge_info->rhs = (integer_zerop (op1)
1546                                     ? boolean_false_node
1547                                     : boolean_true_node);
1548 
1549                   edge_info = allocate_edge_info (false_edge);
1550                   edge_info->lhs = op0;
1551                   edge_info->rhs = (integer_zerop (op1)
1552                                     ? boolean_true_node
1553                                     : boolean_false_node);
1554                 }
1555               else
1556                 {
1557                   edge_info = allocate_edge_info (true_edge);
1558                   edge_info->lhs = op0;
1559                   edge_info->rhs = (integer_zerop (op1)
1560                                     ? boolean_true_node
1561                                     : boolean_false_node);
1562 
1563                   edge_info = allocate_edge_info (false_edge);
1564                   edge_info->lhs = op0;
1565                   edge_info->rhs = (integer_zerop (op1)
1566                                     ? boolean_false_node
1567                                     : boolean_true_node);
1568                 }
1569             }
1570           else if (is_gimple_min_invariant (op0)
1571                    && (TREE_CODE (op1) == SSA_NAME
1572                        || is_gimple_min_invariant (op1)))
1573             {
1574               tree cond = build2 (code, boolean_type_node, op0, op1);
1575               tree inverted = invert_truthvalue_loc (loc, cond);
1576               struct edge_info *edge_info;
1577 
1578               edge_info = allocate_edge_info (true_edge);
1579               record_conditions (edge_info, cond, inverted);
1580 
1581               if (code == EQ_EXPR)
1582                 {
1583                   edge_info->lhs = op1;
1584                   edge_info->rhs = op0;
1585                 }
1586 
1587               edge_info = allocate_edge_info (false_edge);
1588               record_conditions (edge_info, inverted, cond);
1589 
1590               if (TREE_CODE (inverted) == EQ_EXPR)
1591                 {
1592                   edge_info->lhs = op1;
1593                   edge_info->rhs = op0;
1594                 }
1595             }
1596 
1597           else if (TREE_CODE (op0) == SSA_NAME
1598                    && (is_gimple_min_invariant (op1)
1599                        || TREE_CODE (op1) == SSA_NAME))
1600             {
1601               tree cond = build2 (code, boolean_type_node, op0, op1);
1602               tree inverted = invert_truthvalue_loc (loc, cond);
1603               struct edge_info *edge_info;
1604 
1605               edge_info = allocate_edge_info (true_edge);
1606               record_conditions (edge_info, cond, inverted);
1607 
1608               if (code == EQ_EXPR)
1609                 {
1610                   edge_info->lhs = op0;
1611                   edge_info->rhs = op1;
1612                 }
1613 
1614               edge_info = allocate_edge_info (false_edge);
1615               record_conditions (edge_info, inverted, cond);
1616 
1617               if (TREE_CODE (inverted) == EQ_EXPR)
1618                 {
1619                   edge_info->lhs = op0;
1620                   edge_info->rhs = op1;
1621                 }
1622             }
1623         }
1624 
1625       /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
1626     }
1627 }
1628 
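/* Initialize local stacks for this optimizer and record equivalences
   upon entry to BB.  Equivalences can come from the edge traversed to
   reach BB or they may come from PHI nodes at the start of BB.  */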
1629 static void
1630 dom_opt_enter_block (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
1631 		     basic_block bb)
1632 {
1633   gimple_stmt_iterator gsi;
1634 
1635   if (dump_file && (dump_flags & TDF_DETAILS))
1636     fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1637 
1638   /* Push a marker on the stacks of local information so that we know how
1639      far to unwind when we finalize this block.  */
1640   VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, NULL);
1641   VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
1642 
1643   record_equivalences_from_incoming_edge (bb);
1644 
1645   /* PHI nodes can create equivalences too.  */
1646   record_equivalences_from_phis (bb);
1647 
1648   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1649     optimize_stmt (bb, gsi);
1650 
1651   /* Now prepare to process dominated blocks.  */
1652   record_edge_info (bb);
1653   cprop_into_successor_phis (bb);
1654 }
1655 
1656 /* We have finished processing the dominator children of BB; perform

1657    any finalization actions in preparation for leaving this node in
1658    the dominator tree.  */
1659 
1660 static void
1661 dom_opt_leave_block (struct dom_walk_data *walk_data, basic_block bb)
1662 {
1663   gimple last;
1664 
1665   /* If we have an outgoing edge to a block with multiple incoming and
1666      outgoing edges, then we may be able to thread the edge, i.e., we
1667      may be able to statically determine which of the outgoing edges
1668      will be traversed when the incoming edge from BB is traversed.  */
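  /* For illustration (hypothetical names): if the successor of BB ends in
     "if (x_1 == 0)" and traversing the edge out of BB established the
     equivalence x_1 = 0, then the conditional's outcome is known and the
     jump can be threaded through the successor block.  */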
1669   if (single_succ_p (bb)
1670       && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
1671       && potentially_threadable_block (single_succ (bb)))
1672     {
1673       dom_thread_across_edge (walk_data, single_succ_edge (bb));
1674     }
1675   else if ((last = last_stmt (bb))
1676 	   && gimple_code (last) == GIMPLE_COND
1677 	   && EDGE_COUNT (bb->succs) == 2
1678 	   && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
1679 	   && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
1680     {
1681       edge true_edge, false_edge;
1682 
1683       extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1684 
1685       /* Only try to thread the edge if it reaches a target block with
1686 	 more than one predecessor and more than one successor.  */
1687       if (potentially_threadable_block (true_edge->dest))
1688 	{
1689 	  struct edge_info *edge_info;
1690 	  unsigned int i;
1691 
1692 	  /* Push a marker onto the available expression stack so that we
1693 	     unwind any expressions related to the TRUE arm before processing
1694 	     the false arm below.  */
1695           VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, NULL);
1696 	  VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
1697 
1698 	  edge_info = (struct edge_info *) true_edge->aux;
1699 
1700 	  /* If we have info associated with this edge, record it into
1701 	     our equivalence tables.  */
1702 	  if (edge_info)
1703 	    {
1704 	      struct cond_equivalence *cond_equivalences = edge_info->cond_equivalences;
1705 	      tree lhs = edge_info->lhs;
1706 	      tree rhs = edge_info->rhs;
1707 
1708 	      /* If we have a simple NAME = VALUE equivalence, record it.  */
1709 	      if (lhs && TREE_CODE (lhs) == SSA_NAME)
1710 		record_const_or_copy (lhs, rhs);
1711 
1712 	      /* If we have 0 = COND or 1 = COND equivalences, record them
1713 		 into our expression hash tables.  */
1714 	      if (cond_equivalences)
1715 		for (i = 0; i < edge_info->max_cond_equivalences; i++)
1716                   record_cond (&cond_equivalences[i]);
1717 	    }
1718 
1719 	  dom_thread_across_edge (walk_data, true_edge);
1720 
1721 	  /* And restore the various tables to their state before
1722 	     we threaded this edge.  */
1723 	  remove_local_expressions_from_table ();
1724 	}
1725 
1726       /* Similarly for the ELSE arm.  */
1727       if (potentially_threadable_block (false_edge->dest))
1728 	{
1729 	  struct edge_info *edge_info;
1730 	  unsigned int i;
1731 
1732 	  VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
1733 	  edge_info = (struct edge_info *) false_edge->aux;
1734 
1735 	  /* If we have info associated with this edge, record it into
1736 	     our equivalence tables.  */
1737 	  if (edge_info)
1738 	    {
1739 	      struct cond_equivalence *cond_equivalences = edge_info->cond_equivalences;
1740 	      tree lhs = edge_info->lhs;
1741 	      tree rhs = edge_info->rhs;
1742 
1743 	      /* If we have a simple NAME = VALUE equivalence, record it.  */
1744 	      if (lhs && TREE_CODE (lhs) == SSA_NAME)
1745 		record_const_or_copy (lhs, rhs);
1746 
1747 	      /* If we have 0 = COND or 1 = COND equivalences, record them
1748 		 into our expression hash tables.  */
1749 	      if (cond_equivalences)
1750 		for (i = 0; i < edge_info->max_cond_equivalences; i++)
1751                   record_cond (&cond_equivalences[i]);
1752 	    }
1753 
1754 	  /* Now thread the edge.  */
1755 	  dom_thread_across_edge (walk_data, false_edge);
1756 
1757 	  /* No need to remove local expressions from our tables
1758 	     or restore vars to their original value as that will
1759 	     be done immediately below.  */
1760 	}
1761     }
1762 
1763   remove_local_expressions_from_table ();
1764   restore_vars_to_original_value ();
1765 }
1766 
1767 /* Search for redundant computations in STMT.  If any are found, then
1768    replace them with the variable holding the result of the computation.
1769 
1770    If safe, record this expression into the available expression hash
1771    table.  */
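
/* For example (SSA version numbers invented for illustration), given

     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;

   the second computation is redundant and is rewritten as d_4 = a_1,
   reusing the result already available in a_1.  */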
1772 
1773 static void
1774 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
1775 {
1776   tree expr_type;
1777   tree cached_lhs;
1778   bool insert = true;
1779   bool assigns_var_p = false;
1780 
1781   gimple stmt = gsi_stmt (*gsi);
1782 
1783   tree def = gimple_get_lhs (stmt);
1784 
1785   /* Certain expressions on the RHS can be optimized away, but cannot
1786      themselves be entered into the hash tables.  */
1787   if (! def
1788       || TREE_CODE (def) != SSA_NAME
1789       || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
1790       || gimple_vdef (stmt)
1791       /* Do not record equivalences for increments of ivs.  This would create
1792 	 overlapping live ranges for a very questionable gain.  */
1793       || simple_iv_increment_p (stmt))
1794     insert = false;
1795 
1796   /* Check if the expression has been computed before.  */
1797   cached_lhs = lookup_avail_expr (stmt, insert);
1798 
1799   opt_stats.num_exprs_considered++;
1800 
1801   /* Get the type of the expression we are trying to optimize.  */
1802   if (is_gimple_assign (stmt))
1803     {
1804       expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
1805       assigns_var_p = true;
1806     }
1807   else if (gimple_code (stmt) == GIMPLE_COND)
1808     expr_type = boolean_type_node;
1809   else if (is_gimple_call (stmt))
1810     {
1811       gcc_assert (gimple_call_lhs (stmt));
1812       expr_type = TREE_TYPE (gimple_call_lhs (stmt));
1813       assigns_var_p = true;
1814     }
1815   else if (gimple_code (stmt) == GIMPLE_SWITCH)
1816     expr_type = TREE_TYPE (gimple_switch_index (stmt));
1817   else
1818     gcc_unreachable ();
1819 
1820   if (!cached_lhs)
1821     return;
1822 
1823   /* It is safe to ignore types here since we have already done
1824      type checking in the hashing and equality routines.  In fact
1825      type checking here merely gets in the way of constant
1826      propagation.  Also, make sure that it is safe to propagate
1827      CACHED_LHS into the expression in STMT.  */
1828   if ((TREE_CODE (cached_lhs) != SSA_NAME
1829        && (assigns_var_p
1830            || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
1831       || may_propagate_copy_into_stmt (stmt, cached_lhs))
1832     {
1833 #if defined ENABLE_CHECKING
1834       gcc_assert (TREE_CODE (cached_lhs) == SSA_NAME
1835 		  || is_gimple_min_invariant (cached_lhs));
1836 #endif
1837 
1838       if (dump_file && (dump_flags & TDF_DETAILS))
1839 	{
1840 	  fprintf (dump_file, "  Replaced redundant expr '");
1841 	  print_gimple_expr (dump_file, stmt, 0, dump_flags);
1842 	  fprintf (dump_file, "' with '");
1843 	  print_generic_expr (dump_file, cached_lhs, dump_flags);
1844           fprintf (dump_file, "'\n");
1845 	}
1846 
1847       opt_stats.num_re++;
1848 
1849       if (assigns_var_p
1850 	  && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
1851 	cached_lhs = fold_convert (expr_type, cached_lhs);
1852 
1853       propagate_tree_value_into_stmt (gsi, cached_lhs);
1854 
1855       /* Since it is always necessary to mark the result as modified,
1856          perhaps we should move this into propagate_tree_value_into_stmt
1857          itself.  */
1858       gimple_set_modified (gsi_stmt (*gsi), true);
1859     }
1860 }
1861 
1862 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
1863    the available expressions table or the const_and_copies table.
1864    Detect and record those equivalences.  */
1865 /* We handle only very simple copy equivalences here.  The heavy
1866    lifting is done by eliminate_redundant_computations.  */
1867 
1868 static void
1869 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
1870 {
1871   tree lhs;
1872   enum tree_code lhs_code;
1873 
1874   gcc_assert (is_gimple_assign (stmt));
1875 
1876   lhs = gimple_assign_lhs (stmt);
1877   lhs_code = TREE_CODE (lhs);
1878 
1879   if (lhs_code == SSA_NAME
1880       && gimple_assign_single_p (stmt))
1881     {
1882       tree rhs = gimple_assign_rhs1 (stmt);
1883 
1884       /* If the RHS of the assignment is a constant or another variable that
1885 	 may be propagated, register it in the CONST_AND_COPIES table.  We
1886 	 do not need to record unwind data for this, since this is a true
1887 	 assignment and not an equivalence inferred from a comparison.  All
1888 	 uses of this ssa name are dominated by this assignment, so unwinding
1889 	 just costs time and space.  */
1890       if (may_optimize_p
1891 	  && (TREE_CODE (rhs) == SSA_NAME
1892 	      || is_gimple_min_invariant (rhs)))
1893       {
1894 	if (dump_file && (dump_flags & TDF_DETAILS))
1895 	  {
1896 	    fprintf (dump_file, "==== ASGN ");
1897 	    print_generic_expr (dump_file, lhs, 0);
1898 	    fprintf (dump_file, " = ");
1899 	    print_generic_expr (dump_file, rhs, 0);
1900 	    fprintf (dump_file, "\n");
1901 	  }
1902 
1903 	set_ssa_name_value (lhs, rhs);
1904       }
1905     }
1906 
1907   /* A memory store, even an aliased store, creates a useful
1908      equivalence.  By exchanging the LHS and RHS, creating suitable
1909      vops and recording the result in the available expression table,
1910      we may be able to expose more redundant loads.  */
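  /* A sketch (names hypothetical): for the store "*p_1 = x_2;" we enter
     the artificial statement "x_2 = *p_1;" into the table, carrying the
     store's virtual definition as its VUSE, so that a later load of
     *p_1 seeing the same virtual operand can be replaced by x_2.  */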
1911   if (!gimple_has_volatile_ops (stmt)
1912       && gimple_references_memory_p (stmt)
1913       && gimple_assign_single_p (stmt)
1914       && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
1915 	  || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
1916       && !is_gimple_reg (lhs))
1917     {
1918       tree rhs = gimple_assign_rhs1 (stmt);
1919       gimple new_stmt;
1920 
1921       /* Build a new statement with the RHS and LHS exchanged.  */
1922       if (TREE_CODE (rhs) == SSA_NAME)
1923         {
1924           /* NOTE tuples.  The call to gimple_build_assign below replaced
1925              a call to build_gimple_modify_stmt, which did not set the
1926              SSA_NAME_DEF_STMT on the LHS of the assignment.  Doing so
1927              may cause an SSA validation failure, as the LHS may be a
1928              default-initialized name and should have no definition.  I'm
1929              a bit dubious of this, as the artificial statement that we
1930              generate here may in fact be ill-formed, but it is simply
1931              used as an internal device in this pass, and never becomes
1932              part of the CFG.  */
1933           gimple defstmt = SSA_NAME_DEF_STMT (rhs);
1934           new_stmt = gimple_build_assign (rhs, lhs);
1935           SSA_NAME_DEF_STMT (rhs) = defstmt;
1936         }
1937       else
1938         new_stmt = gimple_build_assign (rhs, lhs);
1939 
1940       gimple_set_vuse (new_stmt, gimple_vdef (stmt));
1941 
1942       /* Finally enter the statement into the available expression
1943 	 table.  */
1944       lookup_avail_expr (new_stmt, true);
1945     }
1946 }
1947 
1948 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
1949    CONST_AND_COPIES.  */
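/* For example (invented names): if CONST_AND_COPIES records that b_2
   has the value 5, a use of b_2 in "a_1 = b_2 + 1" is rewritten to
   "a_1 = 5 + 1", which subsequent folding reduces to "a_1 = 6".  */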
1950 
1951 static void
1952 cprop_operand (gimple stmt, use_operand_p op_p)
1953 {
1954   tree val;
1955   tree op = USE_FROM_PTR (op_p);
1956 
1957   /* If the operand has a known constant value or it is known to be a
1958      copy of some other variable, use the value or copy stored in
1959      CONST_AND_COPIES.  */
1960   val = SSA_NAME_VALUE (op);
1961   if (val && val != op)
1962     {
1963       /* Do not change the base variable in the virtual operand
1964 	 tables.  That would make it impossible to reconstruct
1965 	 the renamed virtual operand if we later modify this
1966 	 statement.  Also only allow the new value to be an SSA_NAME
1967 	 for propagation into virtual operands.  */
1968       if (!is_gimple_reg (op)
1969 	  && (TREE_CODE (val) != SSA_NAME
1970 	      || is_gimple_reg (val)
1971 	      || get_virtual_var (val) != get_virtual_var (op)))
1972 	return;
1973 
1974       /* Do not replace hard register operands in asm statements.  */
1975       if (gimple_code (stmt) == GIMPLE_ASM
1976 	  && !may_propagate_copy_into_asm (op))
1977 	return;
1978 
1979       /* Certain operands are not allowed to be copy propagated due
1980 	 to their interaction with exception handling and some GCC
1981 	 extensions.  */
1982       if (!may_propagate_copy (op, val))
1983 	return;
1984 
1985       /* Do not propagate addresses that point to volatiles into memory
1986 	 stmts without volatile operands.  */
1987       if (POINTER_TYPE_P (TREE_TYPE (val))
1988 	  && TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (val)))
1989 	  && gimple_has_mem_ops (stmt)
1990 	  && !gimple_has_volatile_ops (stmt))
1991 	return;
1992 
1993       /* Do not propagate copies if the propagated value is at a deeper loop
1994 	 depth than the propagatee.  Otherwise, this may move loop variant
1995 	 variables outside of their loops and prevent coalescing
1996 	 opportunities.  If the value was loop invariant, it will be hoisted
1997 	 by LICM and exposed for copy propagation.  */
1998       if (loop_depth_of_name (val) > loop_depth_of_name (op))
1999 	return;
2000 
2001       /* Do not propagate copies into simple IV increment statements.
2002          See PR23821 for how this can disturb IV analysis.  */
2003       if (TREE_CODE (val) != INTEGER_CST
2004 	  && simple_iv_increment_p (stmt))
2005 	return;
2006 
2007       /* Dump details.  */
2008       if (dump_file && (dump_flags & TDF_DETAILS))
2009 	{
2010 	  fprintf (dump_file, "  Replaced '");
2011 	  print_generic_expr (dump_file, op, dump_flags);
2012 	  fprintf (dump_file, "' with %s '",
2013 		   (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2014 	  print_generic_expr (dump_file, val, dump_flags);
2015 	  fprintf (dump_file, "'\n");
2016 	}
2017 
2018       if (TREE_CODE (val) != SSA_NAME)
2019 	opt_stats.num_const_prop++;
2020       else
2021 	opt_stats.num_copy_prop++;
2022 
2023       propagate_value (op_p, val);
2024 
2025       /* And note that we modified this statement.  This is now
2026 	 safe, even if we changed virtual operands since we will
2027 	 rescan the statement and rewrite its operands again.  */
2028       gimple_set_modified (stmt, true);
2029     }
2030 }
2031 
2032 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2033    known value for that SSA_NAME (or NULL if no value is known).
2034 
2035    Propagate values from CONST_AND_COPIES into the uses, vuses and
2036    vdef_ops of STMT.  */
2037 
2038 static void
2039 cprop_into_stmt (gimple stmt)
2040 {
2041   use_operand_p op_p;
2042   ssa_op_iter iter;
2043 
2044   FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_ALL_USES)
2045     {
2046       if (TREE_CODE (USE_FROM_PTR (op_p)) == SSA_NAME)
2047 	cprop_operand (stmt, op_p);
2048     }
2049 }
2050 
2051 /* Optimize the statement pointed to by iterator SI.
2052 
2053    We try to perform some simplistic global redundancy elimination and
2054    constant propagation:
2055 
2056    1- To detect global redundancy, we keep track of expressions that have
2057       been computed in this block and its dominators.  If we find that the
2058       same expression is computed more than once, we eliminate repeated
2059       computations by using the target of the first one.
2060 
2061    2- Constant values and copy assignments.  This is used to do very
2062       simplistic constant and copy propagation.  When a constant or copy
2063       assignment is found, we map the value on the RHS of the assignment to
2064       the variable in the LHS in the CONST_AND_COPIES table.  */
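
/* A small worked example (SSA names invented for illustration):

     x_1 = 4;		   records x_1 = 4 in CONST_AND_COPIES
     y_2 = x_1 + a_3;	   const-propagated to y_2 = 4 + a_3
     z_4 = x_1 + a_3;	   propagated to z_4 = 4 + a_3, whose RHS is
			   already available; replaced by z_4 = y_2  */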
2065 
2066 static void
2067 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2068 {
2069   gimple stmt, old_stmt;
2070   bool may_optimize_p;
2071   bool modified_p = false;
2072 
2073   old_stmt = stmt = gsi_stmt (si);
2074 
2075   if (gimple_code (stmt) == GIMPLE_COND)
2076     canonicalize_comparison (stmt);
2077 
2078   update_stmt_if_modified (stmt);
2079   opt_stats.num_stmts++;
2080 
2081   if (dump_file && (dump_flags & TDF_DETAILS))
2082     {
2083       fprintf (dump_file, "Optimizing statement ");
2084       print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2085     }
2086 
2087   /* Const/copy propagate into USES, VUSES and the RHS of VDEFs.  */
2088   cprop_into_stmt (stmt);
2089 
2090   /* If the statement has been modified with constant replacements,
2091      fold its RHS before checking for redundant computations.  */
2092   if (gimple_modified_p (stmt))
2093     {
2094       tree rhs = NULL;
2095 
2096       /* Try to fold the statement making sure that STMT is kept
2097 	 up to date.  */
2098       if (fold_stmt (&si))
2099 	{
2100 	  stmt = gsi_stmt (si);
2101 	  gimple_set_modified (stmt, true);
2102 
2103 	  if (dump_file && (dump_flags & TDF_DETAILS))
2104 	    {
2105 	      fprintf (dump_file, "  Folded to: ");
2106 	      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2107 	    }
2108 	}
2109 
2110       /* We only need to consider cases that can yield a gimple operand.  */
2111       if (gimple_assign_single_p (stmt))
2112         rhs = gimple_assign_rhs1 (stmt);
2113       else if (gimple_code (stmt) == GIMPLE_GOTO)
2114         rhs = gimple_goto_dest (stmt);
2115       else if (gimple_code (stmt) == GIMPLE_SWITCH)
2116         /* This should never be an ADDR_EXPR.  */
2117         rhs = gimple_switch_index (stmt);
2118 
2119       if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2120         recompute_tree_invariant_for_addr_expr (rhs);
2121 
2122       /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2123 	 even if fold_stmt updated the stmt already and thus cleared
2124 	 gimple_modified_p flag on it.  */
2125       modified_p = true;
2126     }
2127 
2128   /* Check for redundant computations.  Do this only for assignments and
2129      calls without volatile ops, and for conditionals and switches.  */
2130   may_optimize_p = (!gimple_has_volatile_ops (stmt)
2131                     && ((is_gimple_assign (stmt)
2132                          && !gimple_rhs_has_side_effects (stmt))
2133                         || (is_gimple_call (stmt)
2134                             && gimple_call_lhs (stmt) != NULL_TREE
2135                             && !gimple_rhs_has_side_effects (stmt))
2136                         || gimple_code (stmt) == GIMPLE_COND
2137                         || gimple_code (stmt) == GIMPLE_SWITCH));
2138 
2139   if (may_optimize_p)
2140     {
2141       if (gimple_code (stmt) == GIMPLE_CALL)
2142 	{
2143 	  /* Resolve __builtin_constant_p.  If it hasn't been
2144 	     folded to integer_one_node by now, it's fairly
2145 	     certain that the value simply isn't constant.  */
2146 	  tree callee = gimple_call_fndecl (stmt);
2147 	  if (callee
2148 	      && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2149 	      && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2150 	    {
2151 	      propagate_tree_value_into_stmt (&si, integer_zero_node);
2152 	      stmt = gsi_stmt (si);
2153 	    }
2154 	}
2155 
2156       update_stmt_if_modified (stmt);
2157       eliminate_redundant_computations (&si);
2158       stmt = gsi_stmt (si);
2159     }
2160 
2161   /* Record any additional equivalences created by this statement.  */
2162   if (is_gimple_assign (stmt))
2163     record_equivalences_from_stmt (stmt, may_optimize_p);
2164 
2165   /* If STMT is a COND_EXPR and it was modified, then we may know
2166      where it goes.  If that is the case, then mark the CFG as altered.
2167 
2168      This will cause us to later call remove_unreachable_blocks and
2169      cleanup_tree_cfg when it is safe to do so.  It is not safe to
2170      clean things up here since removal of edges and such can trigger
2171      the removal of PHI nodes, which in turn can release SSA_NAMEs to
2172      the manager.
2173 
2174      That's all fine and good, except that once SSA_NAMEs are released
2175      to the manager, we must not call create_ssa_name until all references
2176      to released SSA_NAMEs have been eliminated.
2177 
2178      All references to the deleted SSA_NAMEs cannot be eliminated until
2179      we remove unreachable blocks.
2180 
2181      We cannot remove unreachable blocks until after we have completed
2182      any queued jump threading.
2183 
2184      We cannot complete any queued jump threads until we have taken
2185      appropriate variables out of SSA form.  Taking variables out of
2186      SSA form can call create_ssa_name and thus we lose.
2187 
2188      Ultimately I suspect we're going to need to change the interface
2189      into the SSA_NAME manager.  */
2190   if (gimple_modified_p (stmt) || modified_p)
2191     {
2192       tree val = NULL;
2193 
2194       update_stmt_if_modified (stmt);
2195 
2196       if (gimple_code (stmt) == GIMPLE_COND)
2197         val = fold_binary_loc (gimple_location (stmt),
2198 			   gimple_cond_code (stmt), boolean_type_node,
2199                            gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2200       else if (gimple_code (stmt) == GIMPLE_SWITCH)
2201 	val = gimple_switch_index (stmt);
2202 
2203       if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2204 	cfg_altered = true;
2205 
2206       /* If we simplified a statement in such a way as to be shown that it
2207 	 cannot trap, update the eh information and the cfg to match.  */
2208       if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2209 	{
2210 	  bitmap_set_bit (need_eh_cleanup, bb->index);
2211 	  if (dump_file && (dump_flags & TDF_DETAILS))
2212 	    fprintf (dump_file, "  Flagged to clear EH edges.\n");
2213 	}
2214     }
2215 }
2216 
2217 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2218    If found, return its LHS. Otherwise insert STMT in the table and
2219    return NULL_TREE.
2220 
2221    Also, when an expression is first inserted in the table, it is
2222    added to AVAIL_EXPRS_STACK, so that it can be removed when
2223    we finish processing this block and its children.  */
2224 
2225 static tree
2226 lookup_avail_expr (gimple stmt, bool insert)
2227 {
2228   void **slot;
2229   tree lhs;
2230   tree temp;
2231   struct expr_hash_elt element;
2232 
2233   /* Get LHS of assignment or call, else NULL_TREE.  */
2234   lhs = gimple_get_lhs (stmt);
2235 
2236   initialize_hash_element (stmt, lhs, &element);
2237 
2238   if (dump_file && (dump_flags & TDF_DETAILS))
2239     {
2240       fprintf (dump_file, "LKUP ");
2241       print_expr_hash_elt (dump_file, &element);
2242     }
2243 
2244   /* Don't bother remembering constant assignments and copy operations.
2245      Constants and copy operations are handled by the constant/copy propagator
2246      in optimize_stmt.  */
2247   if (element.expr.kind == EXPR_SINGLE
2248       && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2249           || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2250     return NULL_TREE;
2251 
2252   /* Finally try to find the expression in the main expression hash table.  */
2253   slot = htab_find_slot_with_hash (avail_exprs, &element, element.hash,
2254 				   (insert ? INSERT : NO_INSERT));
2255   if (slot == NULL)
2256     return NULL_TREE;
2257 
2258   if (*slot == NULL)
2259     {
2260       struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2261       *element2 = element;
2262       element2->stamp = element2;
2263       *slot = (void *) element2;
2264 
2265       if (dump_file && (dump_flags & TDF_DETAILS))
2266         {
2267           fprintf (dump_file, "2>>> ");
2268           print_expr_hash_elt (dump_file, element2);
2269         }
2270 
2271       VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, element2);
2272       return NULL_TREE;
2273     }
2274 
2275   /* Extract the LHS of the assignment so that it can be used as the current
2276      definition of another variable.  */
2277   lhs = ((struct expr_hash_elt *)*slot)->lhs;
2278 
2279   /* See if the LHS appears in the CONST_AND_COPIES table.  If it does, then
2280      use the value from the const_and_copies table.  */
2281   if (TREE_CODE (lhs) == SSA_NAME)
2282     {
2283       temp = SSA_NAME_VALUE (lhs);
2284       if (temp)
2285 	lhs = temp;
2286     }
2287 
2288   if (dump_file && (dump_flags & TDF_DETAILS))
2289     {
2290       fprintf (dump_file, "FIND: ");
2291       print_generic_expr (dump_file, lhs, 0);
2292       fprintf (dump_file, "\n");
2293     }
2294 
2295   return lhs;
2296 }
2297 
2298 /* Hashing and equality functions for AVAIL_EXPRS.  We compute a value number
2299    for expressions using the code of the expression and the SSA numbers of
2300    its operands.  */
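
/* For instance (hypothetical IL), two loads "t_5 = a[i_1]" sharing the
   same VUSE hash identically, whereas a store to a[i_1] between them
   creates a new virtual definition; the second load then carries a
   different VUSE, hashes differently, and is not treated as redundant.  */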
2301 
2302 static hashval_t
2303 avail_expr_hash (const void *p)
2304 {
2305   gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
2306   const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2307   tree vuse;
2308   hashval_t val = 0;
2309 
2310   val = iterative_hash_hashable_expr (expr, val);
2311 
2312   /* If the hash table entry is not associated with a statement, then we
2313      can just hash the expression and not worry about virtual operands
2314      and such.  */
2315   if (!stmt)
2316     return val;
2317 
2318   /* Add the SSA version number of the vuse operand.  This is important
2319      because compound variables like arrays are not renamed in the
2320      operands.  Rather, the rename is done on the virtual variable
2321      representing all the elements of the array.  */
2322   if ((vuse = gimple_vuse (stmt)))
2323     val = iterative_hash_expr (vuse, val);
2324 
2325   return val;
2326 }
2327 
2328 static hashval_t
2329 real_avail_expr_hash (const void *p)
2330 {
2331   return ((const struct expr_hash_elt *)p)->hash;
2332 }
2333 
2334 static int
2335 avail_expr_eq (const void *p1, const void *p2)
2336 {
2337   gimple stmt1 = ((const struct expr_hash_elt *)p1)->stmt;
2338   const struct hashable_expr *expr1 = &((const struct expr_hash_elt *)p1)->expr;
2339   const struct expr_hash_elt *stamp1 = ((const struct expr_hash_elt *)p1)->stamp;
2340   gimple stmt2 = ((const struct expr_hash_elt *)p2)->stmt;
2341   const struct hashable_expr *expr2 = &((const struct expr_hash_elt *)p2)->expr;
2342   const struct expr_hash_elt *stamp2 = ((const struct expr_hash_elt *)p2)->stamp;
2343 
2344   /* This case should apply only when removing entries from the table.  */
2345   if (stamp1 == stamp2)
2346     return true;
2347 
2348   /* FIXME tuples:
2349      We add stmts to a hash table and then modify them.  To detect the
2350      case that we modify a stmt and then search for it, we assume that
2351      the hash is always modified by that change.
2352      We have to fully check why this doesn't happen on trunk or rewrite
2353      this in a more reliable (and easier to understand) way.  */
2354   if (((const struct expr_hash_elt *)p1)->hash
2355       != ((const struct expr_hash_elt *)p2)->hash)
2356     return false;
2357 
2358   /* In case of a collision, both RHS have to be identical and have the
2359      same VUSE operands.  */
2360   if (hashable_expr_equal_p (expr1, expr2)
2361       && types_compatible_p (expr1->type, expr2->type))
2362     {
2363       /* Note that STMT1 and/or STMT2 may be NULL.  */
2364       return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
2365 	      == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
2366     }
2367 
2368   return false;
2369 }
2370 
2371 /* PHI-ONLY copy and constant propagation.  This pass is meant to clean
2372    up degenerate PHIs created by or exposed by jump threading.  */
2373 
2374 /* Given PHI, return its RHS if the PHI is a degenerate, otherwise return
2375    NULL.  */
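
/* For instance (made-up names), x_2 = PHI <y_3(3), y_3(5)> is degenerate
   with value y_3, as is x_2 = PHI <x_2(3), y_3(5)>, since arguments equal
   to the PHI result itself are ignored.  */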
2376 
2377 tree
2378 degenerate_phi_result (gimple phi)
2379 {
2380   tree lhs = gimple_phi_result (phi);
2381   tree val = NULL;
2382   size_t i;
2383 
2384   /* Ignoring arguments which are the same as LHS, if all the remaining
2385      arguments are the same, then the PHI is a degenerate and has the
2386      value of that common argument.  */
2387   for (i = 0; i < gimple_phi_num_args (phi); i++)
2388     {
2389       tree arg = gimple_phi_arg_def (phi, i);
2390 
2391       if (arg == lhs)
2392 	continue;
2393       else if (!arg)
2394 	break;
2395       else if (!val)
2396 	val = arg;
2397       else if (arg == val)
2398 	continue;
2399       /* We bring in some of operand_equal_p not only to speed things
2400 	 up, but also to avoid crashing when dereferencing the type of
2401 	 a released SSA name.  */
2402       else if (TREE_CODE (val) != TREE_CODE (arg)
2403 	       || TREE_CODE (val) == SSA_NAME
2404 	       || !operand_equal_p (arg, val, 0))
2405 	break;
2406     }
2407   return (i == gimple_phi_num_args (phi) ? val : NULL);
2408 }
2409 
2410 /* Given a statement STMT, which is either a PHI node or an assignment,
2411    remove it from the IL.  */
2412 
2413 static void
2414 remove_stmt_or_phi (gimple stmt)
2415 {
2416   gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2417 
2418   if (gimple_code (stmt) == GIMPLE_PHI)
2419     remove_phi_node (&gsi, true);
2420   else
2421     {
2422       gsi_remove (&gsi, true);
2423       release_defs (stmt);
2424     }
2425 }
2426 
2427 /* Given a statement STMT, which is either a PHI node or an assignment,
2428    return the "rhs" of the node.  In the case of a non-degenerate
2429    PHI, NULL is returned.  */
2430 
2431 static tree
2432 get_rhs_or_phi_arg (gimple stmt)
2433 {
2434   if (gimple_code (stmt) == GIMPLE_PHI)
2435     return degenerate_phi_result (stmt);
2436   else if (gimple_assign_single_p (stmt))
2437     return gimple_assign_rhs1 (stmt);
2438   else
2439     gcc_unreachable ();
2440 }
2441 
2442 
2443 /* Given a statement STMT, which is either a PHI node or an assignment,
2444    return the "lhs" of the node.  */
2445 
2446 static tree
2447 get_lhs_or_phi_result (gimple stmt)
2448 {
2449   if (gimple_code (stmt) == GIMPLE_PHI)
2450     return gimple_phi_result (stmt);
2451   else if (is_gimple_assign (stmt))
2452     return gimple_assign_lhs (stmt);
2453   else
2454     gcc_unreachable ();
2455 }
2456 
2457 /* Propagate RHS into all uses of LHS (when possible).
2458 
2459    RHS and LHS are derived from STMT, which is passed in solely so
2460    that we can remove it if propagation is successful.
2461 
2462    When propagating into a PHI node or into a statement which turns
2463    into a trivial copy or constant initialization, set the
2464    appropriate bit in INTERESTING_NAMEs so that we will visit those
2465    nodes as well in an effort to pick up secondary optimization
2466    opportunities.  */
2467 
2468 static void
2469 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2470 {
2471   /* First verify that propagation is valid and isn't going to move a
2472      loop variant variable outside its loop.  */
2473   if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2474       && (TREE_CODE (rhs) != SSA_NAME
2475 	  || ! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs))
2476       && may_propagate_copy (lhs, rhs)
2477       && loop_depth_of_name (lhs) >= loop_depth_of_name (rhs))
2478     {
2479       use_operand_p use_p;
2480       imm_use_iterator iter;
2481       gimple use_stmt;
2482       bool all = true;
2483 
2484       /* Dump details.  */
2485       if (dump_file && (dump_flags & TDF_DETAILS))
2486 	{
2487 	  fprintf (dump_file, "  Replacing '");
2488 	  print_generic_expr (dump_file, lhs, dump_flags);
2489 	  fprintf (dump_file, "' with %s '",
2490 	           (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2491 	  print_generic_expr (dump_file, rhs, dump_flags);
2492 	  fprintf (dump_file, "'\n");
2493 	}
2494 
2495       /* Walk over every use of LHS and try to replace the use with RHS.
2496 	 At this point the only reason why such a propagation would not
2497 	 be successful would be if the use occurs in an ASM_EXPR.  */
2498       FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2499 	{
2500 	  /* Leave debug stmts alone.  If we succeed in propagating
2501 	     all non-debug uses, we'll drop the DEF, and propagation
2502 	     into debug stmts will occur then.  */
2503 	  if (gimple_debug_bind_p (use_stmt))
2504 	    continue;
2505 
2506 	  /* It's not always safe to propagate into an ASM_EXPR.  */
2507 	  if (gimple_code (use_stmt) == GIMPLE_ASM
2508               && ! may_propagate_copy_into_asm (lhs))
2509 	    {
2510 	      all = false;
2511 	      continue;
2512 	    }
2513 
2514 	  /* It's not ok to propagate into the definition stmt of RHS.
2515 		<bb 9>:
2516 		  # prephitmp.12_36 = PHI <g_67.1_6(9)>
2517 		  g_67.1_6 = prephitmp.12_36;
2518 		  goto <bb 9>;
2519 	     While this is strictly all dead code we do not want to
2520 	     deal with this here.  */
2521 	  if (TREE_CODE (rhs) == SSA_NAME
2522 	      && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2523 	    {
2524 	      all = false;
2525 	      continue;
2526 	    }
2527 
2528 	  /* Dump details.  */
2529 	  if (dump_file && (dump_flags & TDF_DETAILS))
2530 	    {
2531 	      fprintf (dump_file, "    Original statement:");
2532 	      print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2533 	    }
2534 
2535 	  /* Propagate the RHS into this use of the LHS.  */
2536 	  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2537 	    propagate_value (use_p, rhs);
2538 
2539 	  /* Special cases to avoid useless calls into the folding
2540 	     routines, operand scanning, etc.
2541 
2542 	     First, propagation into a PHI may cause the PHI to become
2543 	     a degenerate, so mark the PHI as interesting.  No other
2544 	     actions are necessary.
2545 
2546 	     Second, if we're propagating a virtual operand and the
2547 	     propagation does not change the underlying _DECL node for
2548 	     the virtual operand, then no further actions are necessary.  */
2549 	  if (gimple_code (use_stmt) == GIMPLE_PHI
2550 	      || (! is_gimple_reg (lhs)
2551 		  && TREE_CODE (rhs) == SSA_NAME
2552 		  && SSA_NAME_VAR (lhs) == SSA_NAME_VAR (rhs)))
2553 	    {
2554 	      /* Dump details.  */
2555 	      if (dump_file && (dump_flags & TDF_DETAILS))
2556 		{
2557 		  fprintf (dump_file, "    Updated statement:");
2558 		  print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2559 		}
2560 
2561 	      /* Propagation into a PHI may expose new degenerate PHIs,
2562 		 so mark the result of the PHI as interesting.  */
2563 	      if (gimple_code (use_stmt) == GIMPLE_PHI)
2564 		{
2565 		  tree result = get_lhs_or_phi_result (use_stmt);
2566 		  bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2567 		}
2568 
2569 	      continue;
2570 	    }
2571 
2572 	  /* From this point onward we are propagating into a
2573 	     real statement.  Folding may (or may not) be possible,
2574 	     we may expose new operands, expose dead EH edges,
2575 	     etc.  */
2576           /* NOTE tuples. In the tuples world, fold_stmt_inplace
2577              cannot fold a call that simplifies to a constant,
2578              because the GIMPLE_CALL must be replaced by a
2579              GIMPLE_ASSIGN, and there is no way to effect such a
2580              transformation in-place.  We might want to consider
2581              using the more general fold_stmt here.  */
2582 	  fold_stmt_inplace (use_stmt);
2583 
2584 	  /* Sometimes propagation can expose new operands to the
2585 	     renamer.  */
2586 	  update_stmt (use_stmt);
2587 
2588 	  /* Dump details.  */
2589 	  if (dump_file && (dump_flags & TDF_DETAILS))
2590 	    {
2591 	      fprintf (dump_file, "    Updated statement:");
2592 	      print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2593 	    }
2594 
2595 	  /* If we replaced a variable index with a constant, then
2596 	     we would need to update the invariant flag for ADDR_EXPRs.  */
2597           if (gimple_assign_single_p (use_stmt)
2598               && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2599 	    recompute_tree_invariant_for_addr_expr
2600                 (gimple_assign_rhs1 (use_stmt));
2601 
2602 	  /* If we cleaned up EH information from the statement,
2603 	     mark its containing block as needing EH cleanups.  */
2604 	  if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2605 	    {
2606 	      bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2607 	      if (dump_file && (dump_flags & TDF_DETAILS))
2608 		fprintf (dump_file, "  Flagged to clear EH edges.\n");
2609 	    }
2610 
2611 	  /* Propagation may expose new trivial copy/constant propagation
2612 	     opportunities.  */
2613           if (gimple_assign_single_p (use_stmt)
2614               && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2615               && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2616                   || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2617             {
2618 	      tree result = get_lhs_or_phi_result (use_stmt);
2619 	      bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2620 	    }
2621 
2622 	  /* Propagation into these nodes may make certain edges in
2623 	     the CFG unexecutable.  We want to identify them, as PHI nodes
2624 	     at the destination of those unexecutable edges may become
2625 	     degenerates.  */
2626 	  else if (gimple_code (use_stmt) == GIMPLE_COND
2627 		   || gimple_code (use_stmt) == GIMPLE_SWITCH
2628 		   || gimple_code (use_stmt) == GIMPLE_GOTO)
2629             {
2630 	      tree val;
2631 
2632 	      if (gimple_code (use_stmt) == GIMPLE_COND)
2633                 val = fold_binary_loc (gimple_location (use_stmt),
2634 				   gimple_cond_code (use_stmt),
2635                                    boolean_type_node,
2636                                    gimple_cond_lhs (use_stmt),
2637                                    gimple_cond_rhs (use_stmt));
2638               else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2639 		val = gimple_switch_index (use_stmt);
2640 	      else
2641 		val = gimple_goto_dest (use_stmt);
2642 
2643 	      if (val && is_gimple_min_invariant (val))
2644 		{
2645 		  basic_block bb = gimple_bb (use_stmt);
2646 		  edge te = find_taken_edge (bb, val);
2647 		  edge_iterator ei;
2648 		  edge e;
2649 		  gimple_stmt_iterator gsi, psi;
2650 
2651 		  /* Remove all outgoing edges except TE.  */
2652 		  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2653 		    {
2654 		      if (e != te)
2655 			{
2656 			  /* Mark all the PHI nodes at the destination of
2657 			     the unexecutable edge as interesting.  */
2658                           for (psi = gsi_start_phis (e->dest);
2659                                !gsi_end_p (psi);
2660                                gsi_next (&psi))
2661                             {
2662                               gimple phi = gsi_stmt (psi);
2663 
2664 			      tree result = gimple_phi_result (phi);
2665 			      int version = SSA_NAME_VERSION (result);
2666 
2667 			      bitmap_set_bit (interesting_names, version);
2668 			    }
2669 
2670 			  te->probability += e->probability;
2671 
2672 			  te->count += e->count;
2673 			  remove_edge (e);
2674 			  cfg_altered = true;
2675 			}
2676 		      else
2677 			ei_next (&ei);
2678 		    }
2679 
2680 		  gsi = gsi_last_bb (gimple_bb (use_stmt));
2681 		  gsi_remove (&gsi, true);
2682 
2683 		  /* And fixup the flags on the single remaining edge.  */
2684 		  te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2685 		  te->flags &= ~EDGE_ABNORMAL;
2686 		  te->flags |= EDGE_FALLTHRU;
2687 		  if (te->probability > REG_BR_PROB_BASE)
2688 		    te->probability = REG_BR_PROB_BASE;
2689 	        }
2690 	    }
2691 	}
2692 
2693       /* Ensure there is nothing else to do. */
2694       gcc_assert (!all || has_zero_uses (lhs));
2695 
2696       /* If we were able to propagate away all uses of LHS, then
2697 	 we can remove STMT.  */
2698       if (all)
2699 	remove_stmt_or_phi (stmt);
2700     }
2701 }
2702 
2703 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2704    a statement that is a trivial copy or constant initialization.
2705 
2706    Attempt to eliminate STMT by propagating its RHS into all uses of
2707    its LHS.  This may in turn set new bits in INTERESTING_NAMES
2708    for nodes we want to revisit later.
2709 
2710    All exit paths should clear INTERESTING_NAMES for the result
2711    of STMT.  */
2712 
2713 static void
2714 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2715 {
2716   tree lhs = get_lhs_or_phi_result (stmt);
2717   tree rhs;
2718   int version = SSA_NAME_VERSION (lhs);
2719 
2720   /* If the LHS of this statement or PHI has no uses, then we can
2721      just eliminate it.  This can occur if, for example, the PHI
2722      was created by block duplication due to threading and its only
2723      use was in the conditional at the end of the block which was
2724      deleted.  */
2725   if (has_zero_uses (lhs))
2726     {
2727       bitmap_clear_bit (interesting_names, version);
2728       remove_stmt_or_phi (stmt);
2729       return;
2730     }
2731 
2732   /* Get the RHS of the assignment or PHI node if the PHI is a
2733      degenerate.  */
2734   rhs = get_rhs_or_phi_arg (stmt);
2735   if (!rhs)
2736     {
2737       bitmap_clear_bit (interesting_names, version);
2738       return;
2739     }
2740 
2741   propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
2742 
2743   /* Note that STMT may well have been deleted by now, so do
2744      not access it; instead use the saved version # to clear
2745      its entry in the worklist.  */
2746   bitmap_clear_bit (interesting_names, version);
2747 }
2748 
2749 /* The first phase in degenerate PHI elimination.
2750 
2751    Eliminate the degenerate PHIs in BB, then recurse on the
2752    dominator children of BB.  */
2753 
2754 static void
2755 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
2756 {
2757   gimple_stmt_iterator gsi;
2758   basic_block son;
2759 
2760   for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2761     {
2762       gimple phi = gsi_stmt (gsi);
2763 
2764       eliminate_const_or_copy (phi, interesting_names);
2765     }
2766 
2767   /* Recurse into the dominator children of BB.  */
2768   for (son = first_dom_son (CDI_DOMINATORS, bb);
2769        son;
2770        son = next_dom_son (CDI_DOMINATORS, son))
2771     eliminate_degenerate_phis_1 (son, interesting_names);
2772 }
2773 
2774 
2775 /* A very simple pass to eliminate degenerate PHI nodes from the
2776    IL.  This is meant to be fast enough to be run several times
2777    in the optimization pipeline.
2778 
2779    Certain optimizations, particularly those which duplicate blocks
2780    or remove edges from the CFG can create or expose PHIs which are
2781    trivial copies or constant initializations.
2782 
2783    While we could pick up these optimizations in DOM or with the
2784    combination of copy-prop and CCP, those solutions are far too
2785    heavy-weight for our needs.
2786 
2787    This implementation has two phases so that we can efficiently
2788    eliminate the first order degenerate PHIs and second order
2789    degenerate PHIs.
2790 
2791    The first phase performs a dominator walk to identify and eliminate
2792    the vast majority of the degenerate PHIs.  When a degenerate PHI
2793    is identified and eliminated any affected statements or PHIs
2794    are put on a worklist.
2795 
2796    The second phase eliminates degenerate PHIs and trivial copies
2797    or constant initializations using the worklist.  This is how we
2798    pick up the secondary optimization opportunities with minimal
2799    cost.  */
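
/* To illustrate the second order effects (hypothetical names): once the
   degenerate x_1 = PHI <y_2(2), y_2(3)> is eliminated by propagating y_2
   into its uses, a later z_4 = PHI <x_1(4), y_2(5)> becomes
   z_4 = PHI <y_2(4), y_2(5)>, itself degenerate; the worklist (second)
   phase then picks it up via INTERESTING_NAMES.  */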
2800 
2801 static unsigned int
2802 eliminate_degenerate_phis (void)
2803 {
2804   bitmap interesting_names;
2805   bitmap interesting_names1;
2806 
2807   /* Bitmap of blocks which need EH information updated.  We cannot
2808      update it on-the-fly as doing so invalidates the dominator tree.  */
2809   need_eh_cleanup = BITMAP_ALLOC (NULL);
2810 
2811   /* INTERESTING_NAMES is effectively our worklist, indexed by
2812      SSA_NAME_VERSION.
2813 
2814      A set bit indicates that the statement or PHI node which
2815      defines the SSA_NAME should be (re)examined to determine if
2816      it has become a degenerate PHI or trivial const/copy propagation
2817      opportunity.
2818 
2819      Experiments have shown we generally get better compilation
2820      time behavior with bitmaps rather than sbitmaps.  */
2821   interesting_names = BITMAP_ALLOC (NULL);
2822   interesting_names1 = BITMAP_ALLOC (NULL);
2823 
2824   calculate_dominance_info (CDI_DOMINATORS);
2825   cfg_altered = false;
2826 
2827   /* First phase.  Eliminate degenerate PHIs via a dominator
2828      walk of the CFG.
2829 
2830      Experiments have indicated that we generally get better
2831      compile-time behavior by visiting blocks in the first
2832      phase in dominator order.  Presumably this is because walking
2833      in dominator order leaves fewer PHIs for later examination
2834      by the worklist phase.  */
2835   eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR, interesting_names);
2836 
2837   /* Second phase.  Eliminate second order degenerate PHIs as well
2838      as trivial copies or constant initializations identified by
2839      the first phase or this phase.  Basically we keep iterating
2840      until our set of INTERESTING_NAMEs is empty.   */
2841   while (!bitmap_empty_p (interesting_names))
2842     {
2843       unsigned int i;
2844       bitmap_iterator bi;
2845 
2846       /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
2847 	 changed during the loop.  Copy it to another bitmap and
2848 	 use that.  */
2849       bitmap_copy (interesting_names1, interesting_names);
2850 
2851       EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
2852 	{
2853 	  tree name = ssa_name (i);
2854 
2855 	  /* Ignore SSA_NAMEs that have been released because
2856 	     their defining statement was deleted (unreachable).  */
2857 	  if (name)
2858 	    eliminate_const_or_copy (SSA_NAME_DEF_STMT (name),
2859 				     interesting_names);
2860 	}
2861     }
2862 
2863   if (cfg_altered)
2864     free_dominance_info (CDI_DOMINATORS);
2865 
2866   /* Propagation of const and copies may make some EH edges dead.  Purge
2867      such edges from the CFG as needed.  */
2868   if (!bitmap_empty_p (need_eh_cleanup))
2869     {
2870       gimple_purge_all_dead_eh_edges (need_eh_cleanup);
2871       BITMAP_FREE (need_eh_cleanup);
2872     }
2873 
2874   BITMAP_FREE (interesting_names);
2875   BITMAP_FREE (interesting_names1);
2876   return 0;
2877 }
2878 
2879 struct gimple_opt_pass pass_phi_only_cprop =
2880 {
2881  {
2882   GIMPLE_PASS,
2883   "phicprop",                           /* name */
2884   gate_dominator,                       /* gate */
2885   eliminate_degenerate_phis,            /* execute */
2886   NULL,                                 /* sub */
2887   NULL,                                 /* next */
2888   0,                                    /* static_pass_number */
2889   TV_TREE_PHI_CPROP,                    /* tv_id */
2890   PROP_cfg | PROP_ssa,			/* properties_required */
2891   0,                                    /* properties_provided */
2892   0,		                        /* properties_destroyed */
2893   0,                                    /* todo_flags_start */
2894   TODO_cleanup_cfg
2895     | TODO_dump_func
2896     | TODO_ggc_collect
2897     | TODO_verify_ssa
2898     | TODO_verify_stmts
2899     | TODO_update_ssa			/* todo_flags_finish */
2900  }
2901 };
2902