xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/tree-ssa-dom.c (revision cef8759bd76c1b621f8eab8faa6f208faabc2e15)
1 /* SSA Dominator optimizations for trees
2    Copyright (C) 2001-2017 Free Software Foundation, Inc.
3    Contributed by Diego Novillo <dnovillo@redhat.com>
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11 
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 GNU General Public License for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "tree-pass.h"
28 #include "ssa.h"
29 #include "gimple-pretty-print.h"
30 #include "fold-const.h"
31 #include "cfganal.h"
32 #include "cfgloop.h"
33 #include "gimple-fold.h"
34 #include "tree-eh.h"
35 #include "gimple-iterator.h"
36 #include "tree-cfg.h"
37 #include "tree-into-ssa.h"
38 #include "domwalk.h"
39 #include "tree-ssa-propagate.h"
40 #include "tree-ssa-threadupdate.h"
41 #include "params.h"
42 #include "tree-ssa-scopedtables.h"
43 #include "tree-ssa-threadedge.h"
44 #include "tree-ssa-dom.h"
45 #include "gimplify.h"
46 #include "tree-cfgcleanup.h"
47 #include "dbgcnt.h"
48 
49 /* This file implements optimizations on the dominator tree.  */
50 
51 /* Structure for recording edge equivalences.
52 
53    Computing and storing the edge equivalences instead of creating
54    them on-demand can save significant amounts of time, particularly
55    for pathological cases involving switch statements.
56 
57    These structures live for a single iteration of the dominator
58    optimizer in the edge's AUX field.  At the end of an iteration we
59    free each of these structures.  */
60 
61 struct edge_info
62 {
63   /* If this edge creates a simple equivalence, the LHS and RHS of
64      the equivalence will be stored here.  */
65   tree lhs;
66   tree rhs;
67 
68   /* Traversing an edge may also indicate one or more particular conditions
69      are true or false.  */
70   vec<cond_equivalence> cond_equivalences;
71 };
72 
73 /* Track whether or not we have changed the control flow graph.  */
74 static bool cfg_altered;
75 
76 /* Bitmap of blocks that have had EH statements cleaned.  We should
77    remove their dead edges eventually.  */
78 static bitmap need_eh_cleanup;
79 static vec<gimple *> need_noreturn_fixup;
80 
81 /* Statistics for dominator optimizations.  */
82 struct opt_stats_d
83 {
84   long num_stmts;
85   long num_exprs_considered;
86   long num_re;
87   long num_const_prop;
88   long num_copy_prop;
89 };
90 
91 static struct opt_stats_d opt_stats;
92 
93 /* Local functions.  */
94 static edge optimize_stmt (basic_block, gimple_stmt_iterator,
95 			   class const_and_copies *,
96 			   class avail_exprs_stack *);
97 static void record_equality (tree, tree, class const_and_copies *);
98 static void record_equivalences_from_phis (basic_block);
99 static void record_equivalences_from_incoming_edge (basic_block,
100 						    class const_and_copies *,
101 						    class avail_exprs_stack *);
102 static void eliminate_redundant_computations (gimple_stmt_iterator *,
103 					      class const_and_copies *,
104 					      class avail_exprs_stack *);
105 static void record_equivalences_from_stmt (gimple *, int,
106 					   class avail_exprs_stack *);
107 static edge single_incoming_edge_ignoring_loop_edges (basic_block);
108 static void dump_dominator_optimization_stats (FILE *file,
109 					       hash_table<expr_elt_hasher> *);
110 
111 
112 /* Free the edge_info data attached to E, if it exists.  */
113 
114 void
115 free_dom_edge_info (edge e)
116 {
117   struct edge_info *edge_info = (struct edge_info *)e->aux;
118 
119   if (edge_info)
120     {
121       edge_info->cond_equivalences.release ();
122       free (edge_info);
123     }
124 }
125 
126 /* Allocate an EDGE_INFO for edge E and attach it to E.
127    Return the new EDGE_INFO structure.  */
128 
129 static struct edge_info *
130 allocate_edge_info (edge e)
131 {
132   struct edge_info *edge_info;
133 
134   /* Free the old one, if it exists.  */
135   free_dom_edge_info (e);
136 
137   edge_info = XCNEW (struct edge_info);
138 
139   e->aux = edge_info;
140   return edge_info;
141 }
142 
143 /* Free all EDGE_INFO structures associated with edges in the CFG
144    and clear the edges' AUX fields.  */
148 
149 static void
150 free_all_edge_infos (void)
151 {
152   basic_block bb;
153   edge_iterator ei;
154   edge e;
155 
156   FOR_EACH_BB_FN (bb, cfun)
157     {
158       FOR_EACH_EDGE (e, ei, bb->preds)
159         {
160 	  free_dom_edge_info (e);
161 	  e->aux = NULL;
162 	}
163     }
164 }
165 
166 /* We have finished optimizing BB; record any information implied by
167    taking a specific outgoing edge from BB.  */
168 
169 static void
170 record_edge_info (basic_block bb)
171 {
172   gimple_stmt_iterator gsi = gsi_last_bb (bb);
173   struct edge_info *edge_info;
174 
175   if (! gsi_end_p (gsi))
176     {
177       gimple *stmt = gsi_stmt (gsi);
178       location_t loc = gimple_location (stmt);
179 
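      /* A switch statement creates an equivalence on an outgoing edge
	 whose destination is reached by exactly one case label, provided
	 that label is neither a range nor the default: on such an edge
	 the switch index must equal the label's CASE_LOW.  For example,
	 an edge reached only through "case 5:" lets us record INDEX == 5.  */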
180       if (gimple_code (stmt) == GIMPLE_SWITCH)
181 	{
182 	  gswitch *switch_stmt = as_a <gswitch *> (stmt);
183 	  tree index = gimple_switch_index (switch_stmt);
184 
185 	  if (TREE_CODE (index) == SSA_NAME)
186 	    {
187 	      int i;
188               int n_labels = gimple_switch_num_labels (switch_stmt);
189 	      tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
190 	      edge e;
191 	      edge_iterator ei;
192 
193 	      for (i = 0; i < n_labels; i++)
194 		{
195 		  tree label = gimple_switch_label (switch_stmt, i);
196 		  basic_block target_bb = label_to_block (CASE_LABEL (label));
197 		  if (CASE_HIGH (label)
198 		      || !CASE_LOW (label)
199 		      || info[target_bb->index])
200 		    info[target_bb->index] = error_mark_node;
201 		  else
202 		    info[target_bb->index] = label;
203 		}
204 
205 	      FOR_EACH_EDGE (e, ei, bb->succs)
206 		{
207 		  basic_block target_bb = e->dest;
208 		  tree label = info[target_bb->index];
209 
210 		  if (label != NULL && label != error_mark_node)
211 		    {
212 		      tree x = fold_convert_loc (loc, TREE_TYPE (index),
213 						 CASE_LOW (label));
214 		      edge_info = allocate_edge_info (e);
215 		      edge_info->lhs = index;
216 		      edge_info->rhs = x;
217 		    }
218 		}
219 	      free (info);
220 	    }
221 	}
222 
223       /* A COND_EXPR may create equivalences too.  */
224       if (gimple_code (stmt) == GIMPLE_COND)
225 	{
226 	  edge true_edge;
227 	  edge false_edge;
228 
229           tree op0 = gimple_cond_lhs (stmt);
230           tree op1 = gimple_cond_rhs (stmt);
231           enum tree_code code = gimple_cond_code (stmt);
232 
233 	  extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
234 
235           /* Special case comparing booleans against a constant as we
236              know the value of OP0 on both arms of the branch.  i.e., we
237              can record an equivalence for OP0 rather than COND.
238 
239 	     However, don't do this if the constant isn't zero or one.
240 	     Such conditionals will get optimized more thoroughly during
241 	     the domwalk.  */
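	  /* For example, given if (x_5 == 0) where x_5 has a [0, 1] range,
	     we record x_5 = 0 on the true edge and x_5 = 1 on the false
	     edge.  */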
242 	  if ((code == EQ_EXPR || code == NE_EXPR)
243 	      && TREE_CODE (op0) == SSA_NAME
244 	      && ssa_name_has_boolean_range (op0)
245 	      && is_gimple_min_invariant (op1)
246 	      && (integer_zerop (op1) || integer_onep (op1)))
247             {
248 	      tree true_val = constant_boolean_node (true, TREE_TYPE (op0));
249 	      tree false_val = constant_boolean_node (false, TREE_TYPE (op0));
250 
251               if (code == EQ_EXPR)
252                 {
253                   edge_info = allocate_edge_info (true_edge);
254                   edge_info->lhs = op0;
255                   edge_info->rhs = (integer_zerop (op1) ? false_val : true_val);
256 
257                   edge_info = allocate_edge_info (false_edge);
258                   edge_info->lhs = op0;
259                   edge_info->rhs = (integer_zerop (op1) ? true_val : false_val);
260                 }
261               else
262                 {
263                   edge_info = allocate_edge_info (true_edge);
264                   edge_info->lhs = op0;
265                   edge_info->rhs = (integer_zerop (op1) ? true_val : false_val);
266 
267                   edge_info = allocate_edge_info (false_edge);
268                   edge_info->lhs = op0;
269                   edge_info->rhs = (integer_zerop (op1) ? false_val : true_val);
270                 }
271             }
272           else if (is_gimple_min_invariant (op0)
273                    && (TREE_CODE (op1) == SSA_NAME
274                        || is_gimple_min_invariant (op1)))
275             {
276               tree cond = build2 (code, boolean_type_node, op0, op1);
277               tree inverted = invert_truthvalue_loc (loc, cond);
278               bool can_infer_simple_equiv
279                 = !(HONOR_SIGNED_ZEROS (op0)
280                     && real_zerop (op0));
281               struct edge_info *edge_info;
282 
283               edge_info = allocate_edge_info (true_edge);
284               record_conditions (&edge_info->cond_equivalences, cond, inverted);
285 
286               if (can_infer_simple_equiv && code == EQ_EXPR)
287                 {
288                   edge_info->lhs = op1;
289                   edge_info->rhs = op0;
290                 }
291 
292               edge_info = allocate_edge_info (false_edge);
293               record_conditions (&edge_info->cond_equivalences, inverted, cond);
294 
295               if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
296                 {
297                   edge_info->lhs = op1;
298                   edge_info->rhs = op0;
299                 }
300             }
301 
302           else if (TREE_CODE (op0) == SSA_NAME
303                    && (TREE_CODE (op1) == SSA_NAME
304                        || is_gimple_min_invariant (op1)))
305             {
306               tree cond = build2 (code, boolean_type_node, op0, op1);
307               tree inverted = invert_truthvalue_loc (loc, cond);
308               bool can_infer_simple_equiv
309                 = !(HONOR_SIGNED_ZEROS (op1)
310                     && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
311               struct edge_info *edge_info;
312 
313               edge_info = allocate_edge_info (true_edge);
314               record_conditions (&edge_info->cond_equivalences, cond, inverted);
315 
316               if (can_infer_simple_equiv && code == EQ_EXPR)
317                 {
318                   edge_info->lhs = op0;
319                   edge_info->rhs = op1;
320                 }
321 
322               edge_info = allocate_edge_info (false_edge);
323               record_conditions (&edge_info->cond_equivalences, inverted, cond);
324 
325               if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
326                 {
327                   edge_info->lhs = op0;
328                   edge_info->rhs = op1;
329                 }
330             }
331         }
332 
333       /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
334     }
335 }
336 
337 
338 class dom_opt_dom_walker : public dom_walker
339 {
340 public:
341   dom_opt_dom_walker (cdi_direction direction,
342 		      class const_and_copies *const_and_copies,
343 		      class avail_exprs_stack *avail_exprs_stack)
344     : dom_walker (direction, true),
345       m_const_and_copies (const_and_copies),
346       m_avail_exprs_stack (avail_exprs_stack),
347       m_dummy_cond (NULL) {}
348 
349   virtual edge before_dom_children (basic_block);
350   virtual void after_dom_children (basic_block);
351 
352 private:
353 
354   /* Unwindable equivalences, both const/copy and expression varieties.  */
355   class const_and_copies *m_const_and_copies;
356   class avail_exprs_stack *m_avail_exprs_stack;
357 
358   gcond *m_dummy_cond;
359 };
360 
361 /* Jump threading, redundancy elimination and const/copy propagation.
362 
363    This pass may expose new symbols that need to be renamed into SSA.  For
364    every new symbol exposed, its corresponding bit will be set in
365    VARS_TO_RENAME.  */
366 
367 namespace {
368 
369 const pass_data pass_data_dominator =
370 {
371   GIMPLE_PASS, /* type */
372   "dom", /* name */
373   OPTGROUP_NONE, /* optinfo_flags */
374   TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
375   ( PROP_cfg | PROP_ssa ), /* properties_required */
376   0, /* properties_provided */
377   0, /* properties_destroyed */
378   0, /* todo_flags_start */
379   ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
380 };
381 
382 class pass_dominator : public gimple_opt_pass
383 {
384 public:
385   pass_dominator (gcc::context *ctxt)
386     : gimple_opt_pass (pass_data_dominator, ctxt),
387       may_peel_loop_headers_p (false)
388   {}
389 
390   /* opt_pass methods: */
391   opt_pass * clone () { return new pass_dominator (m_ctxt); }
392   void set_pass_param (unsigned int n, bool param)
393     {
394       gcc_assert (n == 0);
395       may_peel_loop_headers_p = param;
396     }
397   virtual bool gate (function *) { return flag_tree_dom != 0; }
398   virtual unsigned int execute (function *);
399 
400  private:
401   /* This flag is used to prevent loops from being peeled repeatedly in jump
402      threading; it will be removed once we preserve loop structures throughout
403      the compilation -- we will be able to mark the affected loops directly in
404      jump threading, and avoid peeling them next time.  */
405   bool may_peel_loop_headers_p;
406 }; // class pass_dominator
407 
408 unsigned int
409 pass_dominator::execute (function *fun)
410 {
411   memset (&opt_stats, 0, sizeof (opt_stats));
412 
413   /* Create our hash tables.  */
414   hash_table<expr_elt_hasher> *avail_exprs
415     = new hash_table<expr_elt_hasher> (1024);
416   class avail_exprs_stack *avail_exprs_stack
417     = new class avail_exprs_stack (avail_exprs);
418   class const_and_copies *const_and_copies = new class const_and_copies ();
419   need_eh_cleanup = BITMAP_ALLOC (NULL);
420   need_noreturn_fixup.create (0);
421 
422   calculate_dominance_info (CDI_DOMINATORS);
423   cfg_altered = false;
424 
425   /* We need to know loop structures in order to avoid destroying them
426      in jump threading.  Note that we still can e.g. thread through loop
427      headers to an exit edge, or through loop header to the loop body, assuming
428      that we update the loop info.
429 
430      TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
431      to several overly conservative bail-outs in jump threading, case
432      gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
433      missing.  Once jump threading is improved, LOOPS_HAVE_PREHEADERS will
434      no longer be needed here.  */
435   loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);
436 
437   /* Initialize the value-handle array.  */
438   threadedge_initialize_values ();
439 
440   /* We need accurate information regarding back edges in the CFG
441      for jump threading; this may include back edges that are not part of
442      a single loop.  */
443   mark_dfs_back_edges ();
444 
445   /* We want to create the edge info structures before the dominator walk
446      so that they'll be in place for the jump threader, particularly when
447      threading through a join block.
448 
449      The conditions will be lazily updated with global equivalences as
450      we reach them during the dominator walk.  */
451   basic_block bb;
452   FOR_EACH_BB_FN (bb, fun)
453     record_edge_info (bb);
454 
455   /* Recursively walk the dominator tree optimizing statements.  */
456   dom_opt_dom_walker walker (CDI_DOMINATORS,
457 			     const_and_copies,
458 			     avail_exprs_stack);
459   walker.walk (fun->cfg->x_entry_block_ptr);
460 
461   /* Look for blocks where we cleared EDGE_EXECUTABLE on an outgoing
462      edge.  When found, remove jump threads which contain any outgoing
463      edge from the affected block.  */
464   if (cfg_altered)
465     {
466       FOR_EACH_BB_FN (bb, fun)
467 	{
468 	  edge_iterator ei;
469 	  edge e;
470 
471 	  /* First see if there are any edges without EDGE_EXECUTABLE
472 	     set.  */
473 	  bool found = false;
474 	  FOR_EACH_EDGE (e, ei, bb->succs)
475 	    {
476 	      if ((e->flags & EDGE_EXECUTABLE) == 0)
477 		{
478 		  found = true;
479 		  break;
480 		}
481 	    }
482 
483 	  /* If there were any such edges found, then remove jump threads
484 	     containing any edge leaving BB.  */
485 	  if (found)
486 	    FOR_EACH_EDGE (e, ei, bb->succs)
487 	      remove_jump_threads_including (e);
488 	}
489     }
490 
491   {
492     gimple_stmt_iterator gsi;
493     basic_block bb;
494     FOR_EACH_BB_FN (bb, fun)
495       {
496 	for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
497 	  update_stmt_if_modified (gsi_stmt (gsi));
498       }
499   }
500 
501   /* If we exposed any new variables, go ahead and put them into
502      SSA form now, before we handle jump threading.  This simplifies
503      interactions between rewriting of _DECL nodes into SSA form
504      and rewriting SSA_NAME nodes into SSA form after block
505      duplication and CFG manipulation.  */
506   update_ssa (TODO_update_ssa);
507 
508   free_all_edge_infos ();
509 
510   /* Thread jumps, creating duplicate blocks as needed.  */
511   cfg_altered |= thread_through_all_blocks (may_peel_loop_headers_p);
512 
513   if (cfg_altered)
514     free_dominance_info (CDI_DOMINATORS);
515 
516   /* Removal of statements may make some EH edges dead.  Purge
517      such edges from the CFG as needed.  */
518   if (!bitmap_empty_p (need_eh_cleanup))
519     {
520       unsigned i;
521       bitmap_iterator bi;
522 
523       /* Jump threading may have created forwarder blocks from blocks
524 	 needing EH cleanup; the new successor of these blocks, which
525 	 has inherited from the original block, needs the cleanup.
526 	 Don't clear bits in the bitmap, as that can break the bitmap
527 	 iterator.  */
528       EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
529 	{
530 	  basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
531 	  if (bb == NULL)
532 	    continue;
533 	  while (single_succ_p (bb)
534 		 && (single_succ_edge (bb)->flags
535 		     & (EDGE_EH|EDGE_DFS_BACK)) == 0)
536 	    bb = single_succ (bb);
537 	  if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
538 	    continue;
539 	  if ((unsigned) bb->index != i)
540 	    bitmap_set_bit (need_eh_cleanup, bb->index);
541 	}
542 
543       gimple_purge_all_dead_eh_edges (need_eh_cleanup);
544       bitmap_clear (need_eh_cleanup);
545     }
546 
547   /* Fixup stmts that became noreturn calls.  This may require splitting
548      blocks and thus isn't possible during the dominator walk or before
549      jump threading finished.  Do this in reverse order so we don't
550      inadvertently remove a stmt we want to fix up by visiting a dominating
551      now noreturn call first.  */
552   while (!need_noreturn_fixup.is_empty ())
553     {
554       gimple *stmt = need_noreturn_fixup.pop ();
555       if (dump_file && dump_flags & TDF_DETAILS)
556 	{
557 	  fprintf (dump_file, "Fixing up noreturn call ");
558 	  print_gimple_stmt (dump_file, stmt, 0, 0);
559 	  fprintf (dump_file, "\n");
560 	}
561       fixup_noreturn_call (stmt);
562     }
563 
564   statistics_counter_event (fun, "Redundant expressions eliminated",
565 			    opt_stats.num_re);
566   statistics_counter_event (fun, "Constants propagated",
567 			    opt_stats.num_const_prop);
568   statistics_counter_event (fun, "Copies propagated",
569 			    opt_stats.num_copy_prop);
570 
571   /* Debugging dumps.  */
572   if (dump_file && (dump_flags & TDF_STATS))
573     dump_dominator_optimization_stats (dump_file, avail_exprs);
574 
575   loop_optimizer_finalize ();
576 
577   /* Delete our main hashtable.  */
578   delete avail_exprs;
579   avail_exprs = NULL;
580 
581   /* Free allocated bitmaps and stacks.  */
582   BITMAP_FREE (need_eh_cleanup);
583   need_noreturn_fixup.release ();
584   delete avail_exprs_stack;
585   delete const_and_copies;
586 
587   /* Free the value-handle array.  */
588   threadedge_finalize_values ();
589 
590   return 0;
591 }
592 
593 } // anon namespace
594 
595 gimple_opt_pass *
596 make_pass_dominator (gcc::context *ctxt)
597 {
598   return new pass_dominator (ctxt);
599 }
600 
601 
602 /* A trivial wrapper so that we can present the generic jump
603    threading code with a simple API for simplifying statements.  */
604 static tree
605 simplify_stmt_for_jump_threading (gimple *stmt,
606 				  gimple *within_stmt ATTRIBUTE_UNUSED,
607 				  class avail_exprs_stack *avail_exprs_stack,
608 				  basic_block bb ATTRIBUTE_UNUSED)
609 {
610   return avail_exprs_stack->lookup_avail_expr (stmt, false, true);
611 }
612 
613 /* Valueize hook for gimple_fold_stmt_to_constant_1.  */
614 
615 static tree
616 dom_valueize (tree t)
617 {
618   if (TREE_CODE (t) == SSA_NAME)
619     {
620       tree tem = SSA_NAME_VALUE (t);
621       if (tem)
622 	return tem;
623     }
624   return t;
625 }
626 
627 /* We have just found an equivalence for LHS on an edge E.
628    Look backwards to other uses of LHS and see if we can derive
629    additional equivalences that are valid on edge E.  */
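/* For example, if traversing E established a_1 == 5 and a statement
   b_2 = a_1 + 3 dominates E->dest, then b_2 == 8 also holds on E.  */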
630 static void
631 back_propagate_equivalences (tree lhs, edge e,
632 			     class const_and_copies *const_and_copies)
633 {
634   use_operand_p use_p;
635   imm_use_iterator iter;
636   bitmap domby = NULL;
637   basic_block dest = e->dest;
638 
639   /* Iterate over the uses of LHS to see if any dominate E->dest.
640      If so, they may create useful equivalences too.
641 
642      ???  If the code gets re-organized to a worklist to catch more
643      indirect opportunities and it is made to handle PHIs then this
644      should only consider use_stmts in basic-blocks we have already visited.  */
645   FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
646     {
647       gimple *use_stmt = USE_STMT (use_p);
648 
649       /* Often the use is in DEST, which we trivially know we can't use.
650 	 This is cheaper than the dominator set tests below.  */
651       if (dest == gimple_bb (use_stmt))
652 	continue;
653 
654       /* Filter out statements that can never produce a useful
655 	 equivalence.  */
656       tree lhs2 = gimple_get_lhs (use_stmt);
657       if (!lhs2 || TREE_CODE (lhs2) != SSA_NAME)
658 	continue;
659 
660       /* Profiling has shown the domination tests here can be fairly
661 	 expensive.  We get significant improvements by building the
662 	 set of blocks that dominate BB.  We can then just test
663 	 for set membership below.
664 
665 	 We also initialize the set lazily since often the only uses
666 	 are going to be in the same block as DEST.  */
667       if (!domby)
668 	{
669 	  domby = BITMAP_ALLOC (NULL);
670 	  basic_block bb = get_immediate_dominator (CDI_DOMINATORS, dest);
671 	  while (bb)
672 	    {
673 	      bitmap_set_bit (domby, bb->index);
674 	      bb = get_immediate_dominator (CDI_DOMINATORS, bb);
675 	    }
676 	}
677 
678       /* Skip USE_STMT if the block containing it does not dominate DEST.  */
679       if (!bitmap_bit_p (domby, gimple_bb (use_stmt)->index))
680 	continue;
681 
682       /* At this point USE_STMT dominates DEST and may result in a
683 	 useful equivalence.  Try to simplify its RHS to a constant
684 	 or SSA_NAME.  */
685       tree res = gimple_fold_stmt_to_constant_1 (use_stmt, dom_valueize,
686 						 no_follow_ssa_edges);
687       if (res && (TREE_CODE (res) == SSA_NAME || is_gimple_min_invariant (res)))
688 	record_equality (lhs2, res, const_and_copies);
689     }
690 
691   if (domby)
692     BITMAP_FREE (domby);
693 }
694 
695 /* Record that NAME has the value zero and, if NAME was set from a BIT_IOR_EXPR,
696    recurse into both operands recording their values as zero too.
697    RECURSION_LIMIT controls how far back we recurse through the operands
698    of the BIT_IOR_EXPR.  */
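/* For example, if x_1 = y_2 | z_3 and we learn that x_1 == 0, then
   y_2 == 0 and z_3 == 0 must hold as well, and we recurse into the
   definitions of y_2 and z_3 looking for further BIT_IOR_EXPRs.  */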
699 
700 static void
701 derive_equivalences_from_bit_ior (tree name,
702 				  const_and_copies *const_and_copies,
703 				  int recursion_limit)
704 {
705   if (recursion_limit == 0)
706     return;
707 
708   if (TREE_CODE (name) == SSA_NAME)
709     {
710       tree value = build_zero_cst (TREE_TYPE (name));
711 
712       /* This records the equivalence for the toplevel object.  */
713       record_equality (name, value, const_and_copies);
714 
715       /* And we can recurse into each operand to potentially find more
716 	 equivalences.  */
717       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
718       if (is_gimple_assign (def_stmt)
719 	  && gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)
720 	{
721 	  derive_equivalences_from_bit_ior (gimple_assign_rhs1 (def_stmt),
722 					    const_and_copies,
723 					    recursion_limit - 1);
724 	  derive_equivalences_from_bit_ior (gimple_assign_rhs2 (def_stmt),
725 					    const_and_copies,
726 					    recursion_limit - 1);
727 	}
728     }
729 }
730 
731 /* Record into CONST_AND_COPIES and AVAIL_EXPRS_STACK any equivalences implied
732    by traversing edge E (which are cached in E->aux).
733 
734    Callers are responsible for managing the unwinding markers.  */
735 void
736 record_temporary_equivalences (edge e,
737 			       class const_and_copies *const_and_copies,
738 			       class avail_exprs_stack *avail_exprs_stack)
739 {
740   int i;
741   struct edge_info *edge_info = (struct edge_info *) e->aux;
742 
743   /* If we have info associated with this edge, record it into
744      our equivalence tables.  */
745   if (edge_info)
746     {
747       cond_equivalence *eq;
748       /* If we have 0 = COND or 1 = COND equivalences, record them
749 	 into our expression hash tables.  */
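      /* For example, on the true edge of if (a_1 < b_2) we can record
	 that the expression a_1 < b_2 has the value 1 and that
	 a_1 >= b_2 has the value 0.  */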
750       for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
751 	{
752 	  avail_exprs_stack->record_cond (eq);
753 
754 	  /* If the condition is testing that X == 0 is true or X != 0 is false
755 	     and X is set from a BIT_IOR_EXPR, then we can record equivalences
756 	     for the operands of the BIT_IOR_EXPR (and recurse on those).  */
757 	  tree op0 = eq->cond.ops.binary.opnd0;
758 	  tree op1 = eq->cond.ops.binary.opnd1;
759 	  if (TREE_CODE (op0) == SSA_NAME && integer_zerop (op1))
760 	    {
761 	      enum tree_code code = eq->cond.ops.binary.op;
762 	      if ((code == EQ_EXPR && eq->value == boolean_true_node)
763 		  || (code == NE_EXPR && eq->value == boolean_false_node))
764 		derive_equivalences_from_bit_ior (op0, const_and_copies, 4);
765 
766 	      /* TODO: We could handle BIT_AND_EXPR in a similar fashion
767 		 recording that the operands have a nonzero value.  */
768 
769 	      /* TODO: We can handle more cases here, particularly when OP0 is
770 		 known to have a boolean range.  */
771 	    }
772 	}
773 
774       tree lhs = edge_info->lhs;
775       if (!lhs || TREE_CODE (lhs) != SSA_NAME)
776 	return;
777 
778       /* Record the simple NAME = VALUE equivalence.  */
779       tree rhs = edge_info->rhs;
780       record_equality (lhs, rhs, const_and_copies);
781 
782       /* We already recorded that LHS = RHS, with canonicalization,
783 	 value chain following, etc.
784 
785 	 We also want to record RHS = LHS, but without any canonicalization
786 	 or value chain following.  */
787       if (TREE_CODE (rhs) == SSA_NAME)
788 	const_and_copies->record_const_or_copy_raw (rhs, lhs,
789 						    SSA_NAME_VALUE (rhs));
790 
791       /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
792 	 set via a widening type conversion, then we may be able to record
793 	 additional equivalences.  */
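      /* For example, if this edge implies i_3 == 7 and i_3 was set by
	 i_3 = (int) c_2 with c_2 of a narrower type, then c_2 == 7 also
	 holds, provided 7 fits in the type of c_2.  */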
794       if (TREE_CODE (rhs) == INTEGER_CST)
795 	{
796 	  gimple *defstmt = SSA_NAME_DEF_STMT (lhs);
797 
798 	  if (defstmt
799 	      && is_gimple_assign (defstmt)
800 	      && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
801 	    {
802 	      tree old_rhs = gimple_assign_rhs1 (defstmt);
803 
804 	      /* If the conversion widens the original value and
805 		 the constant is in the range of the type of OLD_RHS,
806 		 then convert the constant and record the equivalence.
807 
808 		 Note that int_fits_type_p does not check the precision
809 		 if the upper and lower bounds are OK.  */
810 	      if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
811 		  && (TYPE_PRECISION (TREE_TYPE (lhs))
812 		      > TYPE_PRECISION (TREE_TYPE (old_rhs)))
813 		  && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
814 		{
815 		  tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
816 		  record_equality (old_rhs, newval, const_and_copies);
817 		}
818 	    }
819 	}
820 
821       /* Any equivalence found for LHS may result in additional
822 	 equivalences for other uses of LHS that we have already
823 	 processed.  */
824       back_propagate_equivalences (lhs, e, const_and_copies);
825     }
826 }
827 
828 /* PHI nodes can create equivalences too.
829 
830    Ignoring any alternatives which are the same as the result, if
831    all the alternatives are equal, then the PHI node creates an
832    equivalence.  */
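/* For example, x_4 = PHI <a_2, a_2, x_4> lets us record x_4 = a_2,
   since every alternative other than the result itself is a_2.  */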
833 
834 static void
835 record_equivalences_from_phis (basic_block bb)
836 {
837   gphi_iterator gsi;
838 
839   for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
840     {
841       gphi *phi = gsi.phi ();
842 
843       tree lhs = gimple_phi_result (phi);
844       tree rhs = NULL;
845       size_t i;
846 
847       for (i = 0; i < gimple_phi_num_args (phi); i++)
848 	{
849 	  tree t = gimple_phi_arg_def (phi, i);
850 
851 	  /* Ignore alternatives which are the same as our LHS.  Since
852 	     LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
853 	     can simply compare pointers.  */
854 	  if (lhs == t)
855 	    continue;
856 
857 	  /* If the associated edge is not marked as executable, then it
858 	     can be ignored.  */
859 	  if ((gimple_phi_arg_edge (phi, i)->flags & EDGE_EXECUTABLE) == 0)
860 	    continue;
861 
862 	  t = dom_valueize (t);
863 
864 	  /* If we have not processed an alternative yet, then set
865 	     RHS to this alternative.  */
866 	  if (rhs == NULL)
867 	    rhs = t;
868 	  /* If we have processed an alternative (stored in RHS), then
869 	     see if it is equal to this one.  If it isn't, then stop
870 	     the search.  */
871 	  else if (! operand_equal_for_phi_arg_p (rhs, t))
872 	    break;
873 	}
874 
875       /* If we had no interesting alternatives, then all the RHS alternatives
876 	 must have been the same as LHS.  */
877       if (!rhs)
878 	rhs = lhs;
879 
880       /* If we managed to iterate through each PHI alternative without
881 	 breaking out of the loop, then we have a PHI which may create
882 	 a useful equivalence.  We do not need to record unwind data for
883 	 this, since this is a true assignment and not an equivalence
884 	 inferred from a comparison.  All uses of this ssa name are dominated
885 	 by this assignment, so unwinding just costs time and space.  */
886       if (i == gimple_phi_num_args (phi)
887 	  && may_propagate_copy (lhs, rhs))
888 	set_ssa_name_value (lhs, rhs);
889     }
890 }
891 
892 /* Ignoring loop backedges, if BB has precisely one incoming edge then
893    return that edge.  Otherwise return NULL.  */
894 static edge
895 single_incoming_edge_ignoring_loop_edges (basic_block bb)
896 {
897   edge retval = NULL;
898   edge e;
899   edge_iterator ei;
900 
901   FOR_EACH_EDGE (e, ei, bb->preds)
902     {
903       /* A loop back edge can be identified by the destination of
904 	 the edge dominating the source of the edge.  */
905       if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
906 	continue;
907 
908       /* We can safely ignore edges that are not executable.  */
909       if ((e->flags & EDGE_EXECUTABLE) == 0)
910 	continue;
911 
912       /* If we have already seen a non-loop edge, then we must have
913 	 multiple incoming non-loop edges and thus we return NULL.  */
914       if (retval)
915 	return NULL;
916 
917       /* This is the first non-loop incoming edge we have found.  Record
918 	 it.  */
919       retval = e;
920     }
921 
922   return retval;
923 }
924 
925 /* Record any equivalences created by the incoming edge to BB into
926    CONST_AND_COPIES and AVAIL_EXPRS_STACK.  If BB has more than one
927    incoming edge, then no equivalence is created.  */
928 
929 static void
930 record_equivalences_from_incoming_edge (basic_block bb,
931     class const_and_copies *const_and_copies,
932     class avail_exprs_stack *avail_exprs_stack)
933 {
934   edge e;
935   basic_block parent;
936 
937   /* If our parent block ended with a control statement, then we may be
938      able to record some equivalences based on which outgoing edge from
939      the parent was followed.  */
940   parent = get_immediate_dominator (CDI_DOMINATORS, bb);
941 
942   e = single_incoming_edge_ignoring_loop_edges (bb);
943 
944   /* If we had a single incoming edge from our parent block, then enter
945      any data associated with the edge into our tables.  */
946   if (e && e->src == parent)
947     record_temporary_equivalences (e, const_and_copies, avail_exprs_stack);
948 }
949 
950 /* Dump statistics for the hash table HTAB.  */
951 
952 static void
953 htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
954 {
955   fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
956 	   (long) htab.size (),
957 	   (long) htab.elements (),
958 	   htab.collisions ());
959 }
960 
961 /* Dump SSA statistics on FILE.  */
962 
963 static void
964 dump_dominator_optimization_stats (FILE *file,
965 				   hash_table<expr_elt_hasher> *avail_exprs)
966 {
967   fprintf (file, "Total number of statements:                   %6ld\n\n",
968 	   opt_stats.num_stmts);
969   fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
970            opt_stats.num_exprs_considered);
971 
972   fprintf (file, "\nHash table statistics:\n");
973 
974   fprintf (file, "    avail_exprs: ");
975   htab_statistics (file, *avail_exprs);
976 }
977 
978 
979 /* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
980    This constrains the cases in which we may treat this as assignment.  */
981 
982 static void
983 record_equality (tree x, tree y, class const_and_copies *const_and_copies)
984 {
985   tree prev_x = NULL, prev_y = NULL;
986 
987   if (tree_swap_operands_p (x, y))
988     std::swap (x, y);
989 
990   /* Most of the time tree_swap_operands_p does what we want.  But there
991      are cases where we know one operand is better for copy propagation than
992      the other.  Given no other code cares about ordering of equality
993      comparison operators for that purpose, we just handle the special cases
994      here.  */
995   if (TREE_CODE (x) == SSA_NAME && TREE_CODE (y) == SSA_NAME)
996     {
997       /* If one operand is a single use operand, then make it
998 	 X.  This will preserve its single use properly and if this
999 	 conditional is eliminated, the computation of X can be
1000 	 eliminated as well.  */
1001       if (has_single_use (y) && ! has_single_use (x))
1002 	std::swap (x, y);
1003     }
1004   if (TREE_CODE (x) == SSA_NAME)
1005     prev_x = SSA_NAME_VALUE (x);
1006   if (TREE_CODE (y) == SSA_NAME)
1007     prev_y = SSA_NAME_VALUE (y);
1008 
1009   /* If one of the previous values is invariant, or invariant in more loops
1010      (by depth), then use that.
1011      Otherwise it doesn't matter which value we choose, just so
1012      long as we canonicalize on one value.  */
1013   if (is_gimple_min_invariant (y))
1014     ;
1015   else if (is_gimple_min_invariant (x))
1016     prev_x = x, x = y, y = prev_x, prev_x = prev_y;
1017   else if (prev_x && is_gimple_min_invariant (prev_x))
1018     x = y, y = prev_x, prev_x = prev_y;
1019   else if (prev_y)
1020     y = prev_y;
1021 
1022   /* After the swapping, we must have one SSA_NAME.  */
1023   if (TREE_CODE (x) != SSA_NAME)
1024     return;
1025 
1026   /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
1027      variable compared against zero.  If we're honoring signed zeros,
1028      then we cannot record this value unless we know that the value is
1029      nonzero.  */
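  /* For example, learning x_1 == 0.0 from a comparison does not tell us
     whether x_1 is +0.0 or -0.0, so we must not replace x_1 with 0.0.  */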
1030   if (HONOR_SIGNED_ZEROS (x)
1031       && (TREE_CODE (y) != REAL_CST
1032 	  || real_equal (&dconst0, &TREE_REAL_CST (y))))
1033     return;
1034 
1035   const_and_copies->record_const_or_copy (x, y, prev_x);
1036 }
1037 
1038 /* Returns true when STMT is a simple iv increment.  It detects the
1039    following situation:
1040 
1041    i_1 = phi (..., i_2)
1042    i_2 = i_1 +/- ...  */
1043 
1044 bool
1045 simple_iv_increment_p (gimple *stmt)
1046 {
1047   enum tree_code code;
1048   tree lhs, preinc;
1049   gimple *phi;
1050   size_t i;
1051 
1052   if (gimple_code (stmt) != GIMPLE_ASSIGN)
1053     return false;
1054 
1055   lhs = gimple_assign_lhs (stmt);
1056   if (TREE_CODE (lhs) != SSA_NAME)
1057     return false;
1058 
1059   code = gimple_assign_rhs_code (stmt);
1060   if (code != PLUS_EXPR
1061       && code != MINUS_EXPR
1062       && code != POINTER_PLUS_EXPR)
1063     return false;
1064 
1065   preinc = gimple_assign_rhs1 (stmt);
1066   if (TREE_CODE (preinc) != SSA_NAME)
1067     return false;
1068 
1069   phi = SSA_NAME_DEF_STMT (preinc);
1070   if (gimple_code (phi) != GIMPLE_PHI)
1071     return false;
1072 
1073   for (i = 0; i < gimple_phi_num_args (phi); i++)
1074     if (gimple_phi_arg_def (phi, i) == lhs)
1075       return true;
1076 
1077   return false;
1078 }
1079 
1080 /* Propagate known values from SSA_NAME_VALUE into the PHI nodes of the
1081    successors of BB.  */
1082 
1083 static void
1084 cprop_into_successor_phis (basic_block bb,
1085 			   class const_and_copies *const_and_copies)
1086 {
1087   edge e;
1088   edge_iterator ei;
1089 
1090   FOR_EACH_EDGE (e, ei, bb->succs)
1091     {
1092       int indx;
1093       gphi_iterator gsi;
1094 
1095       /* If this is an abnormal edge, then we do not want to copy propagate
1096 	 into the PHI alternative associated with this edge.  */
1097       if (e->flags & EDGE_ABNORMAL)
1098 	continue;
1099 
1100       gsi = gsi_start_phis (e->dest);
1101       if (gsi_end_p (gsi))
1102 	continue;
1103 
1104       /* We may have an equivalence associated with this edge.  While
1105 	 we cannot propagate it into non-dominated blocks, we can
1106 	 propagate it into PHIs in non-dominated blocks.  */
1107 
1108       /* Push the unwind marker so we can reset the const and copies
1109 	 table back to its original state after processing this edge.  */
1110       const_and_copies->push_marker ();
1111 
1112       /* Extract and record any simple NAME = VALUE equivalences.
1113 
1114 	 Don't bother with [01] = COND equivalences, they're not useful
1115 	 here.  */
1116       struct edge_info *edge_info = (struct edge_info *) e->aux;
1117       if (edge_info)
1118 	{
1119 	  tree lhs = edge_info->lhs;
1120 	  tree rhs = edge_info->rhs;
1121 
1122 	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
1123 	    const_and_copies->record_const_or_copy (lhs, rhs);
1124 	}
1125 
1126       indx = e->dest_idx;
1127       for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
1128 	{
1129 	  tree new_val;
1130 	  use_operand_p orig_p;
1131 	  tree orig_val;
1132           gphi *phi = gsi.phi ();
1133 
1134 	  /* The alternative may be associated with a constant, so verify
1135 	     it is an SSA_NAME before doing anything with it.  */
1136 	  orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
1137 	  orig_val = get_use_from_ptr (orig_p);
1138 	  if (TREE_CODE (orig_val) != SSA_NAME)
1139 	    continue;
1140 
1141 	  /* If we have *ORIG_P in our constant/copy table, then replace
1142 	     ORIG_P with its value in our constant/copy table.  */
1143 	  new_val = SSA_NAME_VALUE (orig_val);
1144 	  if (new_val
1145 	      && new_val != orig_val
1146 	      && may_propagate_copy (orig_val, new_val))
1147 	    propagate_value (orig_p, new_val);
1148 	}
1149 
1150       const_and_copies->pop_to_marker ();
1151     }
1152 }
1153 
1154 edge
1155 dom_opt_dom_walker::before_dom_children (basic_block bb)
1156 {
1157   gimple_stmt_iterator gsi;
1158 
1159   if (dump_file && (dump_flags & TDF_DETAILS))
1160     fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1161 
1162   /* Push a marker on the stacks of local information so that we know how
1163      far to unwind when we finalize this block.  */
1164   m_avail_exprs_stack->push_marker ();
1165   m_const_and_copies->push_marker ();
1166 
1167   record_equivalences_from_incoming_edge (bb, m_const_and_copies,
1168 					  m_avail_exprs_stack);
1169 
1170   /* PHI nodes can create equivalences too.  */
1171   record_equivalences_from_phis (bb);
1172 
1173   /* Create equivalences from redundant PHIs.  PHIs are only truly
1174      redundant when they exist in the same block, so push another
1175      marker and unwind right afterwards.  */
1176   m_avail_exprs_stack->push_marker ();
1177   for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1178     eliminate_redundant_computations (&gsi, m_const_and_copies,
1179 				      m_avail_exprs_stack);
1180   m_avail_exprs_stack->pop_to_marker ();
1181 
1182   edge taken_edge = NULL;
1183   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1184     taken_edge
1185       = optimize_stmt (bb, gsi, m_const_and_copies, m_avail_exprs_stack);
1186 
1187   /* Now prepare to process dominated blocks.  */
1188   record_edge_info (bb);
1189   cprop_into_successor_phis (bb, m_const_and_copies);
1190   if (taken_edge && !dbg_cnt (dom_unreachable_edges))
1191     return NULL;
1192 
1193   return taken_edge;
1194 }
1195 
1196 /* We have finished processing the dominator children of BB; perform
1197    any finalization actions in preparation for leaving this node in
1198    the dominator tree.  */
1199 
1200 void
1201 dom_opt_dom_walker::after_dom_children (basic_block bb)
1202 {
1203   if (! m_dummy_cond)
1204     m_dummy_cond = gimple_build_cond (NE_EXPR, integer_zero_node,
1205 				      integer_zero_node, NULL, NULL);
1206 
1207   thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
1208 			 m_avail_exprs_stack,
1209 			 simplify_stmt_for_jump_threading);
1210 
1211   /* These remove expressions local to BB from the tables.  */
1212   m_avail_exprs_stack->pop_to_marker ();
1213   m_const_and_copies->pop_to_marker ();
1214 }
1215 
1216 /* Search for redundant computations in STMT.  If any are found, then
1217    replace them with the variable holding the result of the computation.
1218 
1219    If safe, record this expression into AVAIL_EXPRS_STACK and
1220    CONST_AND_COPIES.  */
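/* For example, if a dominating block already computed t_5 = a_1 + b_2
   and STMT computes t_9 = a_1 + b_2, the lookup returns t_5 and we
   rewrite STMT as the copy t_9 = t_5.  */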
1221 
1222 static void
1223 eliminate_redundant_computations (gimple_stmt_iterator* gsi,
1224 				  class const_and_copies *const_and_copies,
1225 				  class avail_exprs_stack *avail_exprs_stack)
1226 {
1227   tree expr_type;
1228   tree cached_lhs;
1229   tree def;
1230   bool insert = true;
1231   bool assigns_var_p = false;
1232 
1233   gimple *stmt = gsi_stmt (*gsi);
1234 
1235   if (gimple_code (stmt) == GIMPLE_PHI)
1236     def = gimple_phi_result (stmt);
1237   else
1238     def = gimple_get_lhs (stmt);
1239 
1240   /* Certain expressions on the RHS can be optimized away, but cannot
1241      themselves be entered into the hash tables.  */
1242   if (! def
1243       || TREE_CODE (def) != SSA_NAME
1244       || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
1245       || gimple_vdef (stmt)
1246       /* Do not record equivalences for increments of ivs.  This would create
1247 	 overlapping live ranges for a very questionable gain.  */
1248       || simple_iv_increment_p (stmt))
1249     insert = false;
1250 
1251   /* Check if the expression has been computed before.  */
1252   cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, insert, true);
1253 
1254   opt_stats.num_exprs_considered++;
1255 
1256   /* Get the type of the expression we are trying to optimize.  */
1257   if (is_gimple_assign (stmt))
1258     {
1259       expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
1260       assigns_var_p = true;
1261     }
1262   else if (gimple_code (stmt) == GIMPLE_COND)
1263     expr_type = boolean_type_node;
1264   else if (is_gimple_call (stmt))
1265     {
1266       gcc_assert (gimple_call_lhs (stmt));
1267       expr_type = TREE_TYPE (gimple_call_lhs (stmt));
1268       assigns_var_p = true;
1269     }
1270   else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
1271     expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
1272   else if (gimple_code (stmt) == GIMPLE_PHI)
1273     /* We can't propagate into a phi, so the logic below doesn't apply.
1274        Instead record an equivalence between the cached LHS and the
1275        PHI result of this statement, provided they are in the same block.
1276        This should be sufficient to kill the redundant phi.  */
1277     {
1278       if (def && cached_lhs)
1279 	const_and_copies->record_const_or_copy (def, cached_lhs);
1280       return;
1281     }
1282   else
1283     gcc_unreachable ();
1284 
1285   if (!cached_lhs)
1286     return;
1287 
1288   /* It is safe to ignore types here since we have already done
1289      type checking in the hashing and equality routines.  In fact
1290      type checking here merely gets in the way of constant
1291      propagation.  Also, make sure that it is safe to propagate
1292      CACHED_LHS into the expression in STMT.  */
1293   if ((TREE_CODE (cached_lhs) != SSA_NAME
1294        && (assigns_var_p
1295            || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
1296       || may_propagate_copy_into_stmt (stmt, cached_lhs))
1297   {
1298       gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
1299 			   || is_gimple_min_invariant (cached_lhs));
1300 
1301       if (dump_file && (dump_flags & TDF_DETAILS))
1302 	{
1303 	  fprintf (dump_file, "  Replaced redundant expr '");
1304 	  print_gimple_expr (dump_file, stmt, 0, dump_flags);
1305 	  fprintf (dump_file, "' with '");
1306 	  print_generic_expr (dump_file, cached_lhs, dump_flags);
1307           fprintf (dump_file, "'\n");
1308 	}
1309 
1310       opt_stats.num_re++;
1311 
1312       if (assigns_var_p
1313 	  && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
1314 	cached_lhs = fold_convert (expr_type, cached_lhs);
1315 
1316       propagate_tree_value_into_stmt (gsi, cached_lhs);
1317 
1318       /* Since it is always necessary to mark the result as modified,
1319          perhaps we should move this into propagate_tree_value_into_stmt
1320          itself.  */
1321       gimple_set_modified (gsi_stmt (*gsi), true);
1322   }
1323 }
1324 
1325 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
1326    the available expressions table or the const_and_copies table.
1327    Detect and record those equivalences into AVAIL_EXPRS_STACK.
1328 
1329    We handle only very simple copy equivalences here.  The heavy
1330    lifting is done by eliminate_redundant_computations.  */
1331 
1332 static void
1333 record_equivalences_from_stmt (gimple *stmt, int may_optimize_p,
1334 			       class avail_exprs_stack *avail_exprs_stack)
1335 {
1336   tree lhs;
1337   enum tree_code lhs_code;
1338 
1339   gcc_assert (is_gimple_assign (stmt));
1340 
1341   lhs = gimple_assign_lhs (stmt);
1342   lhs_code = TREE_CODE (lhs);
1343 
1344   if (lhs_code == SSA_NAME
1345       && gimple_assign_single_p (stmt))
1346     {
1347       tree rhs = gimple_assign_rhs1 (stmt);
1348 
1349       /* If the RHS of the assignment is a constant or another variable that
1350 	 may be propagated, register it in the CONST_AND_COPIES table.  We
1351 	 do not need to record unwind data for this, since this is a true
1352 	 assignment and not an equivalence inferred from a comparison.  All
1353 	 uses of this ssa name are dominated by this assignment, so unwinding
1354 	 just costs time and space.  */
1355       if (may_optimize_p
1356 	  && (TREE_CODE (rhs) == SSA_NAME
1357 	      || is_gimple_min_invariant (rhs)))
1358 	{
1359 	  rhs = dom_valueize (rhs);
1360 
1361 	  if (dump_file && (dump_flags & TDF_DETAILS))
1362 	    {
1363 	      fprintf (dump_file, "==== ASGN ");
1364 	      print_generic_expr (dump_file, lhs, 0);
1365 	      fprintf (dump_file, " = ");
1366 	      print_generic_expr (dump_file, rhs, 0);
1367 	      fprintf (dump_file, "\n");
1368 	    }
1369 
1370 	  set_ssa_name_value (lhs, rhs);
1371 	}
1372     }
1373 
1374   /* Make sure we can propagate &x + CST.  */
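  /* For example, for p_1 = &a + 4 we record an equivalent folded address
     (roughly &MEM[(void *)&a + 4]) as the value of p_1; the exact form
     of the folded address may differ.  */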
1375   if (lhs_code == SSA_NAME
1376       && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
1377       && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
1378       && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
1379     {
1380       tree op0 = gimple_assign_rhs1 (stmt);
1381       tree op1 = gimple_assign_rhs2 (stmt);
1382       tree new_rhs
1383 	= build_fold_addr_expr (fold_build2 (MEM_REF,
1384 					     TREE_TYPE (TREE_TYPE (op0)),
1385 					     unshare_expr (op0),
1386 					     fold_convert (ptr_type_node,
1387 							   op1)));
1388       if (dump_file && (dump_flags & TDF_DETAILS))
1389 	{
1390 	  fprintf (dump_file, "==== ASGN ");
1391 	  print_generic_expr (dump_file, lhs, 0);
1392 	  fprintf (dump_file, " = ");
1393 	  print_generic_expr (dump_file, new_rhs, 0);
1394 	  fprintf (dump_file, "\n");
1395 	}
1396 
1397       set_ssa_name_value (lhs, new_rhs);
1398     }
1399 
1400   /* A memory store, even an aliased store, creates a useful
1401      equivalence.  By exchanging the LHS and RHS, creating suitable
1402      vops and recording the result in the available expression table,
1403      we may be able to expose more redundant loads.  */
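  /* For example, after the store *p_1 = x_2 we enter the artificial
     statement x_2 = *p_1 into the expression table, so a later load
     y_3 = *p_1 seeing the same virtual operand can be replaced by
     y_3 = x_2.  */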
1404   if (!gimple_has_volatile_ops (stmt)
1405       && gimple_references_memory_p (stmt)
1406       && gimple_assign_single_p (stmt)
1407       && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
1408 	  || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
1409       && !is_gimple_reg (lhs))
1410     {
1411       tree rhs = gimple_assign_rhs1 (stmt);
1412       gassign *new_stmt;
1413 
1414       /* Build a new statement with the RHS and LHS exchanged.  */
1415       if (TREE_CODE (rhs) == SSA_NAME)
1416         {
1417           /* NOTE tuples.  The call to gimple_build_assign below replaced
1418              a call to build_gimple_modify_stmt, which did not set the
1419              SSA_NAME_DEF_STMT on the LHS of the assignment.  Doing so
1420              may cause an SSA validation failure, as the LHS may be a
1421              default-initialized name and should have no definition.  I'm
1422              a bit dubious of this, as the artificial statement that we
1423              generate here may in fact be ill-formed, but it is simply
1424              used as an internal device in this pass, and never becomes
1425              part of the CFG.  */
1426 	  gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
1427           new_stmt = gimple_build_assign (rhs, lhs);
1428           SSA_NAME_DEF_STMT (rhs) = defstmt;
1429         }
1430       else
1431         new_stmt = gimple_build_assign (rhs, lhs);
1432 
1433       gimple_set_vuse (new_stmt, gimple_vdef (stmt));
1434 
1435       /* Finally enter the statement into the available expression
1436 	 table.  */
1437       avail_exprs_stack->lookup_avail_expr (new_stmt, true, true);
1438     }
1439 }
1440 
1441 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
1442    CONST_AND_COPIES.  */
1443 
1444 static void
1445 cprop_operand (gimple *stmt, use_operand_p op_p)
1446 {
1447   tree val;
1448   tree op = USE_FROM_PTR (op_p);
1449 
1450   /* If the operand has a known constant value or it is known to be a
1451      copy of some other variable, use the value or copy stored in
1452      CONST_AND_COPIES.  */
1453   val = SSA_NAME_VALUE (op);
1454   if (val && val != op)
1455     {
1456       /* Do not replace hard register operands in asm statements.  */
1457       if (gimple_code (stmt) == GIMPLE_ASM
1458 	  && !may_propagate_copy_into_asm (op))
1459 	return;
1460 
1461       /* Certain operands are not allowed to be copy propagated due
1462 	 to their interaction with exception handling and some GCC
1463 	 extensions.  */
1464       if (!may_propagate_copy (op, val))
1465 	return;
1466 
1467       /* Do not propagate copies into BIVs.
1468          See PR23821 and PR62217 for how this can disturb IV and
1469 	 number of iteration analysis.  */
1470       if (TREE_CODE (val) != INTEGER_CST)
1471 	{
1472 	  gimple *def = SSA_NAME_DEF_STMT (op);
1473 	  if (gimple_code (def) == GIMPLE_PHI
1474 	      && gimple_bb (def)->loop_father->header == gimple_bb (def))
1475 	    return;
1476 	}
1477 
1478       /* Dump details.  */
1479       if (dump_file && (dump_flags & TDF_DETAILS))
1480 	{
1481 	  fprintf (dump_file, "  Replaced '");
1482 	  print_generic_expr (dump_file, op, dump_flags);
1483 	  fprintf (dump_file, "' with %s '",
1484 		   (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
1485 	  print_generic_expr (dump_file, val, dump_flags);
1486 	  fprintf (dump_file, "'\n");
1487 	}
1488 
1489       if (TREE_CODE (val) != SSA_NAME)
1490 	opt_stats.num_const_prop++;
1491       else
1492 	opt_stats.num_copy_prop++;
1493 
1494       propagate_value (op_p, val);
1495 
1496       /* And note that we modified this statement.  This is now
1497 	 safe, even if we changed virtual operands since we will
1498 	 rescan the statement and rewrite its operands again.  */
1499       gimple_set_modified (stmt, true);
1500     }
1501 }
1502 
1503 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
1504    known value for that SSA_NAME (or NULL if no value is known).
1505 
1506    Propagate values from CONST_AND_COPIES into the USE operands
1507    of STMT.  */
1508 
1509 static void
1510 cprop_into_stmt (gimple *stmt)
1511 {
1512   use_operand_p op_p;
1513   ssa_op_iter iter;
1514   tree last_copy_propagated_op = NULL;
1515 
1516   FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
1517     {
1518       tree old_op = USE_FROM_PTR (op_p);
1519 
1520       /* If we have A = B and B = A in the copy propagation tables
1521 	 (due to an equality comparison), avoid substituting B for A
1522 	 then A for B in the trivially discovered cases.   This allows
1523 	 optimization of statements where A and B appear as input
1524 	 operands.  */
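      /* For example, with a_1 = b_2 and b_2 = a_1 both recorded, a
	 statement c_3 = a_1 + b_2 would otherwise have a_1 replaced by
	 b_2 and then b_2 replaced by a_1, which gains nothing.  */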
1525       if (old_op != last_copy_propagated_op)
1526 	{
1527 	  cprop_operand (stmt, op_p);
1528 
1529 	  tree new_op = USE_FROM_PTR (op_p);
1530 	  if (new_op != old_op && TREE_CODE (new_op) == SSA_NAME)
1531 	    last_copy_propagated_op = new_op;
1532 	}
1533     }
1534 }
1535 
1536 /* Optimize the statement in block BB pointed to by iterator SI
1537    using equivalences from CONST_AND_COPIES and AVAIL_EXPRS_STACK.
1538 
1539    We try to perform some simplistic global redundancy elimination and
1540    constant propagation:
1541 
1542    1- To detect global redundancy, we keep track of expressions that have
1543       been computed in this block and its dominators.  If we find that the
1544       same expression is computed more than once, we eliminate repeated
1545       computations by using the target of the first one.
1546 
1547    2- Constant values and copy assignments.  This is used to do very
1548       simplistic constant and copy propagation.  When a constant or copy
1549       assignment is found, we map the value on the RHS of the assignment to
1550       the variable in the LHS in the CONST_AND_COPIES table.  */
1551 
1552 static edge
1553 optimize_stmt (basic_block bb, gimple_stmt_iterator si,
1554 	       class const_and_copies *const_and_copies,
1555 	       class avail_exprs_stack *avail_exprs_stack)
1556 {
1557   gimple *stmt, *old_stmt;
1558   bool may_optimize_p;
1559   bool modified_p = false;
1560   bool was_noreturn;
1561   edge retval = NULL;
1562 
1563   old_stmt = stmt = gsi_stmt (si);
1564   was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);
1565 
1566   if (dump_file && (dump_flags & TDF_DETAILS))
1567     {
1568       fprintf (dump_file, "Optimizing statement ");
1569       print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1570     }
1571 
1572   update_stmt_if_modified (stmt);
1573   opt_stats.num_stmts++;
1574 
1575   /* Const/copy propagate into USES, VUSES and the RHS of VDEFs.  */
1576   cprop_into_stmt (stmt);
1577 
1578   /* If the statement has been modified with constant replacements,
1579      fold its RHS before checking for redundant computations.  */
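  /* E.g. (a sketch with hypothetical names): if propagation replaced
     x_1 with 4 in "y_2 = x_1 + 1", folding turns the statement into
     "y_2 = 5" before it is looked up in the expression tables.  */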
1580   if (gimple_modified_p (stmt))
1581     {
1582       tree rhs = NULL;
1583 
1584       /* Try to fold the statement making sure that STMT is kept
1585 	 up to date.  */
1586       if (fold_stmt (&si))
1587 	{
1588 	  stmt = gsi_stmt (si);
1589 	  gimple_set_modified (stmt, true);
1590 
1591 	  if (dump_file && (dump_flags & TDF_DETAILS))
1592 	    {
1593 	      fprintf (dump_file, "  Folded to: ");
1594 	      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1595 	    }
1596 	}
1597 
1598       /* We only need to consider cases that can yield a gimple operand.  */
1599       if (gimple_assign_single_p (stmt))
1600         rhs = gimple_assign_rhs1 (stmt);
1601       else if (gimple_code (stmt) == GIMPLE_GOTO)
1602         rhs = gimple_goto_dest (stmt);
1603       else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
1604         /* This should never be an ADDR_EXPR.  */
1605         rhs = gimple_switch_index (swtch_stmt);
1606 
1607       if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
1608         recompute_tree_invariant_for_addr_expr (rhs);
1609 
1610       /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
1611 	 even if fold_stmt already updated the stmt and thus cleared
1612 	 the gimple_modified_p flag on it.  */
1613       modified_p = true;
1614     }
1615 
1616   /* Check for redundant computations.  Do this optimization only for
1617      side-effect-free assignments, calls with an LHS, conditionals and
     switches.  */
1618   may_optimize_p = (!gimple_has_side_effects (stmt)
1619                     && (is_gimple_assign (stmt)
1620                         || (is_gimple_call (stmt)
1621                             && gimple_call_lhs (stmt) != NULL_TREE)
1622                         || gimple_code (stmt) == GIMPLE_COND
1623                         || gimple_code (stmt) == GIMPLE_SWITCH));
1624 
1625   if (may_optimize_p)
1626     {
1627       if (gimple_code (stmt) == GIMPLE_CALL)
1628 	{
1629 	  /* Resolve __builtin_constant_p.  If it hasn't been
1630 	     folded to integer_one_node by now, it's fairly
1631 	     certain that the value simply isn't constant.  */
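	  /* For instance (hypothetical names), a surviving call such as
	     "b_3 = __builtin_constant_p (n_2)" is simply resolved to 0
	     here.  */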
1632 	  tree callee = gimple_call_fndecl (stmt);
1633 	  if (callee
1634 	      && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
1635 	      && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
1636 	    {
1637 	      propagate_tree_value_into_stmt (&si, integer_zero_node);
1638 	      stmt = gsi_stmt (si);
1639 	    }
1640 	}
1641 
1642       if (gimple_code (stmt) == GIMPLE_COND)
1643 	{
1644 	  tree lhs = gimple_cond_lhs (stmt);
1645 	  tree rhs = gimple_cond_rhs (stmt);
1646 
1647 	  /* If the LHS has a range [0..1] and the RHS has a range ~[0..1],
1648 	     then this conditional is computable at compile time.  We can just
1649 	     shove either 0 or 1 into the LHS, mark the statement as modified
1650 	     and all the right things will just happen below.
1651 
1652 	     Note this would apply to any case where LHS has a range
1653 	     narrower than its type implies and RHS is outside that
1654 	     narrower range.  Future work.  */
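	  /* E.g. (hypothetical names): if b_7 is known to be 0 or 1 and
	     the condition is "if (b_7 == 42)", the LHS is replaced by 0
	     so the condition folds to a constant further below.  */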
1655 	  if (TREE_CODE (lhs) == SSA_NAME
1656 	      && ssa_name_has_boolean_range (lhs)
1657 	      && TREE_CODE (rhs) == INTEGER_CST
1658 	      && ! (integer_zerop (rhs) || integer_onep (rhs)))
1659 	    {
1660 	      gimple_cond_set_lhs (as_a <gcond *> (stmt),
1661 				   fold_convert (TREE_TYPE (lhs),
1662 						 integer_zero_node));
1663 	      gimple_set_modified (stmt, true);
1664 	    }
1665 	}
1666 
1667       update_stmt_if_modified (stmt);
1668       eliminate_redundant_computations (&si, const_and_copies,
1669 					avail_exprs_stack);
1670       stmt = gsi_stmt (si);
1671 
1672       /* Perform simple redundant store elimination.  */
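      /* A sketch with hypothetical names: for a store "*p_1 = x_2",
	 look up the reversed assignment "x_2 = *p_1"; if the available
	 expression table says *p_1 already holds x_2, the store changes
	 nothing and can be removed.  */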
1673       if (gimple_assign_single_p (stmt)
1674 	  && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1675 	{
1676 	  tree lhs = gimple_assign_lhs (stmt);
1677 	  tree rhs = gimple_assign_rhs1 (stmt);
1678 	  tree cached_lhs;
1679 	  gassign *new_stmt;
1680 	  rhs = dom_valueize (rhs);
1681 	  /* Build a new statement with the RHS and LHS exchanged.  */
1682 	  if (TREE_CODE (rhs) == SSA_NAME)
1683 	    {
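	      /* gimple_build_assign sets SSA_NAME_DEF_STMT of its LHS;
		 since NEW_STMT is only a lookup key and never enters the
		 IL, save and restore RHS's real defining statement.  */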
1684 	      gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
1685 	      new_stmt = gimple_build_assign (rhs, lhs);
1686 	      SSA_NAME_DEF_STMT (rhs) = defstmt;
1687 	    }
1688 	  else
1689 	    new_stmt = gimple_build_assign (rhs, lhs);
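	  /* Give NEW_STMT the store's VUSE (the incoming memory state) so
	     the availability lookup below treats the dummy load as reading
	     that state.  */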
1690 	  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
1691 	  cached_lhs = avail_exprs_stack->lookup_avail_expr (new_stmt, false,
1692 							     false);
1693 	  if (cached_lhs
1694 	      && rhs == cached_lhs)
1695 	    {
1696 	      basic_block bb = gimple_bb (stmt);
1697 	      unlink_stmt_vdef (stmt);
1698 	      if (gsi_remove (&si, true))
1699 		{
1700 		  bitmap_set_bit (need_eh_cleanup, bb->index);
1701 		  if (dump_file && (dump_flags & TDF_DETAILS))
1702 		    fprintf (dump_file, "  Flagged to clear EH edges.\n");
1703 		}
1704 	      release_defs (stmt);
1705 	      return retval;
1706 	    }
1707 	}
1708     }
1709 
1710   /* Record any additional equivalences created by this statement.  */
1711   if (is_gimple_assign (stmt))
1712     record_equivalences_from_stmt (stmt, may_optimize_p, avail_exprs_stack);
1713 
1714   /* If STMT is a GIMPLE_COND or GIMPLE_SWITCH and it was modified, then
1715      we may now know which outgoing edge it takes.  */
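  /* E.g. (hypothetical names): if propagation turned "if (x_5 > 2)" into
     "if (7 > 2)", the condition folds to a constant and the taken edge
     can be determined.  */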
1716   if (gimple_modified_p (stmt) || modified_p)
1717     {
1718       tree val = NULL;
1719 
1720       if (gimple_code (stmt) == GIMPLE_COND)
1721         val = fold_binary_loc (gimple_location (stmt),
1722 			       gimple_cond_code (stmt), boolean_type_node,
1723 			       gimple_cond_lhs (stmt),
1724 			       gimple_cond_rhs (stmt));
1725       else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
1726 	val = gimple_switch_index (swtch_stmt);
1727 
1728       if (val && TREE_CODE (val) == INTEGER_CST)
1729 	{
1730 	  retval = find_taken_edge (bb, val);
1731 	  if (retval)
1732 	    {
1733 	      /* Fix the condition to be either true or false.  */
1734 	      if (gimple_code (stmt) == GIMPLE_COND)
1735 		{
1736 		  if (integer_zerop (val))
1737 		    gimple_cond_make_false (as_a <gcond *> (stmt));
1738 		  else if (integer_onep (val))
1739 		    gimple_cond_make_true (as_a <gcond *> (stmt));
1740 		  else
1741 		    gcc_unreachable ();
1742 
1743 		  gimple_set_modified (stmt, true);
1744 		}
1745 
1746 	      /* Further simplifications may be possible.  */
1747 	      cfg_altered = true;
1748 	    }
1749 	}
1750 
1751       update_stmt_if_modified (stmt);
1752 
1753       /* If we simplified a statement in such a way that it can be shown
1754 	 not to trap, update the EH information and the CFG to match.  */
1755       if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
1756 	{
1757 	  bitmap_set_bit (need_eh_cleanup, bb->index);
1758 	  if (dump_file && (dump_flags & TDF_DETAILS))
1759 	    fprintf (dump_file, "  Flagged to clear EH edges.\n");
1760 	}
1761 
1762       if (!was_noreturn
1763 	  && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
1764 	need_noreturn_fixup.safe_push (stmt);
1765     }
1766   return retval;
1767 }
1768