1 /* Optimization of PHI nodes by converting them into straightline code.
2    Copyright (C) 2004-2017 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "insn-codes.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "cfghooks.h"
29 #include "tree-pass.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "gimple-pretty-print.h"
34 #include "fold-const.h"
35 #include "stor-layout.h"
36 #include "cfganal.h"
37 #include "gimplify.h"
38 #include "gimple-iterator.h"
39 #include "gimplify-me.h"
40 #include "tree-cfg.h"
41 #include "tree-dfa.h"
42 #include "domwalk.h"
43 #include "cfgloop.h"
44 #include "tree-data-ref.h"
45 #include "tree-scalar-evolution.h"
46 #include "tree-inline.h"
47 #include "params.h"
48 
49 static unsigned int tree_ssa_phiopt_worker (bool, bool);
50 static bool conditional_replacement (basic_block, basic_block,
51 				     edge, edge, gphi *, tree, tree);
52 static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree,
53 						gimple *);
54 static int value_replacement (basic_block, basic_block,
55 			      edge, edge, gimple *, tree, tree);
56 static bool minmax_replacement (basic_block, basic_block,
57 				edge, edge, gimple *, tree, tree);
58 static bool abs_replacement (basic_block, basic_block,
59 			     edge, edge, gimple *, tree, tree);
60 static bool cond_store_replacement (basic_block, basic_block, edge, edge,
61 				    hash_set<tree> *);
62 static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
63 static hash_set<tree> * get_non_trapping ();
64 static void replace_phi_edge_with_variable (basic_block, edge, gimple *, tree);
65 static void hoist_adjacent_loads (basic_block, basic_block,
66 				  basic_block, basic_block);
67 static bool gate_hoist_loads (void);
68 
69 /* This pass tries to transform conditional stores into unconditional
70    ones, enabling further simplifications with the simpler then and else
71    blocks.  In particular it replaces this:
72 
73      bb0:
74        if (cond) goto bb2; else goto bb1;
75      bb1:
76        *p = RHS;
77      bb2:
78 
79    with
80 
81      bb0:
82        if (cond) goto bb1; else goto bb2;
83      bb1:
84        condtmp' = *p;
85      bb2:
86        condtmp = PHI <RHS, condtmp'>
87        *p = condtmp;
88 
89    This transformation can only be done under several constraints,
90    documented below.  It also replaces:
91 
92      bb0:
93        if (cond) goto bb2; else goto bb1;
94      bb1:
95        *p = RHS1;
96        goto bb3;
97      bb2:
98        *p = RHS2;
99      bb3:
100 
101    with
102 
103      bb0:
104        if (cond) goto bb3; else goto bb1;
105      bb1:
106      bb3:
107        condtmp = PHI <RHS1, RHS2>
108        *p = condtmp;  */
109 
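/* An added illustration (not part of the original comment): at the C
   level the if-else store replacement corresponds to rewriting

     if (cond)
       *p = a;
     else
       *p = b;

   as

     tmp = cond ? a : b;
     *p = tmp;

   so that the store itself becomes unconditional.  */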
110 static unsigned int
111 tree_ssa_cs_elim (void)
112 {
113   unsigned todo;
114   /* ???  We are not interested in loop related info, but the following
115      will create it, ICEing as we didn't init loops with pre-headers.
116      An interfacing issue of find_data_references_in_bb.  */
117   loop_optimizer_init (LOOPS_NORMAL);
118   scev_initialize ();
119   todo = tree_ssa_phiopt_worker (true, false);
120   scev_finalize ();
121   loop_optimizer_finalize ();
122   return todo;
123 }
124 
125 /* Return the singleton PHI in the SEQ of PHIs, or else the single PHI
126    whose arguments for edges E0 and E1 differ; NULL otherwise.  */
127 static gphi *
128 single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
129 {
130   gimple_stmt_iterator i;
131   gphi *phi = NULL;
132   if (gimple_seq_singleton_p (seq))
133     return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
134   for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
135     {
136       gphi *p = as_a <gphi *> (gsi_stmt (i));
137       /* If the PHI arguments are equal then we can skip this PHI. */
138       if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
139 				       gimple_phi_arg_def (p, e1->dest_idx)))
140 	continue;
141 
142       /* If we already found a PHI whose arguments for the two edges
143 	 differ, then there is no singleton; return NULL. */
144       if (phi)
145 	return NULL;
146 
147       phi = p;
148     }
149   return phi;
150 }
151 
152 /* The core routine of conditional store replacement and normal
153    phi optimizations.  Both share much of the infrastructure in how
154    to match applicable basic block patterns.  DO_STORE_ELIM is true
155    when we want to do conditional store replacement, false otherwise.
156    DO_HOIST_LOADS is true when we want to hoist adjacent loads out
157    of diamond control flow patterns, false otherwise.  */
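/* An added sketch (for illustration) of the two shapes matched below:

     triangle:                 diamond:

        bb                        bb
        | \                      /  \
        | bb1                  bb1  bb2
        | /                      \  /
        bb2                      bb3

   BB ends in a GIMPLE_COND; the join block (bb2, resp. bb3) carries the
   PHI nodes that the individual replacements try to eliminate.  */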
158 static unsigned int
159 tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads)
160 {
161   basic_block bb;
162   basic_block *bb_order;
163   unsigned n, i;
164   bool cfgchanged = false;
165   hash_set<tree> *nontrap = 0;
166 
167   if (do_store_elim)
168     /* Calculate the set of non-trapping memory accesses.  */
169     nontrap = get_non_trapping ();
170 
171   /* Search every basic block for a COND_EXPR we may be able to optimize.
172 
173      We walk the blocks in an order that guarantees that a block with
174      a single predecessor is processed before the predecessor.
175      This ensures that we collapse inner ifs before visiting the
176      outer ones, and also that we do not try to visit a removed
177      block.  */
178   bb_order = single_pred_before_succ_order ();
179   n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
180 
181   for (i = 0; i < n; i++)
182     {
183       gimple *cond_stmt;
184       gphi *phi;
185       basic_block bb1, bb2;
186       edge e1, e2;
187       tree arg0, arg1;
188 
189       bb = bb_order[i];
190 
191       cond_stmt = last_stmt (bb);
192       /* Check to see if the last statement is a GIMPLE_COND.  */
193       if (!cond_stmt
194           || gimple_code (cond_stmt) != GIMPLE_COND)
195         continue;
196 
197       e1 = EDGE_SUCC (bb, 0);
198       bb1 = e1->dest;
199       e2 = EDGE_SUCC (bb, 1);
200       bb2 = e2->dest;
201 
202       /* We cannot do the optimization on abnormal edges.  */
203       if ((e1->flags & EDGE_ABNORMAL) != 0
204           || (e2->flags & EDGE_ABNORMAL) != 0)
205        continue;
206 
207       /* Skip if bb1 has no successors, or bb2 is missing or has no successors.  */
208       if (EDGE_COUNT (bb1->succs) == 0
209           || bb2 == NULL
210 	  || EDGE_COUNT (bb2->succs) == 0)
211         continue;
212 
213       /* Find which of the two blocks falls through to the other.  */
214       if (EDGE_SUCC (bb1, 0)->dest == bb2)
215         ;
216       else if (EDGE_SUCC (bb2, 0)->dest == bb1)
217         {
218 	  std::swap (bb1, bb2);
219 	  std::swap (e1, e2);
220 	}
221       else if (do_store_elim
222 	       && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
223 	{
224 	  basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
225 
226 	  if (!single_succ_p (bb1)
227 	      || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
228 	      || !single_succ_p (bb2)
229 	      || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
230 	      || EDGE_COUNT (bb3->preds) != 2)
231 	    continue;
232 	  if (cond_if_else_store_replacement (bb1, bb2, bb3))
233 	    cfgchanged = true;
234 	  continue;
235 	}
236       else if (do_hoist_loads
237 	       && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
238 	{
239 	  basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
240 
241 	  if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
242 	      && single_succ_p (bb1)
243 	      && single_succ_p (bb2)
244 	      && single_pred_p (bb1)
245 	      && single_pred_p (bb2)
246 	      && EDGE_COUNT (bb->succs) == 2
247 	      && EDGE_COUNT (bb3->preds) == 2
248 	      /* If one edge or the other is dominant, a conditional move
249 		 is likely to perform worse than the well-predicted branch.  */
250 	      && !predictable_edge_p (EDGE_SUCC (bb, 0))
251 	      && !predictable_edge_p (EDGE_SUCC (bb, 1)))
252 	    hoist_adjacent_loads (bb, bb1, bb2, bb3);
253 	  continue;
254 	}
255       else
256 	continue;
257 
258       e1 = EDGE_SUCC (bb1, 0);
259 
260       /* Make sure that bb1 is just a fall through.  */
261       if (!single_succ_p (bb1)
262 	  || (e1->flags & EDGE_FALLTHRU) == 0)
263         continue;
264 
265       /* Also make sure that bb1 has only one predecessor and that it
266 	 is bb.  */
267       if (!single_pred_p (bb1)
268           || single_pred (bb1) != bb)
269 	continue;
270 
271       if (do_store_elim)
272 	{
273 	  /* bb1 is the middle block, bb2 the join block, bb the split block,
274 	     e1 the fallthrough edge from bb1 to bb2.  We can't do the
275 	     optimization if the join block has more than two predecessors.  */
276 	  if (EDGE_COUNT (bb2->preds) > 2)
277 	    continue;
278 	  if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
279 	    cfgchanged = true;
280 	}
281       else
282 	{
283 	  gimple_seq phis = phi_nodes (bb2);
284 	  gimple_stmt_iterator gsi;
285 	  bool candorest = true;
286 
287 	  /* Value replacement can work with more than one PHI
288 	     so try that first. */
289 	  for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
290 	    {
291 	      phi = as_a <gphi *> (gsi_stmt (gsi));
292 	      arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
293 	      arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
294 	      if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
295 		{
296 		  candorest = false;
297 	          cfgchanged = true;
298 		  break;
299 		}
300 	    }
301 
302 	  if (!candorest)
303 	    continue;
304 
305 	  phi = single_non_singleton_phi_for_edges (phis, e1, e2);
306 	  if (!phi)
307 	    continue;
308 
309 	  arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
310 	  arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
311 
312 	  /* Something is wrong if we cannot find the arguments in the PHI
313 	     node.  */
314 	  gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
315 
316 	  gphi *newphi = factor_out_conditional_conversion (e1, e2, phi,
317 							    arg0, arg1,
318 							    cond_stmt);
319 	  if (newphi != NULL)
320 	    {
321 	      phi = newphi;
322 	      /* factor_out_conditional_conversion may create a new PHI in
323 		 BB2 and eliminate an existing PHI in BB2.  Recompute values
324 		 that may be affected by that change.  */
325 	      arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
326 	      arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
327 	      gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
328 	    }
329 
330 	  /* Do the replacement of conditional if it can be done.  */
331 	  if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
332 	    cfgchanged = true;
333 	  else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
334 	    cfgchanged = true;
335 	  else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
336 	    cfgchanged = true;
337 	}
338     }
339 
340   free (bb_order);
341 
342   if (do_store_elim)
343     delete nontrap;
344   /* If the CFG has changed, we should clean up the CFG.  */
345   if (cfgchanged && do_store_elim)
346     {
347       /* In cond-store replacement we have added some loads on edges
348          and new VOPS (as we moved the store, and created a load).  */
349       gsi_commit_edge_inserts ();
350       return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
351     }
352   else if (cfgchanged)
353     return TODO_cleanup_cfg;
354   return 0;
355 }
356 
357 /* Replace the PHI node element whose edge is E in block BB with variable NEW_TREE.
358    Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
359    is known to have two edges, one of which must reach BB).  */
360 
361 static void
362 replace_phi_edge_with_variable (basic_block cond_block,
363 				edge e, gimple *phi, tree new_tree)
364 {
365   basic_block bb = gimple_bb (phi);
366   basic_block block_to_remove;
367   gimple_stmt_iterator gsi;
368 
369   /* Change the PHI argument to NEW_TREE.  */
370   SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);
371 
372   /* Remove the empty basic block.  */
373   if (EDGE_SUCC (cond_block, 0)->dest == bb)
374     {
375       EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
376       EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
377       EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE;
378       EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count;
379 
380       block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
381     }
382   else
383     {
384       EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
385       EDGE_SUCC (cond_block, 1)->flags
386 	&= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
387       EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE;
388       EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count;
389 
390       block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
391     }
392   delete_basic_block (block_to_remove);
393 
394   /* Eliminate the COND_EXPR at the end of COND_BLOCK.  */
395   gsi = gsi_last_bb (cond_block);
396   gsi_remove (&gsi, true);
397 
398   if (dump_file && (dump_flags & TDF_DETAILS))
399     fprintf (dump_file,
400 	      "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
401 	      cond_block->index,
402 	      bb->index);
403 }
404 
405 /* PR66726: Factor conversion out of COND_EXPR.  If the arguments of the PHI
406    stmt are results of conversions, factor the conversion out and instead
407    convert the result of the PHI stmt.  COND_STMT is the controlling predicate.
408    Return the newly-created PHI, if any.  */
409 
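/* An added sketch for illustration: at the source level this turns

     x = c ? (long) a : (long) b;

   into

     tmp = c ? a : b;
     x = (long) tmp;

   (assuming a and b have compatible types), leaving one conversion
   instead of two.  */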
410 static gphi *
411 factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
412 				   tree arg0, tree arg1, gimple *cond_stmt)
413 {
414   gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
415   tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
416   tree temp, result;
417   gphi *newphi;
418   gimple_stmt_iterator gsi, gsi_for_def;
419   source_location locus = gimple_location (phi);
420   enum tree_code convert_code;
421 
422   /* Handle only PHI statements with two arguments.  TODO: If all
423      other arguments to PHI are INTEGER_CST or if their defining
424      statements have the same unary operation, we can handle more
425      than two arguments too.  */
426   if (gimple_phi_num_args (phi) != 2)
427     return NULL;
428 
429   /* First canonicalize to simplify tests.  */
430   if (TREE_CODE (arg0) != SSA_NAME)
431     {
432       std::swap (arg0, arg1);
433       std::swap (e0, e1);
434     }
435 
436   if (TREE_CODE (arg0) != SSA_NAME
437       || (TREE_CODE (arg1) != SSA_NAME
438 	  && TREE_CODE (arg1) != INTEGER_CST))
439     return NULL;
440 
441   /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
442      a conversion.  */
443   arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
444   if (!gimple_assign_cast_p (arg0_def_stmt))
445     return NULL;
446 
447   /* Use the RHS as new_arg0.  */
448   convert_code = gimple_assign_rhs_code (arg0_def_stmt);
449   new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
450   if (convert_code == VIEW_CONVERT_EXPR)
451     {
452       new_arg0 = TREE_OPERAND (new_arg0, 0);
453       if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
454 	return NULL;
455     }
456 
457   if (TREE_CODE (arg1) == SSA_NAME)
458     {
459       /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
460 	 is a conversion.  */
461       arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
462       if (!is_gimple_assign (arg1_def_stmt)
463 	  || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
464 	return NULL;
465 
466       /* Use the RHS as new_arg1.  */
467       new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
468       if (convert_code == VIEW_CONVERT_EXPR)
469 	new_arg1 = TREE_OPERAND (new_arg1, 0);
470     }
471   else
472     {
473       /* If arg1 is an INTEGER_CST, fold it to the new type.  */
474       if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
475 	  && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
476 	{
477 	  if (gimple_assign_cast_p (arg0_def_stmt))
478 	    {
479 	      /* For the INTEGER_CST case, we are just moving the
480 		 conversion from one place to another, which can often
481 		 hurt as the conversion moves further away from the
482 		 statement that computes the value.  So, perform this
483 		 only if new_arg0 is an operand of COND_STMT, or
484 		 if arg0_def_stmt is the only non-debug stmt in
485 		 its basic block, because then it is possible this
486 		 could enable further optimizations (minmax replacement
487 		 etc.).  See PR71016.  */
488 	      if (new_arg0 != gimple_cond_lhs (cond_stmt)
489 		  && new_arg0 != gimple_cond_rhs (cond_stmt)
490 		  && gimple_bb (arg0_def_stmt) == e0->src)
491 		{
492 		  gsi = gsi_for_stmt (arg0_def_stmt);
493 		  gsi_prev_nondebug (&gsi);
494 		  if (!gsi_end_p (gsi))
495 		    return NULL;
496 		  gsi = gsi_for_stmt (arg0_def_stmt);
497 		  gsi_next_nondebug (&gsi);
498 		  if (!gsi_end_p (gsi))
499 		    return NULL;
500 		}
501 	      new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
502 	    }
503 	  else
504 	    return NULL;
505 	}
506       else
507 	return NULL;
508     }
509 
510   /*  If arg0/arg1 have > 1 use, then this transformation actually increases
511       the number of expressions evaluated at runtime.  */
512   if (!has_single_use (arg0)
513       || (arg1_def_stmt && !has_single_use (arg1)))
514     return NULL;
515 
516   /* If the types of new_arg0 and new_arg1 are different, bail out.  */
517   if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
518     return NULL;
519 
520   /* Create a new PHI stmt.  */
521   result = PHI_RESULT (phi);
522   temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
523   newphi = create_phi_node (temp, gimple_bb (phi));
524 
525   if (dump_file && (dump_flags & TDF_DETAILS))
526     {
527       fprintf (dump_file, "PHI ");
528       print_generic_expr (dump_file, gimple_phi_result (phi), 0);
529       fprintf (dump_file,
530 	       " changed to factor conversion out from COND_EXPR.\n");
531       fprintf (dump_file, "New stmt with CAST that defines ");
532       print_generic_expr (dump_file, result, 0);
533       fprintf (dump_file, ".\n");
534     }
535 
536   /* Remove the old cast(s) that have a single use.  */
537   gsi_for_def = gsi_for_stmt (arg0_def_stmt);
538   gsi_remove (&gsi_for_def, true);
539   release_defs (arg0_def_stmt);
540 
541   if (arg1_def_stmt)
542     {
543       gsi_for_def = gsi_for_stmt (arg1_def_stmt);
544       gsi_remove (&gsi_for_def, true);
545       release_defs (arg1_def_stmt);
546     }
547 
548   add_phi_arg (newphi, new_arg0, e0, locus);
549   add_phi_arg (newphi, new_arg1, e1, locus);
550 
551   /* Create the conversion stmt and insert it.  */
552   if (convert_code == VIEW_CONVERT_EXPR)
553     {
554       temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
555       new_stmt = gimple_build_assign (result, temp);
556     }
557   else
558     new_stmt = gimple_build_assign (result, convert_code, temp);
559   gsi = gsi_after_labels (gimple_bb (phi));
560   gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
561 
562   /* Remove the original PHI stmt.  */
563   gsi = gsi_for_stmt (phi);
564   gsi_remove (&gsi, true);
565   return newphi;
566 }
567 
568 /*  The function conditional_replacement does the main work of doing the
569     conditional replacement.  Return true if the replacement is done.
570     Otherwise return false.
571     BB is the basic block where the replacement is going to be done.  ARG0
572     is argument 0 from PHI.  Likewise for ARG1.  */
573 
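/* An added sketch for illustration: the transform below rewrites

     r = (a < b) ? 1 : 0;

   as

     r = (int) (a < b);

   and the 0/-1 variant as r = -(int) (a < b), inverting the condition
   first when the zero sits on the true edge.  */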
574 static bool
575 conditional_replacement (basic_block cond_bb, basic_block middle_bb,
576 			 edge e0, edge e1, gphi *phi,
577 			 tree arg0, tree arg1)
578 {
579   tree result;
580   gimple *stmt;
581   gassign *new_stmt;
582   tree cond;
583   gimple_stmt_iterator gsi;
584   edge true_edge, false_edge;
585   tree new_var, new_var2;
586   bool neg;
587 
588   /* FIXME: Gimplification of complex type is too hard for now.  */
589   /* We aren't prepared to handle vectors either (and it is a question
590      if it would be worthwhile anyway).  */
591   if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
592 	|| POINTER_TYPE_P (TREE_TYPE (arg0)))
593       || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
594 	   || POINTER_TYPE_P (TREE_TYPE (arg1))))
595     return false;
596 
597   /* If the PHI arguments are the constants 0 and 1, or 0 and -1, we can
598      convert the PHI into a use of the condition.  */
599   if ((integer_zerop (arg0) && integer_onep (arg1))
600       || (integer_zerop (arg1) && integer_onep (arg0)))
601     neg = false;
602   else if ((integer_zerop (arg0) && integer_all_onesp (arg1))
603 	   || (integer_zerop (arg1) && integer_all_onesp (arg0)))
604     neg = true;
605   else
606     return false;
607 
608   if (!empty_block_p (middle_bb))
609     return false;
610 
611   /* At this point we know we have a GIMPLE_COND with two successors.
612      One successor is BB, the other successor is an empty block which
613      falls through into BB.
614 
615      There is a single PHI node at the join point (BB) and its arguments
616      are constants (0, 1) or (0, -1).
617 
618      So, given the condition COND, and the two PHI arguments, we can
619      rewrite this PHI into non-branching code:
620 
621        dest = (COND) or dest = COND'
622 
623      We use the condition as-is if the argument associated with the
624      true edge has the value one or the argument associated with the
625      false edge has the value zero.  Note that those conditions are not
626      the same since only one of the outgoing edges from the GIMPLE_COND
627      will directly reach BB and thus be associated with an argument.  */
628 
629   stmt = last_stmt (cond_bb);
630   result = PHI_RESULT (phi);
631 
632   /* To handle special cases like floating point comparison, it is easier and
633      less error-prone to build a tree and gimplify it on the fly though it is
634      less efficient.  */
635   cond = fold_build2_loc (gimple_location (stmt),
636 			  gimple_cond_code (stmt), boolean_type_node,
637 			  gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
638 
639   /* We need to know which is the true edge and which is the false
640      edge so that we know when to invert the condition below.  */
641   extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
642   if ((e0 == true_edge && integer_zerop (arg0))
643       || (e0 == false_edge && !integer_zerop (arg0))
644       || (e1 == true_edge && integer_zerop (arg1))
645       || (e1 == false_edge && !integer_zerop (arg1)))
646     cond = fold_build1_loc (gimple_location (stmt),
647                             TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);
648 
649   if (neg)
650     {
651       cond = fold_convert_loc (gimple_location (stmt),
652                                TREE_TYPE (result), cond);
653       cond = fold_build1_loc (gimple_location (stmt),
654                               NEGATE_EXPR, TREE_TYPE (cond), cond);
655     }
656 
657   /* Insert our new statements at the end of conditional block before the
658      COND_STMT.  */
659   gsi = gsi_for_stmt (stmt);
660   new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
661 				      GSI_SAME_STMT);
662 
663   if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
664     {
665       source_location locus_0, locus_1;
666 
667       new_var2 = make_ssa_name (TREE_TYPE (result));
668       new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
669       gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
670       new_var = new_var2;
671 
672       /* Set the locus to the first argument, unless it doesn't have one.  */
673       locus_0 = gimple_phi_arg_location (phi, 0);
674       locus_1 = gimple_phi_arg_location (phi, 1);
675       if (locus_0 == UNKNOWN_LOCATION)
676         locus_0 = locus_1;
677       gimple_set_location (new_stmt, locus_0);
678     }
679 
680   replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);
681   reset_flow_sensitive_info_in_bb (cond_bb);
682 
683   /* Note that we optimized this PHI.  */
684   return true;
685 }
686 
687 /* Update *ARG which is defined in STMT so that it contains the
688    computed value if that seems profitable.  Return true if the
689    statement is made dead by that rewriting.  */
690 
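/* An added sketch for illustration: given a use such as

     arg = &p->first;      (where 'first' sits at offset 0 in *p)

   the ADDR_EXPR case below rewrites ARG to the pointer P itself; this
   is how the VEC_BASE pattern from vec.h gets matched.  */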
691 static bool
692 jump_function_from_stmt (tree *arg, gimple *stmt)
693 {
694   enum tree_code code = gimple_assign_rhs_code (stmt);
695   if (code == ADDR_EXPR)
696     {
697       /* For arg = &p->i transform it to p, if possible.  */
698       tree rhs1 = gimple_assign_rhs1 (stmt);
699       HOST_WIDE_INT offset;
700       tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
701 						&offset);
702       if (tem
703 	  && TREE_CODE (tem) == MEM_REF
704 	  && (mem_ref_offset (tem) + offset) == 0)
705 	{
706 	  *arg = TREE_OPERAND (tem, 0);
707 	  return true;
708 	}
709     }
710   /* TODO: Much like IPA-CP jump-functions we want to handle constant
711      additions symbolically here, and we'd need to update the comparison
712      code that compares the arg + cst tuples in our caller.  For now the
713      code above exactly handles the VEC_BASE pattern from vec.h.  */
714   return false;
715 }
716 
717 /* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
718    of the form SSA_NAME NE 0.
719 
720    If RHS is fed by a simple EQ_EXPR comparison of two values, see if
721    the two input values of the EQ_EXPR match arg0 and arg1.
722 
723    If so update *code and return TRUE.  Otherwise return FALSE.  */
724 
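/* An added sketch for illustration: the shape being matched is

     t_1 = a == b;
     t_2 = t_1 & t_3;
     if (t_2 != 0) ...

   with PHI arguments a and b; on a match *CODE becomes EQ_EXPR.  */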
725 static bool
726 rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
727 				  enum tree_code *code, const_tree rhs)
728 {
729   /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
730      statement.  */
731   if (TREE_CODE (rhs) == SSA_NAME)
732     {
733       gimple *def1 = SSA_NAME_DEF_STMT (rhs);
734 
735       /* Verify the defining statement has an EQ_EXPR on the RHS.  */
736       if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
737 	{
738 	  /* Finally verify the source operands of the EQ_EXPR are equal
739 	     to arg0 and arg1.  */
740 	  tree op0 = gimple_assign_rhs1 (def1);
741 	  tree op1 = gimple_assign_rhs2 (def1);
742 	  if ((operand_equal_for_phi_arg_p (arg0, op0)
743 	       && operand_equal_for_phi_arg_p (arg1, op1))
744 	      || (operand_equal_for_phi_arg_p (arg0, op1)
745                && operand_equal_for_phi_arg_p (arg1, op0)))
746 	    {
747 	      /* We will perform the optimization.  */
748 	      *code = gimple_assign_rhs_code (def1);
749 	      return true;
750 	    }
751 	}
752     }
753   return false;
754 }
755 
756 /* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.
757 
758    Also return TRUE if arg0/arg1 are equal to the source arguments of
759    an EQ comparison feeding a BIT_AND_EXPR which feeds COND.
760 
761    Return FALSE otherwise.  */
762 
763 static bool
764 operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
765 				     enum tree_code *code, gimple *cond)
766 {
767   gimple *def;
768   tree lhs = gimple_cond_lhs (cond);
769   tree rhs = gimple_cond_rhs (cond);
770 
771   if ((operand_equal_for_phi_arg_p (arg0, lhs)
772        && operand_equal_for_phi_arg_p (arg1, rhs))
773       || (operand_equal_for_phi_arg_p (arg1, lhs)
774 	  && operand_equal_for_phi_arg_p (arg0, rhs)))
775     return true;
776 
777   /* Now handle more complex case where we have an EQ comparison
778      which feeds a BIT_AND_EXPR which feeds COND.
779 
780      First verify that COND is of the form SSA_NAME NE 0.  */
781   if (*code != NE_EXPR || !integer_zerop (rhs)
782       || TREE_CODE (lhs) != SSA_NAME)
783     return false;
784 
785   /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR.  */
786   def = SSA_NAME_DEF_STMT (lhs);
787   if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
788     return false;
789 
790   /* Now verify arg0/arg1 correspond to the source arguments of an
791      EQ comparison feeding the BIT_AND_EXPR.  */
792 
793   tree tmp = gimple_assign_rhs1 (def);
794   if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
795     return true;
796 
797   tmp = gimple_assign_rhs2 (def);
798   if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
799     return true;
800 
801   return false;
802 }
803 
804 /* Returns true if ARG is a neutral element for operation CODE,
805    with ARG used as the right operand if RIGHT is true.  */
806 
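/* Added examples for illustration: x + 0, x | 0, x ^ 0, x * 1 and
   x & -1 all equal x for either operand order, whereas x - 0, x << 0
   and x / 1 equal x only when the neutral element is the RIGHT
   operand (0 - x is not x).  */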
807 static bool
808 neutral_element_p (tree_code code, tree arg, bool right)
809 {
810   switch (code)
811     {
812     case PLUS_EXPR:
813     case BIT_IOR_EXPR:
814     case BIT_XOR_EXPR:
815       return integer_zerop (arg);
816 
817     case LROTATE_EXPR:
818     case RROTATE_EXPR:
819     case LSHIFT_EXPR:
820     case RSHIFT_EXPR:
821     case MINUS_EXPR:
822     case POINTER_PLUS_EXPR:
823       return right && integer_zerop (arg);
824 
825     case MULT_EXPR:
826       return integer_onep (arg);
827 
828     case TRUNC_DIV_EXPR:
829     case CEIL_DIV_EXPR:
830     case FLOOR_DIV_EXPR:
831     case ROUND_DIV_EXPR:
832     case EXACT_DIV_EXPR:
833       return right && integer_onep (arg);
834 
835     case BIT_AND_EXPR:
836       return integer_all_onesp (arg);
837 
838     default:
839       return false;
840     }
841 }
842 
843 /* Returns true if ARG is an absorbing element for operation CODE; RIGHT says whether ARG is the right operand, and RVAL is the other operand.  */
844 
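/* Added examples for illustration: x * 0, x & 0 and x | -1 are
   constant for any x; 0 << x, 0 >> x, 0 / x and 0 % x absorb only
   with 0 as the LEFT operand, and the division/modulo cases further
   require the other operand RVAL to be provably nonzero.  */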
845 static bool
846 absorbing_element_p (tree_code code, tree arg, bool right, tree rval)
847 {
848   switch (code)
849     {
850     case BIT_IOR_EXPR:
851       return integer_all_onesp (arg);
852 
853     case MULT_EXPR:
854     case BIT_AND_EXPR:
855       return integer_zerop (arg);
856 
857     case LSHIFT_EXPR:
858     case RSHIFT_EXPR:
859     case LROTATE_EXPR:
860     case RROTATE_EXPR:
861       return !right && integer_zerop (arg);
862 
863     case TRUNC_DIV_EXPR:
864     case CEIL_DIV_EXPR:
865     case FLOOR_DIV_EXPR:
866     case ROUND_DIV_EXPR:
867     case EXACT_DIV_EXPR:
868     case TRUNC_MOD_EXPR:
869     case CEIL_MOD_EXPR:
870     case FLOOR_MOD_EXPR:
871     case ROUND_MOD_EXPR:
872       return (!right
873 	      && integer_zerop (arg)
874 	      && tree_single_nonzero_warnv_p (rval, NULL));
875 
876     default:
877       return false;
878     }
879 }
880 
881 /*  The function value_replacement does the main work of doing the value
882     replacement.  Return non-zero if the replacement is done.  Otherwise return
883     0.  If we remove the middle basic block, return 2.
884     BB is the basic block where the replacement is going to be done.  ARG0
885     is argument 0 from the PHI.  Likewise for ARG1.  */
886 
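/* An added sketch for illustration: value replacement turns

     if (a == b)
       r = b;
     else
       r = a;

   into plain r = a, since a and b are equal on the true edge.  */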
887 static int
888 value_replacement (basic_block cond_bb, basic_block middle_bb,
889 		   edge e0, edge e1, gimple *phi,
890 		   tree arg0, tree arg1)
891 {
892   gimple_stmt_iterator gsi;
893   gimple *cond;
894   edge true_edge, false_edge;
895   enum tree_code code;
896   bool empty_or_with_defined_p = true;
897 
898   /* If the type says honor signed zeros we cannot do this
899      optimization.  */
900   if (HONOR_SIGNED_ZEROS (arg1))
901     return 0;
902 
903   /* If there is a statement in MIDDLE_BB that defines one of the PHI
904      arguments, then adjust arg0 or arg1.  */
905   gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
906   while (!gsi_end_p (gsi))
907     {
908       gimple *stmt = gsi_stmt (gsi);
909       tree lhs;
910       gsi_next_nondebug (&gsi);
911       if (!is_gimple_assign (stmt))
912 	{
913 	  empty_or_with_defined_p = false;
914 	  continue;
915 	}
916       /* Now try to adjust arg0 or arg1 according to the computation
917 	 in the statement.  */
918       lhs = gimple_assign_lhs (stmt);
919       if (!(lhs == arg0
920 	     && jump_function_from_stmt (&arg0, stmt))
921 	    || (lhs == arg1
922 		&& jump_function_from_stmt (&arg1, stmt)))
923 	empty_or_with_defined_p = false;
924     }
925 
926   cond = last_stmt (cond_bb);
927   code = gimple_cond_code (cond);
928 
929   /* This transformation is only valid for equality comparisons.  */
930   if (code != NE_EXPR && code != EQ_EXPR)
931     return 0;
932 
933   /* We need to know which is the true edge and which is the false
934      edge so that we know which PHI argument goes with which outcome.  */
935   extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
936 
937   /* At this point we know we have a COND_EXPR with two successors.
938      One successor is BB, the other successor is an empty block which
939      falls through into BB.
940 
941      The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.
942 
943      There is a single PHI node at the join point (BB) with two arguments.
944 
945      We now need to verify that the two arguments in the PHI node match
946      the two arguments to the equality comparison.  */
947 
948   if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
949     {
950       edge e;
951       tree arg;
952 
953       /* For NE_EXPR, we want to build an assignment result = arg where
954 	 arg is the PHI argument associated with the true edge.  For
955 	 EQ_EXPR we want the PHI argument associated with the false edge.  */
956       e = (code == NE_EXPR ? true_edge : false_edge);
957 
958       /* Unfortunately, E may not reach BB (it may instead have gone to
959 	 OTHER_BLOCK).  If that is the case, then we want the single outgoing
960 	 edge from OTHER_BLOCK which reaches BB and represents the desired
961 	 path from COND_BLOCK.  */
962       if (e->dest == middle_bb)
963 	e = single_succ_edge (e->dest);
964 
965       /* Now we know the incoming edge to BB that has the argument for the
966 	 RHS of our new assignment statement.  */
967       if (e0 == e)
968 	arg = arg0;
969       else
970 	arg = arg1;
971 
972       /* If the middle basic block was empty or only defines the
973 	 PHI arguments, and this is the single PHI whose args differ
974 	 for the edges e0 and e1, then we can remove the middle basic block. */
975       if (empty_or_with_defined_p
976 	  && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
977 						 e0, e1) == phi)
978 	{
979           replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
980 	  /* Note that we optimized this PHI.  */
981 	  return 2;
982 	}
983       else
984 	{
985 	  /* Replace the PHI arguments with arg. */
986 	  SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
987 	  SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
988 	  if (dump_file && (dump_flags & TDF_DETAILS))
989 	    {
990 	      fprintf (dump_file, "PHI ");
991 	      print_generic_expr (dump_file, gimple_phi_result (phi), 0);
992 	      fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
993 		       cond_bb->index);
994 	      print_generic_expr (dump_file, arg, 0);
995 	      fprintf (dump_file, ".\n");
996             }
997           return 1;
998 	}
999 
1000     }
1001 
1002   /* Now optimize (x != 0) ? x + y : y to just x + y.
1003      The following condition is too restrictive, there can easily be another
1004      stmt in middle_bb, for instance a CONVERT_EXPR for the second argument.  */
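  /* Added note for illustration: when x == 0 we have x + y == y, so the
     PHI value is x + y on both edges; likewise (x != 0) ? x * y : 0 is
     always x * y, because x == 0 makes the product zero.  */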
1005   gimple *assign = last_and_only_stmt (middle_bb);
1006   if (!assign || gimple_code (assign) != GIMPLE_ASSIGN
1007       || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
1008       || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
1009 	  && !POINTER_TYPE_P (TREE_TYPE (arg0))))
1010     return 0;
1011 
1012   /* Punt if there are (degenerate) PHIs in middle_bb, there should not be.  */
1013   if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
1014     return 0;
1015 
1016   /* Only transform if it removes the condition.  */
1017   if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
1018     return 0;
1019 
1020   /* Size-wise, this is always profitable.  */
1021   if (optimize_bb_for_speed_p (cond_bb)
1022       /* The special case is useless if it has a low probability.  */
1023       && profile_status_for_fn (cfun) != PROFILE_ABSENT
1024       && EDGE_PRED (middle_bb, 0)->probability < PROB_EVEN
1025       /* If assign is cheap, there is no point avoiding it.  */
1026       && estimate_num_insns (assign, &eni_time_weights)
1027 	 >= 3 * estimate_num_insns (cond, &eni_time_weights))
1028     return 0;
1029 
1030   tree lhs = gimple_assign_lhs (assign);
1031   tree rhs1 = gimple_assign_rhs1 (assign);
1032   tree rhs2 = gimple_assign_rhs2 (assign);
1033   enum tree_code code_def = gimple_assign_rhs_code (assign);
1034   tree cond_lhs = gimple_cond_lhs (cond);
1035   tree cond_rhs = gimple_cond_rhs (cond);
1036 
1037   if (((code == NE_EXPR && e1 == false_edge)
1038 	|| (code == EQ_EXPR && e1 == true_edge))
1039       && arg0 == lhs
1040       && ((arg1 == rhs1
1041 	   && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
1042 	   && neutral_element_p (code_def, cond_rhs, true))
1043 	  || (arg1 == rhs2
1044 	      && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
1045 	      && neutral_element_p (code_def, cond_rhs, false))
1046 	  || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
1047 	      && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
1048 		   && absorbing_element_p (code_def, cond_rhs, true, rhs2))
1049 		  || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
1050 		      && absorbing_element_p (code_def,
1051 					      cond_rhs, false, rhs2))))))
1052     {
1053       gsi = gsi_for_stmt (cond);
1054       if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
1055 	{
1056 	  /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
1057 	     def-stmt in:
1058 	     if (n_5 != 0)
1059 	       goto <bb 3>;
1060 	     else
1061 	       goto <bb 4>;
1062 
1063 	     <bb 3>:
1064 	     # RANGE [0, 4294967294]
1065 	     u_6 = n_5 + 4294967295;
1066 
1067 	     <bb 4>:
1068 	     # u_3 = PHI <u_6(3), 4294967295(2)>  */
1069 	  SSA_NAME_RANGE_INFO (lhs) = NULL;
1070 	  /* If available, we can use VR of phi result at least.  */
1071 	  tree phires = gimple_phi_result (phi);
1072 	  struct range_info_def *phires_range_info
1073 	    = SSA_NAME_RANGE_INFO (phires);
1074 	  if (phires_range_info)
1075 	    duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
1076 					   phires_range_info);
1077 	}
1078       gimple_stmt_iterator gsi_from = gsi_for_stmt (assign);
1079       gsi_move_before (&gsi_from, &gsi);
1080       replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
1081       return 2;
1082     }
1083 
1084   return 0;
1085 }
1086 
1087 /*  The function minmax_replacement does the main work of doing the minmax
1088     replacement.  Return true if the replacement is done.  Otherwise return
1089     false.
1090     BB is the basic block where the replacement is going to be done.  ARG0
1091     is argument 0 from the PHI.  Likewise for ARG1.  */
1092 
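/* An added sketch for illustration: with an empty middle block

     if (a < b)
       r = a;
     else
       r = b;

   becomes r = MIN_EXPR <a, b>; the non-empty case below additionally
   folds a clamp such as MAX (a, d) into a MIN/MAX pair.  */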
1093 static bool
1094 minmax_replacement (basic_block cond_bb, basic_block middle_bb,
1095 		    edge e0, edge e1, gimple *phi,
1096 		    tree arg0, tree arg1)
1097 {
1098   tree result, type;
1099   gcond *cond;
1100   gassign *new_stmt;
1101   edge true_edge, false_edge;
1102   enum tree_code cmp, minmax, ass_code;
1103   tree smaller, alt_smaller, larger, alt_larger, arg_true, arg_false;
1104   gimple_stmt_iterator gsi, gsi_from;
1105 
1106   type = TREE_TYPE (PHI_RESULT (phi));
1107 
1108   /* The optimization may be unsafe due to NaNs.  */
1109   if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
1110     return false;
1111 
1112   cond = as_a <gcond *> (last_stmt (cond_bb));
1113   cmp = gimple_cond_code (cond);
1114 
1115   /* This transformation is only valid for order comparisons.  Record which
1116      operand is smaller/larger if the result of the comparison is true.  */
1117   alt_smaller = NULL_TREE;
1118   alt_larger = NULL_TREE;
1119   if (cmp == LT_EXPR || cmp == LE_EXPR)
1120     {
1121       smaller = gimple_cond_lhs (cond);
1122       larger = gimple_cond_rhs (cond);
1123       /* If we have smaller < CST it is equivalent to smaller <= CST-1.
1124 	 Likewise smaller <= CST is equivalent to smaller < CST+1.  */
1125       if (TREE_CODE (larger) == INTEGER_CST)
1126 	{
1127 	  if (cmp == LT_EXPR)
1128 	    {
1129 	      bool overflow;
1130 	      wide_int alt = wi::sub (larger, 1, TYPE_SIGN (TREE_TYPE (larger)),
1131 				      &overflow);
1132 	      if (! overflow)
1133 		alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
1134 	    }
1135 	  else
1136 	    {
1137 	      bool overflow;
1138 	      wide_int alt = wi::add (larger, 1, TYPE_SIGN (TREE_TYPE (larger)),
1139 				      &overflow);
1140 	      if (! overflow)
1141 		alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
1142 	    }
1143 	}
1144     }
1145   else if (cmp == GT_EXPR || cmp == GE_EXPR)
1146     {
1147       smaller = gimple_cond_rhs (cond);
1148       larger = gimple_cond_lhs (cond);
1149       /* If we have larger > CST it is equivalent to larger >= CST+1.
1150 	 Likewise larger >= CST is equivalent to larger > CST-1.  */
1151       if (TREE_CODE (smaller) == INTEGER_CST)
1152 	{
1153 	  if (cmp == GT_EXPR)
1154 	    {
1155 	      bool overflow;
1156 	      wide_int alt = wi::add (smaller, 1, TYPE_SIGN (TREE_TYPE (smaller)),
1157 				      &overflow);
1158 	      if (! overflow)
1159 		alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
1160 	    }
1161 	  else
1162 	    {
1163 	      bool overflow;
1164 	      wide_int alt = wi::sub (smaller, 1, TYPE_SIGN (TREE_TYPE (smaller)),
1165 				      &overflow);
1166 	      if (! overflow)
1167 		alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
1168 	    }
1169 	}
1170     }
1171   else
1172     return false;
1173 
1174   /* We need to know which is the true edge and which is the false
1175      edge so that we know which PHI argument corresponds to each outcome.  */
1176   extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1177 
1178   /* Forward the edges over the middle basic block.  */
1179   if (true_edge->dest == middle_bb)
1180     true_edge = EDGE_SUCC (true_edge->dest, 0);
1181   if (false_edge->dest == middle_bb)
1182     false_edge = EDGE_SUCC (false_edge->dest, 0);
1183 
1184   if (true_edge == e0)
1185     {
1186       gcc_assert (false_edge == e1);
1187       arg_true = arg0;
1188       arg_false = arg1;
1189     }
1190   else
1191     {
1192       gcc_assert (false_edge == e0);
1193       gcc_assert (true_edge == e1);
1194       arg_true = arg1;
1195       arg_false = arg0;
1196     }
1197 
1198   if (empty_block_p (middle_bb))
1199     {
1200       if ((operand_equal_for_phi_arg_p (arg_true, smaller)
1201 	   || (alt_smaller
1202 	       && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
1203 	  && (operand_equal_for_phi_arg_p (arg_false, larger)
1204 	      || (alt_larger
1205 		  && operand_equal_for_phi_arg_p (arg_false, alt_larger))))
1206 	{
1207 	  /* Case
1208 
1209 	     if (smaller < larger)
1210 	       rslt = smaller;
1211 	     else
1212 	       rslt = larger;  */
1213 	  minmax = MIN_EXPR;
1214 	}
1215       else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
1216 		|| (alt_smaller
1217 		    && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
1218 	       && (operand_equal_for_phi_arg_p (arg_true, larger)
1219 		   || (alt_larger
1220 		       && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
1221 	minmax = MAX_EXPR;
1222       else
1223 	return false;
1224     }
1225   else
1226     {
1227       /* Recognize the following case, assuming d <= u:
1228 
1229 	 if (a <= u)
1230 	   b = MAX (a, d);
1231 	 x = PHI <b, u>
1232 
1233 	 This is equivalent to
1234 
1235 	 b = MAX (a, d);
1236 	 x = MIN (b, u);  */
1237 
1238       gimple *assign = last_and_only_stmt (middle_bb);
1239       tree lhs, op0, op1, bound;
1240 
1241       if (!assign
1242 	  || gimple_code (assign) != GIMPLE_ASSIGN)
1243 	return false;
1244 
1245       lhs = gimple_assign_lhs (assign);
1246       ass_code = gimple_assign_rhs_code (assign);
1247       if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
1248 	return false;
1249       op0 = gimple_assign_rhs1 (assign);
1250       op1 = gimple_assign_rhs2 (assign);
1251 
1252       if (true_edge->src == middle_bb)
1253 	{
1254 	  /* We got here if the condition is true, i.e., SMALLER < LARGER.  */
1255 	  if (!operand_equal_for_phi_arg_p (lhs, arg_true))
1256 	    return false;
1257 
1258 	  if (operand_equal_for_phi_arg_p (arg_false, larger)
1259 	      || (alt_larger
1260 		  && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
1261 	    {
1262 	      /* Case
1263 
1264 		 if (smaller < larger)
1265 		   {
1266 		     r' = MAX_EXPR (smaller, bound)
1267 		   }
1268 		 r = PHI <r', larger>  --> to be turned to MIN_EXPR.  */
1269 	      if (ass_code != MAX_EXPR)
1270 		return false;
1271 
1272 	      minmax = MIN_EXPR;
1273 	      if (operand_equal_for_phi_arg_p (op0, smaller)
1274 		  || (alt_smaller
1275 		      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
1276 		bound = op1;
1277 	      else if (operand_equal_for_phi_arg_p (op1, smaller)
1278 		       || (alt_smaller
1279 			   && operand_equal_for_phi_arg_p (op1, alt_smaller)))
1280 		bound = op0;
1281 	      else
1282 		return false;
1283 
1284 	      /* We need BOUND <= LARGER.  */
1285 	      if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1286 						  bound, larger)))
1287 		return false;
1288 	    }
1289 	  else if (operand_equal_for_phi_arg_p (arg_false, smaller)
1290 		   || (alt_smaller
1291 		       && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
1292 	    {
1293 	      /* Case
1294 
1295 		 if (smaller < larger)
1296 		   {
1297 		     r' = MIN_EXPR (larger, bound)
1298 		   }
1299 		 r = PHI <r', smaller>  --> to be turned to MAX_EXPR.  */
1300 	      if (ass_code != MIN_EXPR)
1301 		return false;
1302 
1303 	      minmax = MAX_EXPR;
1304 	      if (operand_equal_for_phi_arg_p (op0, larger)
1305 		  || (alt_larger
1306 		      && operand_equal_for_phi_arg_p (op0, alt_larger)))
1307 		bound = op1;
1308 	      else if (operand_equal_for_phi_arg_p (op1, larger)
1309 		       || (alt_larger
1310 			   && operand_equal_for_phi_arg_p (op1, alt_larger)))
1311 		bound = op0;
1312 	      else
1313 		return false;
1314 
1315 	      /* We need BOUND >= SMALLER.  */
1316 	      if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1317 						  bound, smaller)))
1318 		return false;
1319 	    }
1320 	  else
1321 	    return false;
1322 	}
1323       else
1324 	{
1325 	  /* We got here if the condition is false, i.e., SMALLER > LARGER.  */
1326 	  if (!operand_equal_for_phi_arg_p (lhs, arg_false))
1327 	    return false;
1328 
1329 	  if (operand_equal_for_phi_arg_p (arg_true, larger)
1330 	      || (alt_larger
1331 		  && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
1332 	    {
1333 	      /* Case
1334 
1335 		 if (smaller > larger)
1336 		   {
1337 		     r' = MIN_EXPR (smaller, bound)
1338 		   }
1339 		 r = PHI <r', larger>  --> to be turned to MAX_EXPR.  */
1340 	      if (ass_code != MIN_EXPR)
1341 		return false;
1342 
1343 	      minmax = MAX_EXPR;
1344 	      if (operand_equal_for_phi_arg_p (op0, smaller)
1345 		  || (alt_smaller
1346 		      && operand_equal_for_phi_arg_p (op0, alt_smaller)))
1347 		bound = op1;
1348 	      else if (operand_equal_for_phi_arg_p (op1, smaller)
1349 		       || (alt_smaller
1350 			   && operand_equal_for_phi_arg_p (op1, alt_smaller)))
1351 		bound = op0;
1352 	      else
1353 		return false;
1354 
1355 	      /* We need BOUND >= LARGER.  */
1356 	      if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1357 						  bound, larger)))
1358 		return false;
1359 	    }
1360 	  else if (operand_equal_for_phi_arg_p (arg_true, smaller)
1361 		   || (alt_smaller
1362 		       && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
1363 	    {
1364 	      /* Case
1365 
1366 		 if (smaller > larger)
1367 		   {
1368 		     r' = MAX_EXPR (larger, bound)
1369 		   }
1370 		 r = PHI <r', smaller>  --> to be turned to MIN_EXPR.  */
1371 	      if (ass_code != MAX_EXPR)
1372 		return false;
1373 
1374 	      minmax = MIN_EXPR;
1375 	      if (operand_equal_for_phi_arg_p (op0, larger))
1376 		bound = op1;
1377 	      else if (operand_equal_for_phi_arg_p (op1, larger))
1378 		bound = op0;
1379 	      else
1380 		return false;
1381 
1382 	      /* We need BOUND <= SMALLER.  */
1383 	      if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1384 						  bound, smaller)))
1385 		return false;
1386 	    }
1387 	  else
1388 	    return false;
1389 	}
1390 
1391       /* Move the statement from the middle block.  */
1392       gsi = gsi_last_bb (cond_bb);
1393       gsi_from = gsi_last_nondebug_bb (middle_bb);
1394       gsi_move_before (&gsi_from, &gsi);
1395     }
1396 
1397   /* Create an SSA var to hold the min/max result.  If this is the only
1398      thing setting the target PHI, then we can clone the PHI
1399      variable.  Otherwise we must create a new one.  */
1400   result = PHI_RESULT (phi);
1401   if (EDGE_COUNT (gimple_bb (phi)->preds) == 2)
1402     result = duplicate_ssa_name (result, NULL);
1403   else
1404     result = make_ssa_name (TREE_TYPE (result));
1405 
1406   /* Emit the statement to compute min/max.  */
1407   new_stmt = gimple_build_assign (result, minmax, arg0, arg1);
1408   gsi = gsi_last_bb (cond_bb);
1409   gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1410 
1411   replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1412   reset_flow_sensitive_info_in_bb (cond_bb);
1413 
1414   return true;
1415 }
1416 
1417 /*  The function abs_replacement does the main work of doing the absolute
1418     replacement.  Return true if the replacement is done.  Otherwise return
1419     false.
1420     bb is the basic block where the replacement is going to be done.  arg0
1421     is argument 0 from the phi.  Likewise for arg1.  */
1422 
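/* An added sketch for illustration:

     if (a < 0)
       r = -a;
     else
       r = a;

   becomes r = ABS_EXPR <a>; with the branches swapped the result is
   additionally negated, r = -ABS_EXPR <a>.  */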
1423 static bool
1424 abs_replacement (basic_block cond_bb, basic_block middle_bb,
1425 		 edge e0 ATTRIBUTE_UNUSED, edge e1,
1426 		 gimple *phi, tree arg0, tree arg1)
1427 {
1428   tree result;
1429   gassign *new_stmt;
1430   gimple *cond;
1431   gimple_stmt_iterator gsi;
1432   edge true_edge, false_edge;
1433   gimple *assign;
1434   edge e;
1435   tree rhs, lhs;
1436   bool negate;
1437   enum tree_code cond_code;
1438 
1439   /* If the type says honor signed zeros we cannot do this
1440      optimization.  */
1441   if (HONOR_SIGNED_ZEROS (arg1))
1442     return false;
1443 
1444   /* OTHER_BLOCK must have only one executable statement which must have the
1445      form arg0 = -arg1 or arg1 = -arg0.  */
1446 
1447   assign = last_and_only_stmt (middle_bb);
1448   /* If we did not find the proper negation assignment, then we cannot
1449      optimize.  */
1450   if (assign == NULL)
1451     return false;
1452 
1453   /* If we got here, then we have found the only executable statement
1454      in OTHER_BLOCK.  If it is anything other than arg0 = -arg1 or
1455      arg1 = -arg0, then we cannot optimize.  */
1456   if (gimple_code (assign) != GIMPLE_ASSIGN)
1457     return false;
1458 
1459   lhs = gimple_assign_lhs (assign);
1460 
1461   if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
1462     return false;
1463 
1464   rhs = gimple_assign_rhs1 (assign);
1465 
1466   /* The assignment has to be arg0 = -arg1 or arg1 = -arg0.  */
1467   if (!(lhs == arg0 && rhs == arg1)
1468       && !(lhs == arg1 && rhs == arg0))
1469     return false;
1470 
1471   cond = last_stmt (cond_bb);
1472   result = PHI_RESULT (phi);
1473 
1474   /* Only relationals comparing arg[01] against zero are interesting.  */
1475   cond_code = gimple_cond_code (cond);
1476   if (cond_code != GT_EXPR && cond_code != GE_EXPR
1477       && cond_code != LT_EXPR && cond_code != LE_EXPR)
1478     return false;
1479 
1480   /* Make sure the conditional is arg[01] OP y.  */
1481   if (gimple_cond_lhs (cond) != rhs)
1482     return false;
1483 
1484   if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
1485 	       ? real_zerop (gimple_cond_rhs (cond))
1486 	       : integer_zerop (gimple_cond_rhs (cond)))
1487     ;
1488   else
1489     return false;
1490 
1491   /* We need to know which is the true edge and which is the false
1492      edge so that we know whether we have abs or negative abs.  */
1493   extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1494 
1495   /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we
1496      will need to negate the result.  Similarly for LT_EXPR/LE_EXPR if
1497      the false edge goes to OTHER_BLOCK.  */
1498   if (cond_code == GT_EXPR || cond_code == GE_EXPR)
1499     e = true_edge;
1500   else
1501     e = false_edge;
1502 
1503   if (e->dest == middle_bb)
1504     negate = true;
1505   else
1506     negate = false;
1507 
1508   /* If the result needs to be negated, make sure we do not introduce
1509      undefined behavior when negating or computing the absolute value.
1510      ???  We could use range info if present to check for arg1 == INT_MIN.  */
1511   if (negate
1512       && (ANY_INTEGRAL_TYPE_P (TREE_TYPE (arg1))
1513 	  && ! TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1))))
1514     return false;
1515 
1516   result = duplicate_ssa_name (result, NULL);
1517 
1518   if (negate)
1519     lhs = make_ssa_name (TREE_TYPE (result));
1520   else
1521     lhs = result;
1522 
1523   /* Build the modify expression with abs expression.  */
1524   new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);
1525 
1526   gsi = gsi_last_bb (cond_bb);
1527   gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1528 
1529   if (negate)
1530     {
1531       /* Get the right GSI.  We want to insert after the recently
1532 	 added ABS_EXPR statement (which we know is the first statement
1533 	 in the block).  */
1534       new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);
1535 
1536       gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1537     }
1538 
1539   replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1540   reset_flow_sensitive_info_in_bb (cond_bb);
1541 
1542   /* Note that we optimized this PHI.  */
1543   return true;
1544 }
1545 
1546 /* Auxiliary functions to determine the set of memory accesses which
1547    can't trap because they are preceded by accesses to the same memory
1548    portion.  We do that for MEM_REFs, so we only need to track
1549    the SSA_NAME of the pointer indirectly referenced.  The algorithm
1550    simply is a walk over all instructions in dominator order.  When
1551    we see a MEM_REF we determine whether we've already seen the same
1552    ref anywhere up to the root of the dominator tree.  If we have, the
1553    current access can't trap.  If we don't see any dominating access,
1554    the current access might trap, but might also make later accesses
1555    non-trapping, so we remember it.  We need to be careful with loads
1556    or stores, for instance a load might not trap, while a store would,
1557    so if we see a dominating read access this doesn't mean that a later
1558    write access would not trap.  Hence we also need to differentiate the
1559    type of access(es) seen.
1560 
1561    ??? We currently are very conservative and assume that a load might
1562    trap even if a store doesn't (write-only memory).  This probably is
1563    overly conservative.  */
1564 
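/* An added sketch for illustration: in

     *p = 0;
     if (cond)
       *p = x;

   the conditional store cannot trap, because it is dominated by a store
   of the same size to the same address.  A dominating load of *p would
   not be enough, per the caveat above about loads vs. stores.  */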
1565 /* A hash-table of SSA_NAMEs, recording in which basic block a MEM_REF
1566    through each was seen; that block's dominated region is a no-trap
1567    region for the same access.  */
1568 struct name_to_bb
1569 {
1570   unsigned int ssa_name_ver;
1571   unsigned int phase;
1572   bool store;
1573   HOST_WIDE_INT offset, size;
1574   basic_block bb;
1575 };
1576 
1577 /* Hashtable helpers.  */
1578 
1579 struct ssa_names_hasher : free_ptr_hash <name_to_bb>
1580 {
1581   static inline hashval_t hash (const name_to_bb *);
1582   static inline bool equal (const name_to_bb *, const name_to_bb *);
1583 };
1584 
1585 /* Used for quick clearing of the hash-table when we see calls.
1586    Hash entries with phase < nt_call_phase are invalid.  */
1587 static unsigned int nt_call_phase;
1588 
1589 /* The hash function.  */
1590 
1591 inline hashval_t
1592 ssa_names_hasher::hash (const name_to_bb *n)
1593 {
1594   return n->ssa_name_ver ^ (((hashval_t) n->store) << 31)
1595          ^ (n->offset << 6) ^ (n->size << 3);
1596 }
1597 
1598 /* The equality function of *N1 and *N2.  */
1599 
1600 inline bool
1601 ssa_names_hasher::equal (const name_to_bb *n1, const name_to_bb *n2)
1602 {
1603   return n1->ssa_name_ver == n2->ssa_name_ver
1604          && n1->store == n2->store
1605          && n1->offset == n2->offset
1606          && n1->size == n2->size;
1607 }
1608 
1609 class nontrapping_dom_walker : public dom_walker
1610 {
1611 public:
1612   nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
1613     : dom_walker (direction), m_nontrapping (ps), m_seen_ssa_names (128) {}
1614 
1615   virtual edge before_dom_children (basic_block);
1616   virtual void after_dom_children (basic_block);
1617 
1618 private:
1619 
1620   /* We see the expression EXP in basic block BB.  If it's an interesting
1621      expression (a MEM_REF through an SSA_NAME) possibly insert the
1622      expression into the set NONTRAP or the hash table of seen expressions.
1623      STORE is true if this expression is on the LHS, otherwise it's on
1624      the RHS.  */
1625   void add_or_mark_expr (basic_block, tree, bool);
1626 
1627   hash_set<tree> *m_nontrapping;
1628 
1629   /* The hash table for remembering what we've seen.  */
1630   hash_table<ssa_names_hasher> m_seen_ssa_names;
1631 };
1632 
1633 /* Called by walk_dominator_tree, when entering the block BB.  */
1634 edge
1635 nontrapping_dom_walker::before_dom_children (basic_block bb)
1636 {
1637   edge e;
1638   edge_iterator ei;
1639   gimple_stmt_iterator gsi;
1640 
1641   /* If we haven't seen all our predecessors, clear the hash-table.  */
1642   FOR_EACH_EDGE (e, ei, bb->preds)
1643     if ((((size_t)e->src->aux) & 2) == 0)
1644       {
1645 	nt_call_phase++;
1646 	break;
1647       }
1648 
1649   /* Mark this BB as being on the path to dominator root and as visited.  */
1650   bb->aux = (void*)(1 | 2);
1651 
1652   /* And walk the statements in order.  */
1653   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1654     {
1655       gimple *stmt = gsi_stmt (gsi);
1656 
1657       if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
1658 	  || (is_gimple_call (stmt)
1659 	      && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
1660 	nt_call_phase++;
1661       else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
1662 	{
1663 	  add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
1664 	  add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
1665 	}
1666     }
1667   return NULL;
1668 }
1669 
1670 /* Called by walk_dominator_tree, when basic block BB is exited.  */
1671 void
1672 nontrapping_dom_walker::after_dom_children (basic_block bb)
1673 {
1674   /* This BB isn't on the path to dominator root anymore.  */
1675   bb->aux = (void*)2;
1676 }
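
/* Editorial summary of the bb->aux protocol used by this walker: the
   1 bit means "BB is on the path from the current block up to the
   dominator root" (tested via found_bb->aux & 1 in add_or_mark_expr),
   and the 2 bit means "BB has already been visited" (tested on the
   predecessors in before_dom_children).  A block thus holds 3 while its
   dominator subtree is being walked and 2 afterwards.  */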
1677 
1678 /* We see the expression EXP in basic block BB.  If it's an interesting
1679    expression (a MEM_REF through an SSA_NAME) possibly insert the
1680    expression into the set NONTRAP or the hash table of seen expressions.
1681    STORE is true if this expression is on the LHS, otherwise it's on
1682    the RHS.  */
1683 void
1684 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
1685 {
1686   HOST_WIDE_INT size;
1687 
1688   if (TREE_CODE (exp) == MEM_REF
1689       && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME
1690       && tree_fits_shwi_p (TREE_OPERAND (exp, 1))
1691       && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
1692     {
1693       tree name = TREE_OPERAND (exp, 0);
1694       struct name_to_bb map;
1695       name_to_bb **slot;
1696       struct name_to_bb *n2bb;
1697       basic_block found_bb = 0;
1698 
1699       /* Try to find the last seen MEM_REF through the same
1700          SSA_NAME, which can trap.  */
1701       map.ssa_name_ver = SSA_NAME_VERSION (name);
1702       map.phase = 0;
1703       map.bb = 0;
1704       map.store = store;
1705       map.offset = tree_to_shwi (TREE_OPERAND (exp, 1));
1706       map.size = size;
1707 
1708       slot = m_seen_ssa_names.find_slot (&map, INSERT);
1709       n2bb = *slot;
1710       if (n2bb && n2bb->phase >= nt_call_phase)
1711         found_bb = n2bb->bb;
1712 
1713       /* If we've found a dominating access to the same memory (it's in
1714          a basic block on the path from us to the dominator root), then
1715	 EXP can't trap.  */
1716       if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
1717 	{
1718 	  m_nontrapping->add (exp);
1719 	}
1720       else
1721         {
1722 	  /* EXP might trap, so insert it into the hash table.  */
1723 	  if (n2bb)
1724 	    {
1725 	      n2bb->phase = nt_call_phase;
1726 	      n2bb->bb = bb;
1727 	    }
1728 	  else
1729 	    {
1730 	      n2bb = XNEW (struct name_to_bb);
1731 	      n2bb->ssa_name_ver = SSA_NAME_VERSION (name);
1732 	      n2bb->phase = nt_call_phase;
1733 	      n2bb->bb = bb;
1734 	      n2bb->store = store;
1735 	      n2bb->offset = map.offset;
1736 	      n2bb->size = size;
1737 	      *slot = n2bb;
1738 	    }
1739 	}
1740     }
1741 }
1742 
1743 /* This is the entry point for gathering non-trapping memory accesses.
1744    It will do a dominator walk over the whole function, and it will
1745    make use of the bb->aux pointers.  It returns a set of trees
1746    (the MEM_REFs themselves) which can't trap.  */
1747 static hash_set<tree> *
1748 get_non_trapping (void)
1749 {
1750   nt_call_phase = 0;
1751   hash_set<tree> *nontrap = new hash_set<tree>;
1752   /* We're going to do a dominator walk, so ensure that we have
1753      dominance information.  */
1754   calculate_dominance_info (CDI_DOMINATORS);
1755 
1756   nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
1757     .walk (cfun->cfg->x_entry_block_ptr);
1758 
1759   clear_aux_for_blocks ();
1760   return nontrap;
1761 }
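
/* Editorial usage note: the returned set is heap-allocated; callers
   query it via nontrap->contains (see cond_store_replacement below)
   and are expected to delete it when done.  */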
1762 
1763 /* Do the main work of conditional store replacement.  We already know
1764    that the recognized pattern looks like this:
1765 
1766    split:
1767      if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
1768    MIDDLE_BB:
1769      something
1770      fallthrough (edge E0)
1771    JOIN_BB:
1772      some more
1773 
1774    We check that MIDDLE_BB contains only one store, that that store
1775    doesn't trap (not via NOTRAP, but via checking if an access to the same
1776    memory location dominates us) and that the store has a "simple" RHS.  */
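
/* A hedged source-level sketch of the transformation (editorial, not
   taken from the original sources): assuming the dominating-access
   proof above succeeded for *p,

     if (cond)
       *p = rhs;

   becomes, in effect,

     tmp = cond ? rhs : *p;
     *p = tmp;

   i.e. the store is made unconditional, with the old memory contents
   reloaded on the edge that did not contain the store.  */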
1777 
1778 static bool
1779 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
1780 			edge e0, edge e1, hash_set<tree> *nontrap)
1781 {
1782   gimple *assign = last_and_only_stmt (middle_bb);
1783   tree lhs, rhs, name, name2;
1784   gphi *newphi;
1785   gassign *new_stmt;
1786   gimple_stmt_iterator gsi;
1787   source_location locus;
1788 
1789   /* Check that middle_bb contains only one store.  */
1790   if (!assign
1791       || !gimple_assign_single_p (assign)
1792       || gimple_has_volatile_ops (assign))
1793     return false;
1794 
1795   locus = gimple_location (assign);
1796   lhs = gimple_assign_lhs (assign);
1797   rhs = gimple_assign_rhs1 (assign);
1798   if (TREE_CODE (lhs) != MEM_REF
1799       || TREE_CODE (TREE_OPERAND (lhs, 0)) != SSA_NAME
1800       || !is_gimple_reg_type (TREE_TYPE (lhs)))
1801     return false;
1802 
1803   /* Prove that we can move the store down.  We could also check
1804      TREE_THIS_NOTRAP here, but in that case we could also move stores
1805      whose value is not readily available, which we want to avoid.  */
1806   if (!nontrap->contains (lhs))
1807     return false;
1808 
1809   /* Now we've checked the constraints, so do the transformation:
1810      1) Remove the single store.  */
1811   gsi = gsi_for_stmt (assign);
1812   unlink_stmt_vdef (assign);
1813   gsi_remove (&gsi, true);
1814   release_defs (assign);
1815 
1816   /* Make both store and load use alias-set zero as we have to
1817      deal with the case of the store being a conditional change
1818      of the dynamic type.  */
1819   lhs = unshare_expr (lhs);
1820   tree *basep = &lhs;
1821   while (handled_component_p (*basep))
1822     basep = &TREE_OPERAND (*basep, 0);
1823   if (TREE_CODE (*basep) == MEM_REF
1824       || TREE_CODE (*basep) == TARGET_MEM_REF)
1825     TREE_OPERAND (*basep, 1)
1826       = fold_convert (ptr_type_node, TREE_OPERAND (*basep, 1));
1827   else
1828     *basep = build2 (MEM_REF, TREE_TYPE (*basep),
1829 		     build_fold_addr_expr (*basep),
1830 		     build_zero_cst (ptr_type_node));
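
  /* Editorial note: the type of a MEM_REF's second operand encodes the
     alias set of the access, and ptr_type_node yields alias-set zero,
     which conflicts with all other accesses; the rebuilt reference thus
     stays correct even if the store conditionally changed the dynamic
     type of the memory.  */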
1831 
1832   /* 2) Insert a load of the stored memory location into a temporary,
1833         on the edge which did not contain the store.  */
1834   name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1835   new_stmt = gimple_build_assign (name, lhs);
1836   gimple_set_location (new_stmt, locus);
1837   gsi_insert_on_edge (e1, new_stmt);
1838 
1839   /* 3) Create a PHI node at the join block, with one argument
1840         holding the old RHS, and the other holding the temporary
1841         where we stored the old memory contents.  */
1842   name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1843   newphi = create_phi_node (name2, join_bb);
1844   add_phi_arg (newphi, rhs, e0, locus);
1845   add_phi_arg (newphi, name, e1, locus);
1846 
1847   lhs = unshare_expr (lhs);
1848   new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
1849 
1850   /* 4) Insert a store of the PHI result.  */
1851   gsi = gsi_after_labels (join_bb);
1852   if (gsi_end_p (gsi))
1853     {
1854       gsi = gsi_last_bb (join_bb);
1855       gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1856     }
1857   else
1858     gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1859 
1860   return true;
1861 }
1862 
1863 /* Do the main work of conditional if/else store replacement.  */
1864 
1865 static bool
1866 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
1867 				  basic_block join_bb, gimple *then_assign,
1868 				  gimple *else_assign)
1869 {
1870   tree lhs_base, lhs, then_rhs, else_rhs, name;
1871   source_location then_locus, else_locus;
1872   gimple_stmt_iterator gsi;
1873   gphi *newphi;
1874   gassign *new_stmt;
1875 
1876   if (then_assign == NULL
1877       || !gimple_assign_single_p (then_assign)
1878       || gimple_clobber_p (then_assign)
1879       || gimple_has_volatile_ops (then_assign)
1880       || else_assign == NULL
1881       || !gimple_assign_single_p (else_assign)
1882       || gimple_clobber_p (else_assign)
1883       || gimple_has_volatile_ops (else_assign))
1884     return false;
1885 
1886   lhs = gimple_assign_lhs (then_assign);
1887   if (!is_gimple_reg_type (TREE_TYPE (lhs))
1888       || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
1889     return false;
1890 
1891   lhs_base = get_base_address (lhs);
1892   if (lhs_base == NULL_TREE
1893       || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
1894     return false;
1895 
1896   then_rhs = gimple_assign_rhs1 (then_assign);
1897   else_rhs = gimple_assign_rhs1 (else_assign);
1898   then_locus = gimple_location (then_assign);
1899   else_locus = gimple_location (else_assign);
1900 
1901   /* Now we've checked the constraints, so do the transformation:
1902      1) Remove the stores.  */
1903   gsi = gsi_for_stmt (then_assign);
1904   unlink_stmt_vdef (then_assign);
1905   gsi_remove (&gsi, true);
1906   release_defs (then_assign);
1907 
1908   gsi = gsi_for_stmt (else_assign);
1909   unlink_stmt_vdef (else_assign);
1910   gsi_remove (&gsi, true);
1911   release_defs (else_assign);
1912 
1913   /* 2) Create a PHI node at the join block, with one argument
1914	holding the RHS of the THEN store and the other holding
1915	the RHS of the ELSE store.  */
1916   name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
1917   newphi = create_phi_node (name, join_bb);
1918   add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
1919   add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
1920 
1921   new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
1922 
1923   /* 3) Insert a store of the PHI result.  */
1924   gsi = gsi_after_labels (join_bb);
1925   if (gsi_end_p (gsi))
1926     {
1927       gsi = gsi_last_bb (join_bb);
1928       gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
1929     }
1930   else
1931     gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
1932 
1933   return true;
1934 }
1935 
1936 /* Conditional store replacement.  We already know
1937    that the recognized pattern looks like this:
1938 
1939    split:
1940      if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
1941    THEN_BB:
1942      ...
1943      X = Y;
1944      ...
1945      goto JOIN_BB;
1946    ELSE_BB:
1947      ...
1948      X = Z;
1949      ...
1950      fallthrough (edge E0)
1951    JOIN_BB:
1952      some more
1953 
1954    We check that it is safe to sink the store to JOIN_BB by verifying that
1955    there are no read-after-write or write-after-write dependencies in
1956    THEN_BB and ELSE_BB.  */
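
/* Illustrative sketch (editorial): the simplest instance handled here is

     if (cond)
       *p = y;
     else
       *p = z;

   which becomes, in effect,

     tmp = cond ? y : z;
     *p = tmp;

   with the PHI node in JOIN_BB playing the role of the conditional
   expression.  */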
1957 
1958 static bool
1959 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
1960                                 basic_block join_bb)
1961 {
1962   gimple *then_assign = last_and_only_stmt (then_bb);
1963   gimple *else_assign = last_and_only_stmt (else_bb);
1964   vec<data_reference_p> then_datarefs, else_datarefs;
1965   vec<ddr_p> then_ddrs, else_ddrs;
1966   gimple *then_store, *else_store;
1967   bool found, ok = false, res;
1968   struct data_dependence_relation *ddr;
1969   data_reference_p then_dr, else_dr;
1970   int i, j;
1971   tree then_lhs, else_lhs;
1972   basic_block blocks[3];
1973 
1974   if (MAX_STORES_TO_SINK == 0)
1975     return false;
1976 
1977   /* Handle the case with a single statement in THEN_BB and ELSE_BB.  */
1978   if (then_assign && else_assign)
1979     return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
1980                                              then_assign, else_assign);
1981 
1982   /* Find data references.  */
1983   then_datarefs.create (1);
1984   else_datarefs.create (1);
1985   if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
1986         == chrec_dont_know)
1987       || !then_datarefs.length ()
1988       || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
1989 	  == chrec_dont_know)
1990       || !else_datarefs.length ())
1991     {
1992       free_data_refs (then_datarefs);
1993       free_data_refs (else_datarefs);
1994       return false;
1995     }
1996 
1997   /* Find pairs of stores with equal LHS.  */
1998   auto_vec<gimple *, 1> then_stores, else_stores;
1999   FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
2000     {
2001       if (DR_IS_READ (then_dr))
2002         continue;
2003 
2004       then_store = DR_STMT (then_dr);
2005       then_lhs = gimple_get_lhs (then_store);
2006       if (then_lhs == NULL_TREE)
2007 	continue;
2008       found = false;
2009 
2010       FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
2011         {
2012           if (DR_IS_READ (else_dr))
2013             continue;
2014 
2015           else_store = DR_STMT (else_dr);
2016           else_lhs = gimple_get_lhs (else_store);
2017 	  if (else_lhs == NULL_TREE)
2018 	    continue;
2019 
2020           if (operand_equal_p (then_lhs, else_lhs, 0))
2021             {
2022               found = true;
2023               break;
2024             }
2025         }
2026 
2027       if (!found)
2028         continue;
2029 
2030       then_stores.safe_push (then_store);
2031       else_stores.safe_push (else_store);
2032     }
2033 
2034   /* Bail out if no pairs of stores were found, or if there are too many to sink.  */
2035   if (!then_stores.length ()
2036       || then_stores.length () > (unsigned) MAX_STORES_TO_SINK)
2037     {
2038       free_data_refs (then_datarefs);
2039       free_data_refs (else_datarefs);
2040       return false;
2041     }
2042 
2043   /* Compute and check data dependencies in both basic blocks.  */
2044   then_ddrs.create (1);
2045   else_ddrs.create (1);
2046   if (!compute_all_dependences (then_datarefs, &then_ddrs,
2047 				vNULL, false)
2048       || !compute_all_dependences (else_datarefs, &else_ddrs,
2049 				   vNULL, false))
2050     {
2051       free_dependence_relations (then_ddrs);
2052       free_dependence_relations (else_ddrs);
2053       free_data_refs (then_datarefs);
2054       free_data_refs (else_datarefs);
2055       return false;
2056     }
2057   blocks[0] = then_bb;
2058   blocks[1] = else_bb;
2059   blocks[2] = join_bb;
2060   renumber_gimple_stmt_uids_in_blocks (blocks, 3);
2061 
2062   /* Check that there are no read-after-write or write-after-write dependencies
2063      in THEN_BB.  */
2064   FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
2065     {
2066       struct data_reference *dra = DDR_A (ddr);
2067       struct data_reference *drb = DDR_B (ddr);
2068 
2069       if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2070           && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2071                && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2072               || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2073                   && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2074               || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2075         {
2076           free_dependence_relations (then_ddrs);
2077           free_dependence_relations (else_ddrs);
2078 	  free_data_refs (then_datarefs);
2079 	  free_data_refs (else_datarefs);
2080           return false;
2081         }
2082     }
2083 
2084   /* Check that there are no read-after-write or write-after-write dependencies
2085      in ELSE_BB.  */
2086   FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
2087     {
2088       struct data_reference *dra = DDR_A (ddr);
2089       struct data_reference *drb = DDR_B (ddr);
2090 
2091       if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2092           && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2093                && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2094               || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2095                   && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2096               || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2097         {
2098           free_dependence_relations (then_ddrs);
2099           free_dependence_relations (else_ddrs);
2100 	  free_data_refs (then_datarefs);
2101 	  free_data_refs (else_datarefs);
2102           return false;
2103         }
2104     }
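
  /* Editorial example of a case the checks above reject: if THEN_BB is

       *p = 1;
       t = *q;

     and *q may alias *p, the load is a read-after-write on the store we
     would sink, so moving *p = 1 down to JOIN_BB could change the value
     loaded into t; we give up in that case.  */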
2105 
2106   /* Sink stores with same LHS.  */
2107   FOR_EACH_VEC_ELT (then_stores, i, then_store)
2108     {
2109       else_store = else_stores[i];
2110       res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
2111                                               then_store, else_store);
2112       ok = ok || res;
2113     }
2114 
2115   free_dependence_relations (then_ddrs);
2116   free_dependence_relations (else_ddrs);
2117   free_data_refs (then_datarefs);
2118   free_data_refs (else_datarefs);
2119 
2120   return ok;
2121 }
2122 
2123 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB.  */
2124 
2125 static bool
2126 local_mem_dependence (gimple *stmt, basic_block bb)
2127 {
2128   tree vuse = gimple_vuse (stmt);
2129   gimple *def;
2130 
2131   if (!vuse)
2132     return false;
2133 
2134   def = SSA_NAME_DEF_STMT (vuse);
2135   return (def && gimple_bb (def) == bb);
2136 }
2137 
2138 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
2139    BB1 and BB2 are "then" and "else" blocks dependent on this test,
2140    and BB3 rejoins control flow following BB1 and BB2, look for
2141    opportunities to hoist loads as follows.  If BB3 contains a PHI of
2142    two loads, one each occurring in BB1 and BB2, and the loads are
2143    provably of adjacent fields in the same structure, then move both
2144    loads into BB0.  Of course this can only be done if there are no
2145    dependencies preventing such motion.
2146 
2147    One of the hoisted loads will always be speculative, so the
2148    transformation is currently conservative:
2149 
2150     - The fields must be strictly adjacent.
2151     - The two fields must occupy a single memory block that is
2152       guaranteed to not cross a page boundary.
2153 
2154     The latter is difficult to prove, as such memory blocks should be
2155     aligned on the minimum of the stack alignment boundary and the
2156     alignment guaranteed by heap allocation interfaces.  Thus we rely
2157     on a parameter for the alignment value.
2158 
2159     Provided a good value is used for that parameter, the first
2160     restriction could possibly be relaxed.  */
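
/* Hedged source-level sketch (editorial; the struct and names are made
   up for illustration):

     struct pair { int a; int b; };

     x = cond ? p->a : p->b;

   becomes

     t1 = p->a;
     t2 = p->b;
     x = cond ? t1 : t2;

   where one of the two hoisted loads is speculative; the adjacency and
   cache-line checks below bound how far it can stray from the
   non-speculative one.  */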
2161 
2162 static void
2163 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
2164 		      basic_block bb2, basic_block bb3)
2165 {
2166   int param_align = PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE);
2167   unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
2168   gphi_iterator gsi;
2169 
2170   /* Walk the phis in bb3 looking for an opportunity.  We are looking
2171      for phis of two SSA names, one defined in bb1 and the other
2172      defined in bb2.  */
2173   for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
2174     {
2175       gphi *phi_stmt = gsi.phi ();
2176       gimple *def1, *def2;
2177       tree arg1, arg2, ref1, ref2, field1, field2;
2178       tree tree_offset1, tree_offset2, tree_size2, next;
2179       int offset1, offset2, size2;
2180       unsigned align1;
2181       gimple_stmt_iterator gsi2;
2182       basic_block bb_for_def1, bb_for_def2;
2183 
2184       if (gimple_phi_num_args (phi_stmt) != 2
2185 	  || virtual_operand_p (gimple_phi_result (phi_stmt)))
2186 	continue;
2187 
2188       arg1 = gimple_phi_arg_def (phi_stmt, 0);
2189       arg2 = gimple_phi_arg_def (phi_stmt, 1);
2190 
2191       if (TREE_CODE (arg1) != SSA_NAME
2192 	  || TREE_CODE (arg2) != SSA_NAME
2193 	  || SSA_NAME_IS_DEFAULT_DEF (arg1)
2194 	  || SSA_NAME_IS_DEFAULT_DEF (arg2))
2195 	continue;
2196 
2197       def1 = SSA_NAME_DEF_STMT (arg1);
2198       def2 = SSA_NAME_DEF_STMT (arg2);
2199 
2200       if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
2201 	  && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
2202 	continue;
2203 
2204       /* Check the mode of the arguments to be sure a conditional move
2205 	 can be generated for them.  */
2206       if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
2207 	  == CODE_FOR_nothing)
2208 	continue;
2209 
2210       /* Both statements must be assignments whose RHS is a COMPONENT_REF.  */
2211       if (!gimple_assign_single_p (def1)
2212 	  || !gimple_assign_single_p (def2)
2213 	  || gimple_has_volatile_ops (def1)
2214 	  || gimple_has_volatile_ops (def2))
2215 	continue;
2216 
2217       ref1 = gimple_assign_rhs1 (def1);
2218       ref2 = gimple_assign_rhs1 (def2);
2219 
2220       if (TREE_CODE (ref1) != COMPONENT_REF
2221 	  || TREE_CODE (ref2) != COMPONENT_REF)
2222 	continue;
2223 
2224       /* The zeroth operand of the two component references must be
2225 	 identical.  It is not sufficient to compare get_base_address of
2226 	 the two references, because this could allow for different
2227 	 elements of the same array in the two trees.  It is not safe to
2228 	 assume that the existence of one array element implies the
2229 	 existence of a different one.  */
2230       if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
2231 	continue;
2232 
2233       field1 = TREE_OPERAND (ref1, 1);
2234       field2 = TREE_OPERAND (ref2, 1);
2235 
2236       /* Check for field adjacency, and ensure field1 comes first.  */
2237       for (next = DECL_CHAIN (field1);
2238 	   next && TREE_CODE (next) != FIELD_DECL;
2239 	   next = DECL_CHAIN (next))
2240 	;
2241 
2242       if (next != field2)
2243 	{
2244 	  for (next = DECL_CHAIN (field2);
2245 	       next && TREE_CODE (next) != FIELD_DECL;
2246 	       next = DECL_CHAIN (next))
2247 	    ;
2248 
2249 	  if (next != field1)
2250 	    continue;
2251 
2252 	  std::swap (field1, field2);
2253 	  std::swap (def1, def2);
2254 	}
2255 
2256       bb_for_def1 = gimple_bb (def1);
2257       bb_for_def2 = gimple_bb (def2);
2258 
2259       /* Check for proper alignment of the first field.  */
2260       tree_offset1 = bit_position (field1);
2261       tree_offset2 = bit_position (field2);
2262       tree_size2 = DECL_SIZE (field2);
2263 
2264       if (!tree_fits_uhwi_p (tree_offset1)
2265 	  || !tree_fits_uhwi_p (tree_offset2)
2266 	  || !tree_fits_uhwi_p (tree_size2))
2267 	continue;
2268 
2269       offset1 = tree_to_uhwi (tree_offset1);
2270       offset2 = tree_to_uhwi (tree_offset2);
2271       size2 = tree_to_uhwi (tree_size2);
2272       align1 = DECL_ALIGN (field1) % param_align_bits;
2273 
2274       if (offset1 % BITS_PER_UNIT != 0)
2275 	continue;
2276 
2277       /* For profitability, the two field references should fit within
2278 	 a single cache line.  */
2279       if (align1 + offset2 - offset1 + size2 > param_align_bits)
2280 	continue;
2281 
2282       /* The two expressions cannot be dependent upon vdefs defined
2283 	 in bb1/bb2.  */
2284       if (local_mem_dependence (def1, bb_for_def1)
2285 	  || local_mem_dependence (def2, bb_for_def2))
2286 	continue;
2287 
2288       /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
2289 	 bb0.  We hoist the first one first so that a cache miss is handled
2290          efficiently regardless of hardware cache-fill policy.  */
2291       gsi2 = gsi_for_stmt (def1);
2292       gsi_move_to_bb_end (&gsi2, bb0);
2293       gsi2 = gsi_for_stmt (def2);
2294       gsi_move_to_bb_end (&gsi2, bb0);
2295 
2296       if (dump_file && (dump_flags & TDF_DETAILS))
2297 	{
2298 	  fprintf (dump_file,
2299 		   "\nHoisting adjacent loads from %d and %d into %d: \n",
2300 		   bb_for_def1->index, bb_for_def2->index, bb0->index);
2301 	  print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
2302 	  print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
2303 	}
2304     }
2305 }
2306 
2307 /* Determine whether we should attempt to hoist adjacent loads out of
2308    diamond patterns in pass_phiopt.  Always hoist loads if
2309    -fhoist-adjacent-loads is specified and the target machine has
2310    both a conditional move instruction and a defined cache line size.  */
2311 
2312 static bool
2313 gate_hoist_loads (void)
2314 {
2315   return (flag_hoist_adjacent_loads == 1
2316 	  && PARAM_VALUE (PARAM_L1_CACHE_LINE_SIZE)
2317 	  && HAVE_conditional_move);
2318 }
2319 
2320 /* This pass tries to replace an if-then-else block with an
2321    assignment.  We have four kinds of transformations.  Some of these
2322    transformations are also performed by the ifcvt RTL optimizer.
2323 
2324    Conditional Replacement
2325    -----------------------
2326 
2327    This transformation, implemented in conditional_replacement,
2328    replaces
2329 
2330      bb0:
2331       if (cond) goto bb2; else goto bb1;
2332      bb1:
2333      bb2:
2334       x = PHI <0 (bb1), 1 (bb0), ...>;
2335 
2336    with
2337 
2338      bb0:
2339       x' = cond;
2340       goto bb2;
2341      bb2:
2342       x = PHI <x' (bb0), ...>;
2343 
2344    We remove bb1 as it becomes unreachable.  This occurs often due to
2345    gimplification of conditionals.
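
   At the source level this corresponds to (editorial sketch):

     x = cond ? 1 : 0;   -->   x = cond;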
2346 
2347    Value Replacement
2348    -----------------
2349 
2350    This transformation, implemented in value_replacement, replaces
2351 
2352      bb0:
2353        if (a != b) goto bb2; else goto bb1;
2354      bb1:
2355      bb2:
2356        x = PHI <a (bb1), b (bb0), ...>;
2357 
2358    with
2359 
2360      bb0:
2361      bb2:
2362        x = PHI <b (bb0), ...>;
2363 
2364    This opportunity can sometimes occur as a result of other
2365    optimizations.
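
   At the source level (editorial sketch):

     x = (a != b) ? b : a;   -->   x = b;

   since on the only path where the PHI uses a, a == b holds anyway.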
2366 
2367 
2368    Another case caught by value replacement looks like this:
2369 
2370      bb0:
2371        t1 = a == CONST;
2372        t2 = b > c;
2373        t3 = t1 & t2;
2374        if (t3 != 0) goto bb1; else goto bb2;
2375      bb1:
2376      bb2:
2377        x = PHI <CONST, a>
2378 
2379    Gets replaced with:
2380      bb0:
2381      bb2:
2382        t1 = a == CONST;
2383        t2 = b > c;
2384        t3 = t1 & t2;
2385        x = a;
2386 
2387    ABS Replacement
2388    ---------------
2389 
2390    This transformation, implemented in abs_replacement, replaces
2391 
2392      bb0:
2393        if (a >= 0) goto bb2; else goto bb1;
2394      bb1:
2395        x = -a;
2396      bb2:
2397        x = PHI <x (bb1), a (bb0), ...>;
2398 
2399    with
2400 
2401      bb0:
2402        x' = ABS_EXPR< a >;
2403      bb2:
2404        x = PHI <x' (bb0), ...>;
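
   At the source level (editorial sketch):

     x = (a >= 0) ? a : -a;   -->   x = ABS_EXPR <a>;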
2405 
2406    MIN/MAX Replacement
2407    -------------------
2408 
2409    This transformation, implemented in minmax_replacement, replaces
2410 
2411      bb0:
2412        if (a <= b) goto bb2; else goto bb1;
2413      bb1:
2414      bb2:
2415        x = PHI <b (bb1), a (bb0), ...>;
2416 
2417    with
2418 
2419      bb0:
2420        x' = MIN_EXPR (a, b)
2421      bb2:
2422        x = PHI <x' (bb0), ...>;
2423 
2424    A similar transformation is done for MAX_EXPR.
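
   At the source level (editorial sketch):

     x = (a <= b) ? a : b;   -->   x = MIN_EXPR <a, b>;
     x = (a >= b) ? a : b;   -->   x = MAX_EXPR <a, b>;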
2425 
2426 
2427    This pass also performs a fifth transformation of a slightly different
2428    flavor.
2429 
2430    Factor conversion in COND_EXPR
2431    ------------------------------
2432 
2433    This transformation factors the conversion out of COND_EXPR with
2434    factor_out_conditional_conversion.
2435 
2436    For example:
2437    if (a <= CST) goto <bb 3>; else goto <bb 4>;
2438    <bb 3>:
2439    tmp = (int) a;
2440    <bb 4>:
2441    tmp = PHI <tmp, CST>
2442 
2443    Into:
2444    if (a <= CST) goto <bb 3>; else goto <bb 4>;
2445    <bb 3>:
2446    <bb 4>:
2447    a = PHI <a, CST>
2448    tmp = (int) a;
2449 
2450    Adjacent Load Hoisting
2451    ----------------------
2452 
2453    This transformation replaces
2454 
2455      bb0:
2456        if (...) goto bb2; else goto bb1;
2457      bb1:
2458        x1 = (<expr>).field1;
2459        goto bb3;
2460      bb2:
2461        x2 = (<expr>).field2;
2462      bb3:
2463        # x = PHI <x1, x2>;
2464 
2465    with
2466 
2467      bb0:
2468        x1 = (<expr>).field1;
2469        x2 = (<expr>).field2;
2470        if (...) goto bb2; else goto bb1;
2471      bb1:
2472        goto bb3;
2473      bb2:
2474      bb3:
2475        # x = PHI <x1, x2>;
2476 
2477    The purpose of this transformation is to enable generation of conditional
2478    move instructions such as Intel CMOVE or PowerPC ISEL.  Because one of
2479    the loads is speculative, the transformation is restricted to very
2480    specific cases to avoid introducing a page fault.  We are looking for
2481    the common idiom:
2482 
2483      if (...)
2484        x = y->left;
2485      else
2486        x = y->right;
2487 
2488    where left and right are typically adjacent pointers in a tree structure.  */
2489 
2490 namespace {
2491 
2492 const pass_data pass_data_phiopt =
2493 {
2494   GIMPLE_PASS, /* type */
2495   "phiopt", /* name */
2496   OPTGROUP_NONE, /* optinfo_flags */
2497   TV_TREE_PHIOPT, /* tv_id */
2498   ( PROP_cfg | PROP_ssa ), /* properties_required */
2499   0, /* properties_provided */
2500   0, /* properties_destroyed */
2501   0, /* todo_flags_start */
2502   0, /* todo_flags_finish */
2503 };
2504 
2505 class pass_phiopt : public gimple_opt_pass
2506 {
2507 public:
2508   pass_phiopt (gcc::context *ctxt)
2509     : gimple_opt_pass (pass_data_phiopt, ctxt)
2510   {}
2511 
2512   /* opt_pass methods: */
2513   opt_pass * clone () { return new pass_phiopt (m_ctxt); }
2514   virtual bool gate (function *) { return flag_ssa_phiopt; }
2515   virtual unsigned int execute (function *)
2516     {
2517       return tree_ssa_phiopt_worker (false, gate_hoist_loads ());
2518     }
2519 
2520 }; // class pass_phiopt
2521 
2522 } // anon namespace
2523 
2524 gimple_opt_pass *
2525 make_pass_phiopt (gcc::context *ctxt)
2526 {
2527   return new pass_phiopt (ctxt);
2528 }
2529 
2530 namespace {
2531 
2532 const pass_data pass_data_cselim =
2533 {
2534   GIMPLE_PASS, /* type */
2535   "cselim", /* name */
2536   OPTGROUP_NONE, /* optinfo_flags */
2537   TV_TREE_PHIOPT, /* tv_id */
2538   ( PROP_cfg | PROP_ssa ), /* properties_required */
2539   0, /* properties_provided */
2540   0, /* properties_destroyed */
2541   0, /* todo_flags_start */
2542   0, /* todo_flags_finish */
2543 };
2544 
2545 class pass_cselim : public gimple_opt_pass
2546 {
2547 public:
2548   pass_cselim (gcc::context *ctxt)
2549     : gimple_opt_pass (pass_data_cselim, ctxt)
2550   {}
2551 
2552   /* opt_pass methods: */
2553   virtual bool gate (function *) { return flag_tree_cselim; }
2554   virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }
2555 
2556 }; // class pass_cselim
2557 
2558 } // anon namespace
2559 
2560 gimple_opt_pass *
2561 make_pass_cselim (gcc::context *ctxt)
2562 {
2563   return new pass_cselim (ctxt);
2564 }
2565