xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/tree-ssa-ccp.c (revision 7d62b00eb9ad855ffcd7da46b41e23feb5476fac)
1 /* Conditional constant propagation pass for the GNU compiler.
2    Copyright (C) 2000-2020 Free Software Foundation, Inc.
3    Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
4    Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
5 
6 This file is part of GCC.
7 
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by the
10 Free Software Foundation; either version 3, or (at your option) any
11 later version.
12 
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16 for more details.
17 
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3.  If not see
20 <http://www.gnu.org/licenses/>.  */
21 
22 /* Conditional constant propagation (CCP) is based on the SSA
23    propagation engine (tree-ssa-propagate.c).  Constant assignments of
24    the form VAR = CST are propagated from the assignments into uses of
25    VAR, which in turn may generate new constants.  The simulation uses
26    a four level lattice to keep track of constant values associated
27    with SSA names.  Given an SSA name V_i, it may take one of the
28    following values:
29 
30 	UNINITIALIZED   ->  the initial state of the value.  This value
31 			    is replaced with a correct initial value
32 			    the first time the value is used, so the
33 			    rest of the pass does not need to care about
34 			    it.  Using this value simplifies initialization
35 			    of the pass, and prevents us from needlessly
36 			    scanning statements that are never reached.
37 
38 	UNDEFINED	->  V_i is a local variable whose definition
39 			    has not been processed yet.  Therefore we
40 			    don't yet know if its value is a constant
41 			    or not.
42 
43 	CONSTANT	->  V_i has been found to hold a constant
44 			    value C.
45 
46 	VARYING		->  V_i cannot take a constant value, or if it
47 			    does, it is not possible to determine it
48 			    at compile time.
49 
50    The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:
51 
52    1- In ccp_visit_stmt, we are interested in assignments whose RHS
53       evaluates into a constant and conditional jumps whose predicate
54       evaluates into a boolean true or false.  When an assignment of
55       the form V_i = CONST is found, V_i's lattice value is set to
56       CONSTANT and CONST is associated with it.  This causes the
57       propagation engine to add all the SSA edges coming out of the
58       assignment into the worklists, so that statements that use V_i
59       can be visited.
60 
61       If the statement is a conditional with a constant predicate, we
62       mark the outgoing edges as executable or not executable
63       depending on the predicate's value.  This is then used when
64       visiting PHI nodes to know when a PHI argument can be ignored.
65 
66 
67    2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
68       same constant C, then the LHS of the PHI is set to C.  This
69       evaluation is known as the "meet operation".  Since one of the
70       goals of this evaluation is to optimistically return constant
71       values as often as possible, it uses two main shortcuts:
72 
73       - If an argument is flowing in through a non-executable edge, it
74 	is ignored.  This is useful in cases like this:
75 
76 			if (PRED)
77 			  a_9 = 3;
78 			else
79 			  a_10 = 100;
80 			a_11 = PHI (a_9, a_10)
81 
82 	If PRED is known to always evaluate to false, then we can
83 	assume that a_11 will always take its value from a_10, meaning
84 	that instead of considering it VARYING (a_9 and a_10 have
85 	different values), we can consider it CONSTANT 100.
86 
87       - If an argument has an UNDEFINED value, then it does not affect
88 	the outcome of the meet operation.  If a variable V_i has an
89 	UNDEFINED value, it means that either its defining statement
90 	hasn't been visited yet or V_i has no defining statement, in
91 	which case the original symbol 'V' is being used
92 	uninitialized.  Since 'V' is a local variable, the compiler
93 	may assume any initial value for it.
94 
95 
96    After propagation, every variable V_i that ends up with a lattice
97    value of CONSTANT will have the associated constant value in the
98    array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
99    final substitution and folding.
100 
101    This algorithm uses wide-ints at the max precision of the target.
102    This means that, with one uninteresting exception, variables with
103    UNSIGNED types never go to VARYING because the bits above the
104    precision of the type of the variable are always zero.  The
105    uninteresting case is a variable of UNSIGNED type that has the
106    maximum precision of the target.  Such variables can go to VARYING,
107    but this causes no loss of information since these variables will
108    never be extended.
109 
110    References:
111 
112      Constant propagation with conditional branches,
113      Wegman and Zadeck, ACM TOPLAS 13(2):181-210.
114 
115      Building an Optimizing Compiler,
116      Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.
117 
118      Advanced Compiler Design and Implementation,
119      Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */
120 
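/* Editorial illustration (not part of the original file): a hypothetical
   input showing the machinery described above end to end.  Given

	int f (void)
	{
	  int p = 0;
	  int a;
	  if (p)
	    a = 3;
	  else
	    a = 100;
	  return a + 1;
	}

   p_1 gets lattice value CONSTANT 0, the GIMPLE_COND predicate therefore
   evaluates to false, the edge reaching "a = 3" is marked not executable,
   the PHI merging the two assignments evaluates to CONSTANT 100, and the
   final substitute_and_fold step turns the returned value into 101.  */
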
121 #include "config.h"
122 #include "system.h"
123 #include "coretypes.h"
124 #include "backend.h"
125 #include "target.h"
126 #include "tree.h"
127 #include "gimple.h"
128 #include "tree-pass.h"
129 #include "ssa.h"
130 #include "gimple-pretty-print.h"
131 #include "fold-const.h"
132 #include "gimple-fold.h"
133 #include "tree-eh.h"
134 #include "gimplify.h"
135 #include "gimple-iterator.h"
136 #include "tree-cfg.h"
137 #include "tree-ssa-propagate.h"
138 #include "dbgcnt.h"
139 #include "builtins.h"
140 #include "cfgloop.h"
141 #include "stor-layout.h"
142 #include "optabs-query.h"
143 #include "tree-ssa-ccp.h"
144 #include "tree-dfa.h"
145 #include "diagnostic-core.h"
146 #include "stringpool.h"
147 #include "attribs.h"
148 #include "tree-vector-builder.h"
149 #include "cgraph.h"
150 #include "alloc-pool.h"
151 #include "symbol-summary.h"
152 #include "ipa-utils.h"
153 #include "ipa-prop.h"
154 
155 /* Possible lattice values.  */
156 typedef enum
157 {
158   UNINITIALIZED,
159   UNDEFINED,
160   CONSTANT,
161   VARYING
162 } ccp_lattice_t;
163 
164 class ccp_prop_value_t {
165 public:
166     /* Lattice value.  */
167     ccp_lattice_t lattice_val;
168 
169     /* Propagated value.  */
170     tree value;
171 
172     /* Mask that applies to the propagated value during CCP.  For X
173        with a CONSTANT lattice value X & ~mask == value & ~mask.  The
174        zero bits in the mask cover constant values.  The ones mean no
175        information.  */
176     widest_int mask;
177 };
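
/* Editorial example (not part of the original file): with value == 0x4
   and mask == 0x3, the invariant X & ~mask == value & ~mask says that
   every bit of X above bit 1 is known (bit 2 set, higher bits zero)
   while the two low bits are unknown, i.e. X is one of 4, 5, 6 or 7.  */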
178 
179 class ccp_propagate : public ssa_propagation_engine
180 {
181  public:
182   enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
183   enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
184 };
185 
186 /* Array of propagated constant values.  After propagation,
187    CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  If
188    the constant is held in an SSA name representing a memory store
189    (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
190    memory reference used to store (i.e., the LHS of the assignment
191    doing the store).  */
192 static ccp_prop_value_t *const_val;
193 static unsigned n_const_val;
194 
195 static void canonicalize_value (ccp_prop_value_t *);
196 static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *);
197 
198 /* Dump constant propagation value VAL to file OUTF prefixed by PREFIX.  */
199 
200 static void
201 dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
202 {
203   switch (val.lattice_val)
204     {
205     case UNINITIALIZED:
206       fprintf (outf, "%sUNINITIALIZED", prefix);
207       break;
208     case UNDEFINED:
209       fprintf (outf, "%sUNDEFINED", prefix);
210       break;
211     case VARYING:
212       fprintf (outf, "%sVARYING", prefix);
213       break;
214     case CONSTANT:
215       if (TREE_CODE (val.value) != INTEGER_CST
216 	  || val.mask == 0)
217 	{
218 	  fprintf (outf, "%sCONSTANT ", prefix);
219 	  print_generic_expr (outf, val.value, dump_flags);
220 	}
221       else
222 	{
223 	  widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
224 					     val.mask);
225 	  fprintf (outf, "%sCONSTANT ", prefix);
226 	  print_hex (cval, outf);
227 	  fprintf (outf, " (");
228 	  print_hex (val.mask, outf);
229 	  fprintf (outf, ")");
230 	}
231       break;
232     default:
233       gcc_unreachable ();
234     }
235 }
236 
237 
238 /* Print lattice value VAL to stderr.  */
239 
240 void debug_lattice_value (ccp_prop_value_t val);
241 
242 DEBUG_FUNCTION void
243 debug_lattice_value (ccp_prop_value_t val)
244 {
245   dump_lattice_value (stderr, "", val);
246   fprintf (stderr, "\n");
247 }
248 
249 /* Extend NONZERO_BITS to a full mask, based on SGN.  */
250 
251 static widest_int
252 extend_mask (const wide_int &nonzero_bits, signop sgn)
253 {
254   return widest_int::from (nonzero_bits, sgn);
255 }
256 
257 /* Compute a default value for variable VAR and store it in the
258    CONST_VAL array.  The following rules are used to get default
259    values:
260 
261    1- Global and static variables that are declared constant are
262       considered CONSTANT.
263 
264    2- Any other value is considered UNDEFINED.  This is useful when
265       considering PHI nodes.  PHI arguments that are undefined do not
266       change the constant value of the PHI node, which allows for more
267       constants to be propagated.
268 
269    3- Variables defined by statements other than assignments and PHI
270       nodes are considered VARYING.
271 
272    4- Initial values of variables that are not GIMPLE registers are
273       considered VARYING.  */
274 
275 static ccp_prop_value_t
276 get_default_value (tree var)
277 {
278   ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
279   gimple *stmt;
280 
281   stmt = SSA_NAME_DEF_STMT (var);
282 
283   if (gimple_nop_p (stmt))
284     {
285       /* Variables defined by an empty statement are those used
286 	 before being initialized.  If VAR is a local variable, we
287 	 can assume initially that it is UNDEFINED, otherwise we must
288 	 consider it VARYING.  */
289       if (!virtual_operand_p (var)
290 	  && SSA_NAME_VAR (var)
291 	  && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
292 	val.lattice_val = UNDEFINED;
293       else
294 	{
295 	  val.lattice_val = VARYING;
296 	  val.mask = -1;
297 	  if (flag_tree_bit_ccp)
298 	    {
299 	      wide_int nonzero_bits = get_nonzero_bits (var);
300 	      tree value;
301 	      widest_int mask;
302 
303 	      if (SSA_NAME_VAR (var)
304 		  && TREE_CODE (SSA_NAME_VAR (var)) == PARM_DECL
305 		  && ipcp_get_parm_bits (SSA_NAME_VAR (var), &value, &mask))
306 		{
307 		  val.lattice_val = CONSTANT;
308 		  val.value = value;
309 		  widest_int ipa_value = wi::to_widest (value);
310 		  /* Unknown bits from IPA CP must be equal to zero.  */
311 		  gcc_assert (wi::bit_and (ipa_value, mask) == 0);
312 		  val.mask = mask;
313 		  if (nonzero_bits != -1)
314 		    val.mask &= extend_mask (nonzero_bits,
315 					     TYPE_SIGN (TREE_TYPE (var)));
316 		}
317 	      else if (nonzero_bits != -1)
318 		{
319 		  val.lattice_val = CONSTANT;
320 		  val.value = build_zero_cst (TREE_TYPE (var));
321 		  val.mask = extend_mask (nonzero_bits,
322 					  TYPE_SIGN (TREE_TYPE (var)));
323 		}
324 	    }
325 	}
326     }
327   else if (is_gimple_assign (stmt))
328     {
329       tree cst;
330       if (gimple_assign_single_p (stmt)
331 	  && DECL_P (gimple_assign_rhs1 (stmt))
332 	  && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
333 	{
334 	  val.lattice_val = CONSTANT;
335 	  val.value = cst;
336 	}
337       else
338 	{
339 	  /* Any other variable defined by an assignment is considered
340 	     UNDEFINED.  */
341 	  val.lattice_val = UNDEFINED;
342 	}
343     }
344   else if ((is_gimple_call (stmt)
345 	    && gimple_call_lhs (stmt) != NULL_TREE)
346 	   || gimple_code (stmt) == GIMPLE_PHI)
347     {
348       /* A variable defined by a call or a PHI node is considered
349 	 UNDEFINED.  */
350       val.lattice_val = UNDEFINED;
351     }
352   else
353     {
354       /* Otherwise, VAR will never take on a constant value.  */
355       val.lattice_val = VARYING;
356       val.mask = -1;
357     }
358 
359   return val;
360 }
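
/* Editorial example (not part of the original file), applying the rules
   documented before get_default_value: the default definition of an
   uninitialized local "int x;" starts out UNDEFINED; an assignment
   "y_2 = k" loading a static const int initialized to 42 starts out
   CONSTANT 42; the default definition of a PARM_DECL starts out VARYING,
   possibly narrowed to a partial CONSTANT by IPA bits or recorded
   nonzero bits.  */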
361 
362 
363 /* Get the constant value associated with variable VAR.  */
364 
365 static inline ccp_prop_value_t *
366 get_value (tree var)
367 {
368   ccp_prop_value_t *val;
369 
370   if (const_val == NULL
371       || SSA_NAME_VERSION (var) >= n_const_val)
372     return NULL;
373 
374   val = &const_val[SSA_NAME_VERSION (var)];
375   if (val->lattice_val == UNINITIALIZED)
376     *val = get_default_value (var);
377 
378   canonicalize_value (val);
379 
380   return val;
381 }
382 
383 /* Return the constant tree value associated with VAR.  */
384 
385 static inline tree
386 get_constant_value (tree var)
387 {
388   ccp_prop_value_t *val;
389   if (TREE_CODE (var) != SSA_NAME)
390     {
391       if (is_gimple_min_invariant (var))
392         return var;
393       return NULL_TREE;
394     }
395   val = get_value (var);
396   if (val
397       && val->lattice_val == CONSTANT
398       && (TREE_CODE (val->value) != INTEGER_CST
399 	  || val->mask == 0))
400     return val->value;
401   return NULL_TREE;
402 }
403 
404 /* Sets the value associated with VAR to VARYING.  */
405 
406 static inline void
407 set_value_varying (tree var)
408 {
409   ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];
410 
411   val->lattice_val = VARYING;
412   val->value = NULL_TREE;
413   val->mask = -1;
414 }
415 
416 /* For integer constants, make sure to drop TREE_OVERFLOW.  */
417 
418 static void
419 canonicalize_value (ccp_prop_value_t *val)
420 {
421   if (val->lattice_val != CONSTANT)
422     return;
423 
424   if (TREE_OVERFLOW_P (val->value))
425     val->value = drop_tree_overflow (val->value);
426 }
427 
428 /* Return whether the lattice transition is valid.  */
429 
430 static bool
431 valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val)
432 {
433   /* Lattice transitions must always be monotonically increasing in
434      value.  */
435   if (old_val.lattice_val < new_val.lattice_val)
436     return true;
437 
438   if (old_val.lattice_val != new_val.lattice_val)
439     return false;
440 
441   if (!old_val.value && !new_val.value)
442     return true;
443 
444   /* Now both lattice values are CONSTANT.  */
445 
446   /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
447      when only a single copy edge is executable.  */
448   if (TREE_CODE (old_val.value) == SSA_NAME
449       && TREE_CODE (new_val.value) == SSA_NAME)
450     return true;
451 
452   /* Allow transitioning from a constant to a copy.  */
453   if (is_gimple_min_invariant (old_val.value)
454       && TREE_CODE (new_val.value) == SSA_NAME)
455     return true;
456 
457   /* Allow transitioning from PHI <&x, not executable> == &x
458      to PHI <&x, &y> == common alignment.  */
459   if (TREE_CODE (old_val.value) != INTEGER_CST
460       && TREE_CODE (new_val.value) == INTEGER_CST)
461     return true;
462 
463   /* Bit-lattices have to agree in the still valid bits.  */
464   if (TREE_CODE (old_val.value) == INTEGER_CST
465       && TREE_CODE (new_val.value) == INTEGER_CST)
466     return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
467 	    == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));
468 
469   /* Otherwise constant values have to agree.  */
470   if (operand_equal_p (old_val.value, new_val.value, 0))
471     return true;
472 
473   /* At least the kinds and types should agree now.  */
474   if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value)
475       || !types_compatible_p (TREE_TYPE (old_val.value),
476 			      TREE_TYPE (new_val.value)))
477     return false;
478 
479   /* For floats and !HONOR_NANS allow transitions from (partial) NaN
480      to non-NaN.  */
481   tree type = TREE_TYPE (new_val.value);
482   if (SCALAR_FLOAT_TYPE_P (type)
483       && !HONOR_NANS (type))
484     {
485       if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value)))
486 	return true;
487     }
488   else if (VECTOR_FLOAT_TYPE_P (type)
489 	   && !HONOR_NANS (type))
490     {
491       unsigned int count
492 	= tree_vector_builder::binary_encoded_nelts (old_val.value,
493 						     new_val.value);
494       for (unsigned int i = 0; i < count; ++i)
495 	if (!REAL_VALUE_ISNAN
496 	       (TREE_REAL_CST (VECTOR_CST_ENCODED_ELT (old_val.value, i)))
497 	    && !operand_equal_p (VECTOR_CST_ENCODED_ELT (old_val.value, i),
498 				 VECTOR_CST_ENCODED_ELT (new_val.value, i), 0))
499 	  return false;
500       return true;
501     }
502   else if (COMPLEX_FLOAT_TYPE_P (type)
503 	   && !HONOR_NANS (type))
504     {
505       if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value)))
506 	  && !operand_equal_p (TREE_REALPART (old_val.value),
507 			       TREE_REALPART (new_val.value), 0))
508 	return false;
509       if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value)))
510 	  && !operand_equal_p (TREE_IMAGPART (old_val.value),
511 			       TREE_IMAGPART (new_val.value), 0))
512 	return false;
513       return true;
514     }
515   return false;
516 }
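
/* Editorial example (not part of the original file): with
   new_val.mask == 1, a transition from CONSTANT 12 to CONSTANT 13 is
   valid because both values agree outside the unknown bit
   (12 & ~1 == 13 & ~1 == 12), whereas a transition to CONSTANT 14 with
   the same mask would be rejected.  */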
517 
518 /* Set the value for variable VAR to NEW_VAL.  Return true if the new
519    value is different from VAR's previous value.  */
520 
521 static bool
522 set_lattice_value (tree var, ccp_prop_value_t *new_val)
523 {
524   /* We can deal with old UNINITIALIZED values just fine here.  */
525   ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];
526 
527   canonicalize_value (new_val);
528 
529   /* We have to be careful to not go up the bitwise lattice
530      represented by the mask.  Instead of dropping to VARYING
531      use the meet operator to retain a conservative value.
532      Missed optimizations like PR65851 make this necessary.
533      It also ensures we converge to a stable lattice solution.  */
534   if (old_val->lattice_val != UNINITIALIZED)
535     ccp_lattice_meet (new_val, old_val);
536 
537   gcc_checking_assert (valid_lattice_transition (*old_val, *new_val));
538 
539   /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
540      caller that this was a non-transition.  */
541   if (old_val->lattice_val != new_val->lattice_val
542       || (new_val->lattice_val == CONSTANT
543 	  && (TREE_CODE (new_val->value) != TREE_CODE (old_val->value)
544 	      || (TREE_CODE (new_val->value) == INTEGER_CST
545 		  && (new_val->mask != old_val->mask
546 		      || (wi::bit_and_not (wi::to_widest (old_val->value),
547 					   new_val->mask)
548 			  != wi::bit_and_not (wi::to_widest (new_val->value),
549 					      new_val->mask))))
550 	      || (TREE_CODE (new_val->value) != INTEGER_CST
551 		  && !operand_equal_p (new_val->value, old_val->value, 0)))))
552     {
553       /* ???  We would like to delay creation of INTEGER_CSTs from
554 	 partially constant values here.  */
555 
556       if (dump_file && (dump_flags & TDF_DETAILS))
557 	{
558 	  dump_lattice_value (dump_file, "Lattice value changed to ", *new_val);
559 	  fprintf (dump_file, ".  Adding SSA edges to worklist.\n");
560 	}
561 
562       *old_val = *new_val;
563 
564       gcc_assert (new_val->lattice_val != UNINITIALIZED);
565       return true;
566     }
567 
568   return false;
569 }
570 
571 static ccp_prop_value_t get_value_for_expr (tree, bool);
572 static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
573 void bit_value_binop (enum tree_code, signop, int, widest_int *, widest_int *,
574 		      signop, int, const widest_int &, const widest_int &,
575 		      signop, int, const widest_int &, const widest_int &);
576 
577 /* Return a widest_int that can be used for bitwise simplifications
578    from VAL.  */
579 
580 static widest_int
581 value_to_wide_int (ccp_prop_value_t val)
582 {
583   if (val.value
584       && TREE_CODE (val.value) == INTEGER_CST)
585     return wi::to_widest (val.value);
586 
587   return 0;
588 }
589 
590 /* Return the value for the address expression EXPR based on alignment
591    information.  */
592 
593 static ccp_prop_value_t
594 get_value_from_alignment (tree expr)
595 {
596   tree type = TREE_TYPE (expr);
597   ccp_prop_value_t val;
598   unsigned HOST_WIDE_INT bitpos;
599   unsigned int align;
600 
601   gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
602 
603   get_pointer_alignment_1 (expr, &align, &bitpos);
604   val.mask = wi::bit_and_not
605     (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
606      ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
607      : -1,
608      align / BITS_PER_UNIT - 1);
609   val.lattice_val
610     = wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT;
611   if (val.lattice_val == CONSTANT)
612     val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
613   else
614     val.value = NULL_TREE;
615 
616   return val;
617 }
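
/* Editorial example (not part of the original file): for the address
   &a[1] of a 4-byte element in an array known to be 16-byte aligned,
   get_pointer_alignment_1 reports align == 128 bits and bitpos == 32
   bits, so the result is CONSTANT with value == 4 and a mask whose low
   four bits are zero: the pointer is known to be 4 more than a multiple
   of 16, and nothing is known about the higher bits.  */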
618 
619 /* Return the value for the tree operand EXPR.  If FOR_BITS_P is true
620    return constant bits extracted from alignment information for
621    invariant addresses.  */
622 
623 static ccp_prop_value_t
624 get_value_for_expr (tree expr, bool for_bits_p)
625 {
626   ccp_prop_value_t val;
627 
628   if (TREE_CODE (expr) == SSA_NAME)
629     {
630       ccp_prop_value_t *val_ = get_value (expr);
631       if (val_)
632 	val = *val_;
633       else
634 	{
635 	  val.lattice_val = VARYING;
636 	  val.value = NULL_TREE;
637 	  val.mask = -1;
638 	}
639       if (for_bits_p
640 	  && val.lattice_val == CONSTANT)
641 	{
642 	  if (TREE_CODE (val.value) == ADDR_EXPR)
643 	    val = get_value_from_alignment (val.value);
644 	  else if (TREE_CODE (val.value) != INTEGER_CST)
645 	    {
646 	      val.lattice_val = VARYING;
647 	      val.value = NULL_TREE;
648 	      val.mask = -1;
649 	    }
650 	}
651       /* Fall back to a copy value.  */
652       if (!for_bits_p
653 	  && val.lattice_val == VARYING
654 	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr))
655 	{
656 	  val.lattice_val = CONSTANT;
657 	  val.value = expr;
658 	  val.mask = -1;
659 	}
660     }
661   else if (is_gimple_min_invariant (expr)
662 	   && (!for_bits_p || TREE_CODE (expr) == INTEGER_CST))
663     {
664       val.lattice_val = CONSTANT;
665       val.value = expr;
666       val.mask = 0;
667       canonicalize_value (&val);
668     }
669   else if (TREE_CODE (expr) == ADDR_EXPR)
670     val = get_value_from_alignment (expr);
671   else
672     {
673       val.lattice_val = VARYING;
674       val.mask = -1;
675       val.value = NULL_TREE;
676     }
677 
678   if (val.lattice_val == VARYING
679       && TYPE_UNSIGNED (TREE_TYPE (expr)))
680     val.mask = wi::zext (val.mask, TYPE_PRECISION (TREE_TYPE (expr)));
681 
682   return val;
683 }
684 
685 /* Return the likely CCP lattice value for STMT.
686 
687    If STMT has no operands, then return CONSTANT.
688 
689    Else if undefinedness of operands of STMT cause its value to be
690    undefined, then return UNDEFINED.
691 
692    Else if any operands of STMT are constants, then return CONSTANT.
693 
694    Else return VARYING.  */
695 
696 static ccp_lattice_t
697 likely_value (gimple *stmt)
698 {
699   bool has_constant_operand, has_undefined_operand, all_undefined_operands;
700   bool has_nsa_operand;
701   tree use;
702   ssa_op_iter iter;
703   unsigned i;
704 
705   enum gimple_code code = gimple_code (stmt);
706 
707   /* This function appears to be called only for assignments, calls,
708      conditionals, and switches, due to the logic in visit_stmt.  */
709   gcc_assert (code == GIMPLE_ASSIGN
710               || code == GIMPLE_CALL
711               || code == GIMPLE_COND
712               || code == GIMPLE_SWITCH);
713 
714   /* If the statement has volatile operands, it won't fold to a
715      constant value.  */
716   if (gimple_has_volatile_ops (stmt))
717     return VARYING;
718 
719   /* Arrive here for more complex cases.  */
720   has_constant_operand = false;
721   has_undefined_operand = false;
722   all_undefined_operands = true;
723   has_nsa_operand = false;
724   FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
725     {
726       ccp_prop_value_t *val = get_value (use);
727 
728       if (val && val->lattice_val == UNDEFINED)
729 	has_undefined_operand = true;
730       else
731 	all_undefined_operands = false;
732 
733       if (val && val->lattice_val == CONSTANT)
734 	has_constant_operand = true;
735 
736       if (SSA_NAME_IS_DEFAULT_DEF (use)
737 	  || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use)))
738 	has_nsa_operand = true;
739     }
740 
741   /* There may be constants in regular rhs operands.  For calls we
742      have to ignore the lhs, fndecl and static chain; for other
743      statements only the lhs.  */
744   for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
745        i < gimple_num_ops (stmt); ++i)
746     {
747       tree op = gimple_op (stmt, i);
748       if (!op || TREE_CODE (op) == SSA_NAME)
749 	continue;
750       if (is_gimple_min_invariant (op))
751 	has_constant_operand = true;
752     }
753 
754   if (has_constant_operand)
755     all_undefined_operands = false;
756 
757   if (has_undefined_operand
758       && code == GIMPLE_CALL
759       && gimple_call_internal_p (stmt))
760     switch (gimple_call_internal_fn (stmt))
761       {
762 	/* These 3 internal functions use the first argument just as a
763 	   magic way to find out a decl uid.  */
764       case IFN_GOMP_SIMD_LANE:
765       case IFN_GOMP_SIMD_VF:
766       case IFN_GOMP_SIMD_LAST_LANE:
767 	has_undefined_operand = false;
768 	break;
769       default:
770 	break;
771       }
772 
773   /* If the operation combines operands like COMPLEX_EXPR make sure to
774      not mark the result UNDEFINED if only one part of the result is
775      undefined.  */
776   if (has_undefined_operand && all_undefined_operands)
777     return UNDEFINED;
778   else if (code == GIMPLE_ASSIGN && has_undefined_operand)
779     {
780       switch (gimple_assign_rhs_code (stmt))
781 	{
782 	/* Unary operators are handled with all_undefined_operands.  */
783 	case PLUS_EXPR:
784 	case MINUS_EXPR:
785 	case POINTER_PLUS_EXPR:
786 	case BIT_XOR_EXPR:
787 	  /* Not MIN_EXPR, MAX_EXPR.  One VARYING operand may be selected.
788 	     Not bitwise operators, one VARYING operand may specify the
789 	     result completely.
790 	     Not logical operators for the same reason, apart from XOR.
791 	     Not COMPLEX_EXPR as one VARYING operand makes the result partly
792 	     not UNDEFINED.  Not *DIV_EXPR, comparisons and shifts because
793 	     the undefined operand may be promoted.  */
794 	  return UNDEFINED;
795 
796 	case ADDR_EXPR:
797 	  /* If any part of an address is UNDEFINED, like the index
798 	     of an ARRAY_EXPR, then treat the result as UNDEFINED.  */
799 	  return UNDEFINED;
800 
801 	default:
802 	  ;
803 	}
804     }
805   /* If there was an UNDEFINED operand but the result need not be UNDEFINED,
806      fall back to CONSTANT.  During iteration UNDEFINED may still drop
807      to CONSTANT.  */
808   if (has_undefined_operand)
809     return CONSTANT;
810 
811   /* We do not consider virtual operands here -- load from read-only
812      memory may have only VARYING virtual operands, but still be
813      constant.  Also we can combine the stmt with definitions from
814      operands whose definitions are not simulated again.  */
815   if (has_constant_operand
816       || has_nsa_operand
817       || gimple_references_memory_p (stmt))
818     return CONSTANT;
819 
820   return VARYING;
821 }
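
/* Editorial example (not part of the original file): for
   "z_3 = x_1 + 1" with x_1 UNDEFINED, likely_value returns UNDEFINED,
   since any value may be assumed for x_1.  For "z_3 = x_1 & y_2" it
   falls back to CONSTANT instead, because a zero bit in y_2 would fix
   the corresponding result bit regardless of x_1.  */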
822 
823 /* Returns true if STMT cannot be constant.  */
824 
825 static bool
826 surely_varying_stmt_p (gimple *stmt)
827 {
828   /* If the statement has operands that we cannot handle, it cannot be
829      constant.  */
830   if (gimple_has_volatile_ops (stmt))
831     return true;
832 
833   /* If it is a call that does not return a value, or is a direct
834      call to a function that is neither a builtin nor has the
835      assume_aligned/alloc_align attribute, it is varying.  */
836   if (is_gimple_call (stmt))
837     {
838       tree fndecl, fntype = gimple_call_fntype (stmt);
839       if (!gimple_call_lhs (stmt)
840 	  || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
841 	      && !fndecl_built_in_p (fndecl)
842 	      && !lookup_attribute ("assume_aligned",
843 				    TYPE_ATTRIBUTES (fntype))
844 	      && !lookup_attribute ("alloc_align",
845 				    TYPE_ATTRIBUTES (fntype))))
846 	return true;
847     }
848 
849   /* Any other store operation is not interesting.  */
850   else if (gimple_vdef (stmt))
851     return true;
852 
853   /* Anything other than assignments, conditional jumps, switches
854      and calls is not interesting for CCP.  */
855   if (gimple_code (stmt) != GIMPLE_ASSIGN
856       && gimple_code (stmt) != GIMPLE_COND
857       && gimple_code (stmt) != GIMPLE_SWITCH
858       && gimple_code (stmt) != GIMPLE_CALL)
859     return true;
860 
861   return false;
862 }
863 
864 /* Initialize local data structures for CCP.  */
865 
866 static void
867 ccp_initialize (void)
868 {
869   basic_block bb;
870 
871   n_const_val = num_ssa_names;
872   const_val = XCNEWVEC (ccp_prop_value_t, n_const_val);
873 
874   /* Initialize simulation flags for PHI nodes and statements.  */
875   FOR_EACH_BB_FN (bb, cfun)
876     {
877       gimple_stmt_iterator i;
878 
879       for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
880         {
881 	  gimple *stmt = gsi_stmt (i);
882 	  bool is_varying;
883 
884 	  /* If the statement is a control insn, we must simulate it
885 	     at least once.  Failure to do so means that its outgoing
886 	     edges will never get added.  */
887 	  if (stmt_ends_bb_p (stmt))
888 	    is_varying = false;
889 	  else
890 	    is_varying = surely_varying_stmt_p (stmt);
891 
892 	  if (is_varying)
893 	    {
894 	      tree def;
895 	      ssa_op_iter iter;
896 
897 	      /* If the statement will not produce a constant, mark
898 		 all its outputs VARYING.  */
899 	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
900 		set_value_varying (def);
901 	    }
902           prop_set_simulate_again (stmt, !is_varying);
903 	}
904     }
905 
906   /* Now process PHI nodes.  We never clear the simulate_again flag on
907      phi nodes, since we do not know which edges are executable yet,
908      except for phi nodes for virtual operands when we do not do store ccp.  */
909   FOR_EACH_BB_FN (bb, cfun)
910     {
911       gphi_iterator i;
912 
913       for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
914         {
915           gphi *phi = i.phi ();
916 
917 	  if (virtual_operand_p (gimple_phi_result (phi)))
918             prop_set_simulate_again (phi, false);
919 	  else
920             prop_set_simulate_again (phi, true);
921 	}
922     }
923 }
924 
925 /* Debug count support.  Reset the values of SSA names to
926    VARYING when the total number of SSA names analyzed is
927    beyond the specified debug count.  */
928 
929 static void
930 do_dbg_cnt (void)
931 {
932   unsigned i;
933   for (i = 0; i < num_ssa_names; i++)
934     {
935       if (!dbg_cnt (ccp))
936         {
937           const_val[i].lattice_val = VARYING;
938 	  const_val[i].mask = -1;
939           const_val[i].value = NULL_TREE;
940         }
941     }
942 }
943 
944 
945 /* We want to provide our own GET_VALUE and FOLD_STMT virtual methods.  */
946 class ccp_folder : public substitute_and_fold_engine
947 {
948  public:
949   tree get_value (tree) FINAL OVERRIDE;
950   bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
951 };
952 
953 /* This method just wraps GET_CONSTANT_VALUE for now.  Over time
954    naked calls to GET_CONSTANT_VALUE should be eliminated in favor
955    of calling member functions.  */
956 
957 tree
958 ccp_folder::get_value (tree op)
959 {
960   return get_constant_value (op);
961 }
962 
963 /* Do final substitution of propagated values, cleanup the flowgraph and
964    free allocated storage.  If NONZERO_P, record nonzero bits.
965 
966    Return TRUE when something was optimized.  */
967 
968 static bool
969 ccp_finalize (bool nonzero_p)
970 {
971   bool something_changed;
972   unsigned i;
973   tree name;
974 
975   do_dbg_cnt ();
976 
977   /* Derive alignment and misalignment information from partially
978      constant pointers in the lattice or nonzero bits from partially
979      constant integers.  */
980   FOR_EACH_SSA_NAME (i, name, cfun)
981     {
982       ccp_prop_value_t *val;
983       unsigned int tem, align;
984 
985       if (!POINTER_TYPE_P (TREE_TYPE (name))
986 	  && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
987 	      /* Don't record nonzero bits before IPA to avoid
988 		 using too much memory.  */
989 	      || !nonzero_p))
990 	continue;
991 
992       val = get_value (name);
993       if (val->lattice_val != CONSTANT
994 	  || TREE_CODE (val->value) != INTEGER_CST
995 	  || val->mask == 0)
996 	continue;
997 
998       if (POINTER_TYPE_P (TREE_TYPE (name)))
999 	{
1000 	  /* Trailing mask bits specify the alignment, trailing value
1001 	     bits the misalignment.  */
1002 	  tem = val->mask.to_uhwi ();
1003 	  align = least_bit_hwi (tem);
1004 	  if (align > 1)
1005 	    set_ptr_info_alignment (get_ptr_info (name), align,
1006 				    (TREE_INT_CST_LOW (val->value)
1007 				     & (align - 1)));
1008 	}
1009       else
1010 	{
1011 	  unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
1012 	  wide_int nonzero_bits
1013 	    = (wide_int::from (val->mask, precision, UNSIGNED)
1014 	       | wi::to_wide (val->value));
1015 	  nonzero_bits &= get_nonzero_bits (name);
1016 	  set_nonzero_bits (name, nonzero_bits);
1017 	}
1018     }
1019 
1020   /* Perform substitutions based on the known constant values.  */
1021   class ccp_folder ccp_folder;
1022   something_changed = ccp_folder.substitute_and_fold ();
1023 
1024   free (const_val);
1025   const_val = NULL;
1026   return something_changed;
1027 }
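
/* Editorial example (not part of the original file): a pointer whose
   final lattice value is value == 0x4 with mask == -8 (only the low
   three bits known) gives least_bit_hwi (mask) == 8, so ccp_finalize
   records the SSA name as 8-byte aligned with misalignment
   0x4 & 7 == 4.  */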
1028 
1029 
1030 /* Compute the meet operator between *VAL1 and *VAL2.  Store the result
1031    in VAL1.
1032 
1033    		any  M UNDEFINED   = any
1034 		any  M VARYING     = VARYING
1035 		Ci   M Cj	   = Ci		if (i == j)
1036 		Ci   M Cj	   = VARYING	if (i != j)
1037    */
1038 
1039 static void
1040 ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
1041 {
1042   if (val1->lattice_val == UNDEFINED
1043       /* For UNDEFINED M SSA we can't always use the SSA name because
1044          its definition may not dominate the PHI node.  Doing optimistic
1045 	 copy propagation also causes a lot of gcc.dg/uninit-pred*.c FAILs.  */
1046       && (val2->lattice_val != CONSTANT
1047 	  || TREE_CODE (val2->value) != SSA_NAME))
1048     {
1049       /* UNDEFINED M any = any   */
1050       *val1 = *val2;
1051     }
1052   else if (val2->lattice_val == UNDEFINED
1053 	   /* See above.  */
1054 	   && (val1->lattice_val != CONSTANT
1055 	       || TREE_CODE (val1->value) != SSA_NAME))
1056     {
1057       /* any M UNDEFINED = any
1058          Nothing to do.  VAL1 already contains the value we want.  */
1059       ;
1060     }
1061   else if (val1->lattice_val == VARYING
1062            || val2->lattice_val == VARYING)
1063     {
1064       /* any M VARYING = VARYING.  */
1065       val1->lattice_val = VARYING;
1066       val1->mask = -1;
1067       val1->value = NULL_TREE;
1068     }
1069   else if (val1->lattice_val == CONSTANT
1070 	   && val2->lattice_val == CONSTANT
1071 	   && TREE_CODE (val1->value) == INTEGER_CST
1072 	   && TREE_CODE (val2->value) == INTEGER_CST)
1073     {
1074       /* Ci M Cj = Ci		if (i == j)
1075 	 Ci M Cj = VARYING	if (i != j)
1076 
1077          For INTEGER_CSTs mask unequal bits.  If no equal bits remain,
1078 	 drop to varying.  */
1079       val1->mask = (val1->mask | val2->mask
1080 		    | (wi::to_widest (val1->value)
1081 		       ^ wi::to_widest (val2->value)));
1082       if (wi::sext (val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1)
1083 	{
1084 	  val1->lattice_val = VARYING;
1085 	  val1->value = NULL_TREE;
1086 	}
1087     }
1088   else if (val1->lattice_val == CONSTANT
1089 	   && val2->lattice_val == CONSTANT
1090 	   && operand_equal_p (val1->value, val2->value, 0))
1091     {
1092       /* Ci M Cj = Ci		if (i == j)
1093 	 Ci M Cj = VARYING	if (i != j)
1094 
1095          VAL1 already contains the value we want for equivalent values.  */
1096     }
1097   else if (val1->lattice_val == CONSTANT
1098 	   && val2->lattice_val == CONSTANT
1099 	   && (TREE_CODE (val1->value) == ADDR_EXPR
1100 	       || TREE_CODE (val2->value) == ADDR_EXPR))
1101     {
1102       /* When unequal addresses are involved, try meeting for
1103 	 alignment.  */
1104       ccp_prop_value_t tem = *val2;
1105       if (TREE_CODE (val1->value) == ADDR_EXPR)
1106 	*val1 = get_value_for_expr (val1->value, true);
1107       if (TREE_CODE (val2->value) == ADDR_EXPR)
1108 	tem = get_value_for_expr (val2->value, true);
1109       ccp_lattice_meet (val1, &tem);
1110     }
1111   else
1112     {
1113       /* Any other combination is VARYING.  */
1114       val1->lattice_val = VARYING;
1115       val1->mask = -1;
1116       val1->value = NULL_TREE;
1117     }
1118 }
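
/* Editorial example (not part of the original file): meeting
   CONSTANT 4 (mask 0) with CONSTANT 6 (mask 0) yields
   mask == 0 | 0 | (4 ^ 6) == 2, so the result stays CONSTANT with
   value 4 and bit 1 unknown; only when no known bits remain within the
   type's precision does the meet drop to VARYING.  */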
1119 
1120 
1121 /* Loop through the PHI_NODE's parameters for BLOCK and compare their
1122    lattice values to determine PHI_NODE's lattice value.  The value of a
1123    PHI node is determined by calling ccp_lattice_meet with all the arguments
1124    of the PHI node that are incoming via executable edges.  */
1125 
1126 enum ssa_prop_result
1127 ccp_propagate::visit_phi (gphi *phi)
1128 {
1129   unsigned i;
1130   ccp_prop_value_t new_val;
1131 
1132   if (dump_file && (dump_flags & TDF_DETAILS))
1133     {
1134       fprintf (dump_file, "\nVisiting PHI node: ");
1135       print_gimple_stmt (dump_file, phi, 0, dump_flags);
1136     }
1137 
1138   new_val.lattice_val = UNDEFINED;
1139   new_val.value = NULL_TREE;
1140   new_val.mask = 0;
1141 
1142   bool first = true;
1143   bool non_exec_edge = false;
1144   for (i = 0; i < gimple_phi_num_args (phi); i++)
1145     {
1146       /* Compute the meet operator over all the PHI arguments flowing
1147 	 through executable edges.  */
1148       edge e = gimple_phi_arg_edge (phi, i);
1149 
1150       if (dump_file && (dump_flags & TDF_DETAILS))
1151 	{
1152 	  fprintf (dump_file,
1153 	      "\tArgument #%d (%d -> %d %sexecutable)\n",
1154 	      i, e->src->index, e->dest->index,
1155 	      (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
1156 	}
1157 
1158       /* If the incoming edge is executable, compute the meet operator for
1159 	 the existing value of the PHI node and the current PHI argument.  */
1160       if (e->flags & EDGE_EXECUTABLE)
1161 	{
1162 	  tree arg = gimple_phi_arg (phi, i)->def;
1163 	  ccp_prop_value_t arg_val = get_value_for_expr (arg, false);
1164 
1165 	  if (first)
1166 	    {
1167 	      new_val = arg_val;
1168 	      first = false;
1169 	    }
1170 	  else
1171 	    ccp_lattice_meet (&new_val, &arg_val);
1172 
1173 	  if (dump_file && (dump_flags & TDF_DETAILS))
1174 	    {
1175 	      fprintf (dump_file, "\t");
1176 	      print_generic_expr (dump_file, arg, dump_flags);
1177 	      dump_lattice_value (dump_file, "\tValue: ", arg_val);
1178 	      fprintf (dump_file, "\n");
1179 	    }
1180 
1181 	  if (new_val.lattice_val == VARYING)
1182 	    break;
1183 	}
1184       else
1185 	non_exec_edge = true;
1186     }
1187 
1188   /* In case there were non-executable edges and the value is a copy,
1189      make sure its definition dominates the PHI node.  */
1190   if (non_exec_edge
1191       && new_val.lattice_val == CONSTANT
1192       && TREE_CODE (new_val.value) == SSA_NAME
1193       && ! SSA_NAME_IS_DEFAULT_DEF (new_val.value)
1194       && ! dominated_by_p (CDI_DOMINATORS, gimple_bb (phi),
1195 			   gimple_bb (SSA_NAME_DEF_STMT (new_val.value))))
1196     {
1197       new_val.lattice_val = VARYING;
1198       new_val.value = NULL_TREE;
1199       new_val.mask = -1;
1200     }
1201 
1202   if (dump_file && (dump_flags & TDF_DETAILS))
1203     {
1204       dump_lattice_value (dump_file, "\n    PHI node value: ", new_val);
1205       fprintf (dump_file, "\n\n");
1206     }
1207 
1208   /* Make the transition to the new value.  */
1209   if (set_lattice_value (gimple_phi_result (phi), &new_val))
1210     {
1211       if (new_val.lattice_val == VARYING)
1212 	return SSA_PROP_VARYING;
1213       else
1214 	return SSA_PROP_INTERESTING;
1215     }
1216   else
1217     return SSA_PROP_NOT_INTERESTING;
1218 }
1219 
1220 /* Return the constant value for OP or OP otherwise.  */
1221 
1222 static tree
1223 valueize_op (tree op)
1224 {
1225   if (TREE_CODE (op) == SSA_NAME)
1226     {
1227       tree tem = get_constant_value (op);
1228       if (tem)
1229 	return tem;
1230     }
1231   return op;
1232 }
1233 
1234 /* Return the constant value for OP, but signal to not follow SSA
1235    edges if the definition may be simulated again.  */
1236 
1237 static tree
1238 valueize_op_1 (tree op)
1239 {
1240   if (TREE_CODE (op) == SSA_NAME)
1241     {
1242       /* If the definition may be simulated again we cannot follow
1243          this SSA edge as the SSA propagator does not necessarily
1244 	 re-visit the use.  */
1245       gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1246       if (!gimple_nop_p (def_stmt)
1247 	  && prop_simulate_again_p (def_stmt))
1248 	return NULL_TREE;
1249       tree tem = get_constant_value (op);
1250       if (tem)
1251 	return tem;
1252     }
1253   return op;
1254 }
1255 
1256 /* CCP specific front-end to the non-destructive constant folding
1257    routines.
1258 
1259    Attempt to simplify the RHS of STMT knowing that one or more
1260    operands are constants.
1261 
1262    If simplification is possible, return the simplified RHS,
1263    otherwise return the original RHS or NULL_TREE.  */
1264 
1265 static tree
1266 ccp_fold (gimple *stmt)
1267 {
1268   location_t loc = gimple_location (stmt);
1269   switch (gimple_code (stmt))
1270     {
1271     case GIMPLE_COND:
1272       {
1273         /* Handle comparison operators that can appear in GIMPLE form.  */
1274         tree op0 = valueize_op (gimple_cond_lhs (stmt));
1275         tree op1 = valueize_op (gimple_cond_rhs (stmt));
1276         enum tree_code code = gimple_cond_code (stmt);
1277         return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
1278       }
1279 
1280     case GIMPLE_SWITCH:
1281       {
1282 	/* Return the constant switch index.  */
1283         return valueize_op (gimple_switch_index (as_a <gswitch *> (stmt)));
1284       }
1285 
1286     case GIMPLE_ASSIGN:
1287     case GIMPLE_CALL:
1288       return gimple_fold_stmt_to_constant_1 (stmt,
1289 					     valueize_op, valueize_op_1);
1290 
1291     default:
1292       gcc_unreachable ();
1293     }
1294 }
1295 
1296 /* Apply the operation CODE in type TYPE to the value, mask pair
1297    RVAL and RMASK representing a value of type RTYPE and set
1298    the value, mask pair *VAL and *MASK to the result.  */
1299 
1300 void
1301 bit_value_unop (enum tree_code code, signop type_sgn, int type_precision,
1302 		widest_int *val, widest_int *mask,
1303 		signop rtype_sgn, int rtype_precision,
1304 		const widest_int &rval, const widest_int &rmask)
1305 {
1306   switch (code)
1307     {
1308     case BIT_NOT_EXPR:
1309       *mask = rmask;
1310       *val = ~rval;
1311       break;
1312 
1313     case NEGATE_EXPR:
1314       {
1315 	widest_int temv, temm;
1316 	/* Return ~rval + 1.  */
1317 	bit_value_unop (BIT_NOT_EXPR, type_sgn, type_precision, &temv, &temm,
1318 			type_sgn, type_precision, rval, rmask);
1319 	bit_value_binop (PLUS_EXPR, type_sgn, type_precision, val, mask,
1320 			 type_sgn, type_precision, temv, temm,
1321 			 type_sgn, type_precision, 1, 0);
1322 	break;
1323       }
1324 
1325     CASE_CONVERT:
1326       {
1327 	/* First extend mask and value according to the original type.  */
1328 	*mask = wi::ext (rmask, rtype_precision, rtype_sgn);
1329 	*val = wi::ext (rval, rtype_precision, rtype_sgn);
1330 
1331 	/* Then extend mask and value according to the target type.  */
1332 	*mask = wi::ext (*mask, type_precision, type_sgn);
1333 	*val = wi::ext (*val, type_precision, type_sgn);
1334 	break;
1335       }
1336 
1337     default:
1338       *mask = -1;
1339       break;
1340     }
1341 }
1342 
1343 /* Apply the operation CODE in type TYPE to the value, mask pairs
1344    R1VAL, R1MASK and R2VAL, R2MASK representing values of type R1TYPE
1345    and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */
1346 
1347 void
1348 bit_value_binop (enum tree_code code, signop sgn, int width,
1349 		 widest_int *val, widest_int *mask,
1350 		 signop r1type_sgn, int r1type_precision,
1351 		 const widest_int &r1val, const widest_int &r1mask,
1352 		 signop r2type_sgn, int r2type_precision,
1353 		 const widest_int &r2val, const widest_int &r2mask)
1354 {
1355   bool swap_p = false;
1356 
1357   /* Assume we'll get a constant result.  Use an initial non varying
1358      value; we fall back to varying in the end if necessary.  */
1359   *mask = -1;
1360 
1361   switch (code)
1362     {
1363     case BIT_AND_EXPR:
1364       /* The mask is constant where there is a known not
1365 	 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
1366       *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
1367       *val = r1val & r2val;
1368       break;
1369 
1370     case BIT_IOR_EXPR:
1371       /* The mask is constant where there is a known
1372 	 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)).  */
1373       *mask = wi::bit_and_not (r1mask | r2mask,
1374 			       wi::bit_and_not (r1val, r1mask)
1375 			       | wi::bit_and_not (r2val, r2mask));
1376       *val = r1val | r2val;
1377       break;
1378 
1379     case BIT_XOR_EXPR:
1380       /* m1 | m2  */
1381       *mask = r1mask | r2mask;
1382       *val = r1val ^ r2val;
1383       break;
1384 
1385     case LROTATE_EXPR:
1386     case RROTATE_EXPR:
1387       if (r2mask == 0)
1388 	{
1389 	  widest_int shift = r2val;
1390 	  if (shift == 0)
1391 	    {
1392 	      *mask = r1mask;
1393 	      *val = r1val;
1394 	    }
1395 	  else
1396 	    {
1397 	      if (wi::neg_p (shift))
1398 		{
1399 		  shift = -shift;
1400 		  if (code == RROTATE_EXPR)
1401 		    code = LROTATE_EXPR;
1402 		  else
1403 		    code = RROTATE_EXPR;
1404 		}
1405 	      if (code == RROTATE_EXPR)
1406 		{
1407 		  *mask = wi::rrotate (r1mask, shift, width);
1408 		  *val = wi::rrotate (r1val, shift, width);
1409 		}
1410 	      else
1411 		{
1412 		  *mask = wi::lrotate (r1mask, shift, width);
1413 		  *val = wi::lrotate (r1val, shift, width);
1414 		}
1415 	    }
1416 	}
1417       break;
1418 
1419     case LSHIFT_EXPR:
1420     case RSHIFT_EXPR:
1421       /* ???  We can handle partially known shift counts if we know
1422 	 their sign.  That way we can tell that (x << (y | 8)) & 255
1423 	 is zero.  */
1424       if (r2mask == 0)
1425 	{
1426 	  widest_int shift = r2val;
1427 	  if (shift == 0)
1428 	    {
1429 	      *mask = r1mask;
1430 	      *val = r1val;
1431 	    }
1432 	  else
1433 	    {
1434 	      if (wi::neg_p (shift))
1435 		{
1436 		  shift = -shift;
1437 		  if (code == RSHIFT_EXPR)
1438 		    code = LSHIFT_EXPR;
1439 		  else
1440 		    code = RSHIFT_EXPR;
1441 		}
1442 	      if (code == RSHIFT_EXPR)
1443 		{
1444 		  *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
1445 		  *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
1446 		}
1447 	      else
1448 		{
1449 		  *mask = wi::ext (r1mask << shift, width, sgn);
1450 		  *val = wi::ext (r1val << shift, width, sgn);
1451 		}
1452 	    }
1453 	}
1454       break;
1455 
1456     case PLUS_EXPR:
1457     case POINTER_PLUS_EXPR:
1458       {
1459 	/* Do the addition with unknown bits set to zero, to give carry-ins of
1460 	   zero wherever possible.  */
1461 	widest_int lo = (wi::bit_and_not (r1val, r1mask)
1462 			 + wi::bit_and_not (r2val, r2mask));
1463 	lo = wi::ext (lo, width, sgn);
1464 	/* Do the addition with unknown bits set to one, to give carry-ins of
1465 	   one wherever possible.  */
1466 	widest_int hi = (r1val | r1mask) + (r2val | r2mask);
1467 	hi = wi::ext (hi, width, sgn);
1468 	/* Each bit in the result is known if (a) the corresponding bits in
1469 	   both inputs are known, and (b) the carry-in to that bit position
1470 	   is known.  We can check condition (b) by seeing if we got the same
1471 	   result with minimised carries as with maximised carries.  */
1472 	*mask = r1mask | r2mask | (lo ^ hi);
1473 	*mask = wi::ext (*mask, width, sgn);
1474 	/* It shouldn't matter whether we choose lo or hi here.  */
1475 	*val = lo;
1476 	break;
1477       }
1478 
1479     case MINUS_EXPR:
1480       {
1481 	widest_int temv, temm;
1482 	bit_value_unop (NEGATE_EXPR, r2type_sgn, r2type_precision, &temv, &temm,
1483 			  r2type_sgn, r2type_precision, r2val, r2mask);
1484 	bit_value_binop (PLUS_EXPR, sgn, width, val, mask,
1485 			 r1type_sgn, r1type_precision, r1val, r1mask,
1486 			 r2type_sgn, r2type_precision, temv, temm);
1487 	break;
1488       }
1489 
1490     case MULT_EXPR:
1491       {
1492 	/* Just track trailing zeros in both operands and transfer
1493 	   them to the other.  */
1494 	int r1tz = wi::ctz (r1val | r1mask);
1495 	int r2tz = wi::ctz (r2val | r2mask);
1496 	if (r1tz + r2tz >= width)
1497 	  {
1498 	    *mask = 0;
1499 	    *val = 0;
1500 	  }
1501 	else if (r1tz + r2tz > 0)
1502 	  {
1503 	    *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
1504 			     width, sgn);
1505 	    *val = 0;
1506 	  }
1507 	break;
1508       }
1509 
1510     case EQ_EXPR:
1511     case NE_EXPR:
1512       {
1513 	widest_int m = r1mask | r2mask;
1514 	if (wi::bit_and_not (r1val, m) != wi::bit_and_not (r2val, m))
1515 	  {
1516 	    *mask = 0;
1517 	    *val = ((code == EQ_EXPR) ? 0 : 1);
1518 	  }
1519 	else
1520 	  {
1521 	    /* We know the result of a comparison is always one or zero.  */
1522 	    *mask = 1;
1523 	    *val = 0;
1524 	  }
1525 	break;
1526       }
1527 
1528     case GE_EXPR:
1529     case GT_EXPR:
1530       swap_p = true;
1531       code = swap_tree_comparison (code);
1532       /* Fall through.  */
1533     case LT_EXPR:
1534     case LE_EXPR:
1535       {
1536 	int minmax, maxmin;
1537 
1538 	const widest_int &o1val = swap_p ? r2val : r1val;
1539 	const widest_int &o1mask = swap_p ? r2mask : r1mask;
1540 	const widest_int &o2val = swap_p ? r1val : r2val;
1541 	const widest_int &o2mask = swap_p ? r1mask : r2mask;
1542 
1543 	/* If the most significant bits are not known we know nothing.  */
1544 	if (wi::neg_p (o1mask) || wi::neg_p (o2mask))
1545 	  break;
1546 
1547 	/* For comparisons the signedness is in the comparison operands.  */
1548 	sgn = r1type_sgn;
1549 
1550 	/* If we know the most significant bits we know the value
1551 	   ranges by means of treating varying bits as zero
1552 	   or one.  Do a cross comparison of the max/min pairs.  */
1553 	maxmin = wi::cmp (o1val | o1mask,
1554 			  wi::bit_and_not (o2val, o2mask), sgn);
1555 	minmax = wi::cmp (wi::bit_and_not (o1val, o1mask),
1556 			  o2val | o2mask, sgn);
1557 	if (maxmin < 0)  /* o1 is less than o2.  */
1558 	  {
1559 	    *mask = 0;
1560 	    *val = 1;
1561 	  }
1562 	else if (minmax > 0)  /* o1 is not less or equal to o2.  */
1563 	  {
1564 	    *mask = 0;
1565 	    *val = 0;
1566 	  }
1567 	else if (maxmin == minmax)  /* o1 and o2 are equal.  */
1568 	  {
1569 	    /* This probably should never happen as we'd have
1570 	       folded the thing during fully constant value folding.  */
1571 	    *mask = 0;
1572 	    *val = (code == LE_EXPR ? 1 : 0);
1573 	  }
1574 	else
1575 	  {
1576 	    /* We know the result of a comparison is always one or zero.  */
1577 	    *mask = 1;
1578 	    *val = 0;
1579 	  }
1580 	break;
1581       }
1582 
1583     default:;
1584     }
1585 }
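
/* Editorial example (not part of the original file): for PLUS_EXPR with
   r1val == 4, r1mask == 1 (the operand is 4 or 5) and r2val == 8,
   r2mask == 0, the code above computes lo == 12 and hi == 13, so
   *mask == 1 | 0 | (12 ^ 13) == 1 and *val == 12: the sum is known to
   be 12 or 13, with only bit 0 unknown.  */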
1586 
1587 /* Return the propagation value when applying the operation CODE to
1588    the value RHS yielding type TYPE.  */
1589 
1590 static ccp_prop_value_t
1591 bit_value_unop (enum tree_code code, tree type, tree rhs)
1592 {
1593   ccp_prop_value_t rval = get_value_for_expr (rhs, true);
1594   widest_int value, mask;
1595   ccp_prop_value_t val;
1596 
1597   if (rval.lattice_val == UNDEFINED)
1598     return rval;
1599 
1600   gcc_assert ((rval.lattice_val == CONSTANT
1601 	       && TREE_CODE (rval.value) == INTEGER_CST)
1602 	      || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1);
1603   bit_value_unop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1604 		  TYPE_SIGN (TREE_TYPE (rhs)), TYPE_PRECISION (TREE_TYPE (rhs)),
1605 		  value_to_wide_int (rval), rval.mask);
1606   if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1607     {
1608       val.lattice_val = CONSTANT;
1609       val.mask = mask;
1610       /* ???  Delay building trees here.  */
1611       val.value = wide_int_to_tree (type, value);
1612     }
1613   else
1614     {
1615       val.lattice_val = VARYING;
1616       val.value = NULL_TREE;
1617       val.mask = -1;
1618     }
1619   return val;
1620 }
1621 
1622 /* Return the propagation value when applying the operation CODE to
1623    the values RHS1 and RHS2 yielding type TYPE.  */
1624 
1625 static ccp_prop_value_t
1626 bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
1627 {
1628   ccp_prop_value_t r1val = get_value_for_expr (rhs1, true);
1629   ccp_prop_value_t r2val = get_value_for_expr (rhs2, true);
1630   widest_int value, mask;
1631   ccp_prop_value_t val;
1632 
1633   if (r1val.lattice_val == UNDEFINED
1634       || r2val.lattice_val == UNDEFINED)
1635     {
1636       val.lattice_val = VARYING;
1637       val.value = NULL_TREE;
1638       val.mask = -1;
1639       return val;
1640     }
1641 
1642   gcc_assert ((r1val.lattice_val == CONSTANT
1643 	       && TREE_CODE (r1val.value) == INTEGER_CST)
1644 	      || wi::sext (r1val.mask,
1645 			   TYPE_PRECISION (TREE_TYPE (rhs1))) == -1);
1646   gcc_assert ((r2val.lattice_val == CONSTANT
1647 	       && TREE_CODE (r2val.value) == INTEGER_CST)
1648 	      || wi::sext (r2val.mask,
1649 			   TYPE_PRECISION (TREE_TYPE (rhs2))) == -1);
1650   bit_value_binop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1651 		   TYPE_SIGN (TREE_TYPE (rhs1)), TYPE_PRECISION (TREE_TYPE (rhs1)),
1652 		   value_to_wide_int (r1val), r1val.mask,
1653 		   TYPE_SIGN (TREE_TYPE (rhs2)), TYPE_PRECISION (TREE_TYPE (rhs2)),
1654 		   value_to_wide_int (r2val), r2val.mask);
1655 
1656   /* (x * x) & 2 == 0, since a square is always 0 or 1 mod 4.  */
1657   if (code == MULT_EXPR && rhs1 == rhs2 && TYPE_PRECISION (type) > 1)
1658     {
1659       widest_int m = 2;
1660       if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1661 	value = wi::bit_and_not (value, m);
1662       else
1663 	value = 0;
1664       mask = wi::bit_and_not (mask, m);
1665     }
1666 
1667   if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1668     {
1669       val.lattice_val = CONSTANT;
1670       val.mask = mask;
1671       /* ???  Delay building trees here.  */
1672       val.value = wide_int_to_tree (type, value);
1673     }
1674   else
1675     {
1676       val.lattice_val = VARYING;
1677       val.value = NULL_TREE;
1678       val.mask = -1;
1679     }
1680   return val;
1681 }
1682 
1683 /* Return the propagation value for __builtin_assume_aligned
1684    and functions with assume_aligned or alloc_aligned attribute.
1685    For __builtin_assume_aligned, ATTR is NULL_TREE,
1686    for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
1687    is false, for alloc_aligned attribute ATTR is non-NULL and
1688    ALLOC_ALIGNED is true.  */
1689 
1690 static ccp_prop_value_t
1691 bit_value_assume_aligned (gimple *stmt, tree attr, ccp_prop_value_t ptrval,
1692 			  bool alloc_aligned)
1693 {
1694   tree align, misalign = NULL_TREE, type;
1695   unsigned HOST_WIDE_INT aligni, misaligni = 0;
1696   ccp_prop_value_t alignval;
1697   widest_int value, mask;
1698   ccp_prop_value_t val;
1699 
1700   if (attr == NULL_TREE)
1701     {
1702       tree ptr = gimple_call_arg (stmt, 0);
1703       type = TREE_TYPE (ptr);
1704       ptrval = get_value_for_expr (ptr, true);
1705     }
1706   else
1707     {
1708       tree lhs = gimple_call_lhs (stmt);
1709       type = TREE_TYPE (lhs);
1710     }
1711 
1712   if (ptrval.lattice_val == UNDEFINED)
1713     return ptrval;
1714   gcc_assert ((ptrval.lattice_val == CONSTANT
1715 	       && TREE_CODE (ptrval.value) == INTEGER_CST)
1716 	      || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1);
1717   if (attr == NULL_TREE)
1718     {
1719       /* Get aligni and misaligni from __builtin_assume_aligned.  */
1720       align = gimple_call_arg (stmt, 1);
1721       if (!tree_fits_uhwi_p (align))
1722 	return ptrval;
1723       aligni = tree_to_uhwi (align);
1724       if (gimple_call_num_args (stmt) > 2)
1725 	{
1726 	  misalign = gimple_call_arg (stmt, 2);
1727 	  if (!tree_fits_uhwi_p (misalign))
1728 	    return ptrval;
1729 	  misaligni = tree_to_uhwi (misalign);
1730 	}
1731     }
1732   else
1733     {
1734       /* Get aligni and misaligni from assume_aligned or
1735 	 alloc_align attributes.  */
1736       if (TREE_VALUE (attr) == NULL_TREE)
1737 	return ptrval;
1738       attr = TREE_VALUE (attr);
1739       align = TREE_VALUE (attr);
1740       if (!tree_fits_uhwi_p (align))
1741 	return ptrval;
1742       aligni = tree_to_uhwi (align);
1743       if (alloc_aligned)
1744 	{
1745 	  if (aligni == 0 || aligni > gimple_call_num_args (stmt))
1746 	    return ptrval;
1747 	  align = gimple_call_arg (stmt, aligni - 1);
1748 	  if (!tree_fits_uhwi_p (align))
1749 	    return ptrval;
1750 	  aligni = tree_to_uhwi (align);
1751 	}
1752       else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr)))
1753 	{
1754 	  misalign = TREE_VALUE (TREE_CHAIN (attr));
1755 	  if (!tree_fits_uhwi_p (misalign))
1756 	    return ptrval;
1757 	  misaligni = tree_to_uhwi (misalign);
1758 	}
1759     }
1760   if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni)
1761     return ptrval;
1762 
1763   align = build_int_cst_type (type, -aligni);
1764   alignval = get_value_for_expr (align, true);
1765   bit_value_binop (BIT_AND_EXPR, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1766 		   TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (ptrval), ptrval.mask,
1767 		   TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (alignval), alignval.mask);
1768 
1769   if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1770     {
1771       val.lattice_val = CONSTANT;
1772       val.mask = mask;
1773       gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
1774       gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
1775       value |= misaligni;
1776       /* ???  Delay building trees here.  */
1777       val.value = wide_int_to_tree (type, value);
1778     }
1779   else
1780     {
1781       val.lattice_val = VARYING;
1782       val.value = NULL_TREE;
1783       val.mask = -1;
1784     }
1785   return val;
1786 }
1787 
1788 /* Evaluate statement STMT.
1789    Valid only for assignments, calls, conditionals, and switches. */
1790 
1791 static ccp_prop_value_t
1792 evaluate_stmt (gimple *stmt)
1793 {
1794   ccp_prop_value_t val;
1795   tree simplified = NULL_TREE;
1796   ccp_lattice_t likelyvalue = likely_value (stmt);
1797   bool is_constant = false;
1798   unsigned int align;
1799 
1800   if (dump_file && (dump_flags & TDF_DETAILS))
1801     {
1802       fprintf (dump_file, "which is likely ");
1803       switch (likelyvalue)
1804 	{
1805 	case CONSTANT:
1806 	  fprintf (dump_file, "CONSTANT");
1807 	  break;
1808 	case UNDEFINED:
1809 	  fprintf (dump_file, "UNDEFINED");
1810 	  break;
1811 	case VARYING:
1812 	  fprintf (dump_file, "VARYING");
1813 	  break;
1814 	default:;
1815 	}
1816       fprintf (dump_file, "\n");
1817     }
1818 
1819   /* If the statement is likely to have a CONSTANT result, then try
1820      to fold the statement to determine the constant value.  */
1821   /* FIXME.  This is the only place that we call ccp_fold.
1822      Since likely_value never returns CONSTANT for calls, we will
1823      not attempt to fold them, including builtins that might profit from it.  */
1824   if (likelyvalue == CONSTANT)
1825     {
1826       fold_defer_overflow_warnings ();
1827       simplified = ccp_fold (stmt);
1828       if (simplified
1829 	  && TREE_CODE (simplified) == SSA_NAME)
1830 	{
1831 	  /* We must not use values of something that may be simulated again;
1832 	     see valueize_op_1.  */
1833 	  if (SSA_NAME_IS_DEFAULT_DEF (simplified)
1834 	      || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (simplified)))
1835 	    {
1836 	      ccp_prop_value_t *val = get_value (simplified);
1837 	      if (val && val->lattice_val != VARYING)
1838 		{
1839 		  fold_undefer_overflow_warnings (true, stmt, 0);
1840 		  return *val;
1841 		}
1842 	    }
1843 	  else
1844 	    /* We may also not place a non-valueized copy in the lattice
1845 	       as that might become stale if we never re-visit this stmt.  */
1846 	    simplified = NULL_TREE;
1847 	}
1848       is_constant = simplified && is_gimple_min_invariant (simplified);
1849       fold_undefer_overflow_warnings (is_constant, stmt, 0);
1850       if (is_constant)
1851 	{
1852 	  /* The statement produced a constant value.  */
1853 	  val.lattice_val = CONSTANT;
1854 	  val.value = simplified;
1855 	  val.mask = 0;
1856 	  return val;
1857 	}
1858     }
1859   /* If the statement is likely to have a VARYING result, then do not
1860      bother folding the statement.  */
1861   else if (likelyvalue == VARYING)
1862     {
1863       enum gimple_code code = gimple_code (stmt);
1864       if (code == GIMPLE_ASSIGN)
1865         {
1866           enum tree_code subcode = gimple_assign_rhs_code (stmt);
1867 
1868           /* Other cases cannot satisfy is_gimple_min_invariant
1869              without folding.  */
1870           if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
1871             simplified = gimple_assign_rhs1 (stmt);
1872         }
1873       else if (code == GIMPLE_SWITCH)
1874         simplified = gimple_switch_index (as_a <gswitch *> (stmt));
1875       else
1876 	/* These cannot satisfy is_gimple_min_invariant without folding.  */
1877 	gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
1878       is_constant = simplified && is_gimple_min_invariant (simplified);
1879       if (is_constant)
1880 	{
1881 	  /* The statement produced a constant value.  */
1882 	  val.lattice_val = CONSTANT;
1883 	  val.value = simplified;
1884 	  val.mask = 0;
1885 	}
1886     }
1887   /* If the statement result is likely UNDEFINED, make it so.  */
1888   else if (likelyvalue == UNDEFINED)
1889     {
1890       val.lattice_val = UNDEFINED;
1891       val.value = NULL_TREE;
1892       val.mask = 0;
1893       return val;
1894     }
1895 
1896   /* Resort to simplification for bitwise tracking.  */
1897   if (flag_tree_bit_ccp
1898       && (likelyvalue == CONSTANT || is_gimple_call (stmt)
1899 	  || (gimple_assign_single_p (stmt)
1900 	      && gimple_assign_rhs_code (stmt) == ADDR_EXPR))
1901       && !is_constant)
1902     {
1903       enum gimple_code code = gimple_code (stmt);
1904       val.lattice_val = VARYING;
1905       val.value = NULL_TREE;
1906       val.mask = -1;
1907       if (code == GIMPLE_ASSIGN)
1908 	{
1909 	  enum tree_code subcode = gimple_assign_rhs_code (stmt);
1910 	  tree rhs1 = gimple_assign_rhs1 (stmt);
1911 	  tree lhs = gimple_assign_lhs (stmt);
1912 	  if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
1913 	       || POINTER_TYPE_P (TREE_TYPE (lhs)))
1914 	      && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1915 		  || POINTER_TYPE_P (TREE_TYPE (rhs1))))
1916 	    switch (get_gimple_rhs_class (subcode))
1917 	      {
1918 	      case GIMPLE_SINGLE_RHS:
1919 	        val = get_value_for_expr (rhs1, true);
1920 		break;
1921 
1922 	      case GIMPLE_UNARY_RHS:
1923 		val = bit_value_unop (subcode, TREE_TYPE (lhs), rhs1);
1924 		break;
1925 
1926 	      case GIMPLE_BINARY_RHS:
1927 		val = bit_value_binop (subcode, TREE_TYPE (lhs), rhs1,
1928 				       gimple_assign_rhs2 (stmt));
1929 		break;
1930 
1931 	      default:;
1932 	      }
1933 	}
1934       else if (code == GIMPLE_COND)
1935 	{
1936 	  enum tree_code code = gimple_cond_code (stmt);
1937 	  tree rhs1 = gimple_cond_lhs (stmt);
1938 	  tree rhs2 = gimple_cond_rhs (stmt);
1939 	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1940 	      || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1941 	    val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
1942 	}
1943       else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
1944 	{
1945 	  tree fndecl = gimple_call_fndecl (stmt);
1946 	  switch (DECL_FUNCTION_CODE (fndecl))
1947 	    {
1948 	    case BUILT_IN_MALLOC:
1949 	    case BUILT_IN_REALLOC:
1950 	    case BUILT_IN_CALLOC:
1951 	    case BUILT_IN_STRDUP:
1952 	    case BUILT_IN_STRNDUP:
1953 	      val.lattice_val = CONSTANT;
1954 	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1955 	      val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
1956 			   / BITS_PER_UNIT - 1);
1957 	      break;
1958 
1959 	    CASE_BUILT_IN_ALLOCA:
1960 	      align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA
1961 		       ? BIGGEST_ALIGNMENT
1962 		       : TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
1963 	      val.lattice_val = CONSTANT;
1964 	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1965 	      val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
1966 	      break;
1967 
1968 	    /* These builtins return their first argument, unmodified.  */
1969 	    case BUILT_IN_MEMCPY:
1970 	    case BUILT_IN_MEMMOVE:
1971 	    case BUILT_IN_MEMSET:
1972 	    case BUILT_IN_STRCPY:
1973 	    case BUILT_IN_STRNCPY:
1974 	    case BUILT_IN_MEMCPY_CHK:
1975 	    case BUILT_IN_MEMMOVE_CHK:
1976 	    case BUILT_IN_MEMSET_CHK:
1977 	    case BUILT_IN_STRCPY_CHK:
1978 	    case BUILT_IN_STRNCPY_CHK:
1979 	      val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
1980 	      break;
1981 
1982 	    case BUILT_IN_ASSUME_ALIGNED:
1983 	      val = bit_value_assume_aligned (stmt, NULL_TREE, val, false);
1984 	      break;
1985 
1986 	    case BUILT_IN_ALIGNED_ALLOC:
1987 	      {
1988 		tree align = get_constant_value (gimple_call_arg (stmt, 0));
1989 		if (align
1990 		    && tree_fits_uhwi_p (align))
1991 		  {
1992 		    unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
1993 		    if (aligni > 1
1994 			/* align must be power-of-two */
1995 			&& (aligni & (aligni - 1)) == 0)
1996 		      {
1997 			val.lattice_val = CONSTANT;
1998 			val.value = build_int_cst (ptr_type_node, 0);
1999 			val.mask = -aligni;
2000 		      }
2001 		  }
2002 		break;
2003 	      }
2004 
2005 	    case BUILT_IN_BSWAP16:
2006 	    case BUILT_IN_BSWAP32:
2007 	    case BUILT_IN_BSWAP64:
2008 	      val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
2009 	      if (val.lattice_val == UNDEFINED)
2010 		break;
2011 	      else if (val.lattice_val == CONSTANT
2012 		       && val.value
2013 		       && TREE_CODE (val.value) == INTEGER_CST)
2014 		{
2015 		  tree type = TREE_TYPE (gimple_call_lhs (stmt));
2016 		  int prec = TYPE_PRECISION (type);
2017 		  wide_int wval = wi::to_wide (val.value);
2018 		  val.value
2019 		    = wide_int_to_tree (type,
2020 					wide_int::from (wval, prec,
2021 							UNSIGNED).bswap ());
2022 		  val.mask
2023 		    = widest_int::from (wide_int::from (val.mask, prec,
2024 							UNSIGNED).bswap (),
2025 					UNSIGNED);
2026 		  if (wi::sext (val.mask, prec) != -1)
2027 		    break;
2028 		}
2029 	      val.lattice_val = VARYING;
2030 	      val.value = NULL_TREE;
2031 	      val.mask = -1;
2032 	      break;
2033 
2034 	    default:;
2035 	    }
2036 	}
2037       if (is_gimple_call (stmt) && gimple_call_lhs (stmt))
2038 	{
2039 	  tree fntype = gimple_call_fntype (stmt);
2040 	  if (fntype)
2041 	    {
2042 	      tree attrs = lookup_attribute ("assume_aligned",
2043 					     TYPE_ATTRIBUTES (fntype));
2044 	      if (attrs)
2045 		val = bit_value_assume_aligned (stmt, attrs, val, false);
2046 	      attrs = lookup_attribute ("alloc_align",
2047 					TYPE_ATTRIBUTES (fntype));
2048 	      if (attrs)
2049 		val = bit_value_assume_aligned (stmt, attrs, val, true);
2050 	    }
2051 	}
2052       is_constant = (val.lattice_val == CONSTANT);
2053     }
2054 
2055   if (flag_tree_bit_ccp
2056       && ((is_constant && TREE_CODE (val.value) == INTEGER_CST)
2057 	  || !is_constant)
2058       && gimple_get_lhs (stmt)
2059       && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME)
2060     {
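           /* Combine the lattice result with any nonzero-bits information
              recorded on the LHS: bits known to be zero there can be cleared
              from the constant value and marked as known in the mask.  */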
2061       tree lhs = gimple_get_lhs (stmt);
2062       wide_int nonzero_bits = get_nonzero_bits (lhs);
2063       if (nonzero_bits != -1)
2064 	{
2065 	  if (!is_constant)
2066 	    {
2067 	      val.lattice_val = CONSTANT;
2068 	      val.value = build_zero_cst (TREE_TYPE (lhs));
2069 	      val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (lhs)));
2070 	      is_constant = true;
2071 	    }
2072 	  else
2073 	    {
2074 	      if (wi::bit_and_not (wi::to_wide (val.value), nonzero_bits) != 0)
2075 		val.value = wide_int_to_tree (TREE_TYPE (lhs),
2076 					      nonzero_bits
2077 					      & wi::to_wide (val.value));
2078 	      if (nonzero_bits == 0)
2079 		val.mask = 0;
2080 	      else
2081 		val.mask = val.mask & extend_mask (nonzero_bits,
2082 						   TYPE_SIGN (TREE_TYPE (lhs)));
2083 	    }
2084 	}
2085     }
2086 
2087   /* The statement produced a nonconstant value.  */
2088   if (!is_constant)
2089     {
2090       /* The statement produced a copy.  */
2091       if (simplified && TREE_CODE (simplified) == SSA_NAME
2092 	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified))
2093 	{
2094 	  val.lattice_val = CONSTANT;
2095 	  val.value = simplified;
2096 	  val.mask = -1;
2097 	}
2098       /* The statement is VARYING.  */
2099       else
2100 	{
2101 	  val.lattice_val = VARYING;
2102 	  val.value = NULL_TREE;
2103 	  val.mask = -1;
2104 	}
2105     }
2106 
2107   return val;
2108 }
2109 
2110 typedef hash_table<nofree_ptr_hash<gimple> > gimple_htab;
2111 
2112 /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
2113    each matching BUILT_IN_STACK_RESTORE.  Mark visited phis in VISITED.  */
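     /* For example (a condensed, hypothetical fragment): once an alloca has
        been folded to the address of a temporary VAR, each

          __builtin_stack_restore (saved_1);

        reached through SAVED_VAL's uses gets

          VAR ={v} {CLOBBER};

        inserted right before it, so later passes treat VAR as dead from that
        point on, just as the stack restore ended the original allocation.  */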
2114 
2115 static void
2116 insert_clobber_before_stack_restore (tree saved_val, tree var,
2117 				     gimple_htab **visited)
2118 {
2119   gimple *stmt;
2120   gassign *clobber_stmt;
2121   tree clobber;
2122   imm_use_iterator iter;
2123   gimple_stmt_iterator i;
2124   gimple **slot;
2125 
2126   FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
2127     if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
2128       {
2129 	clobber = build_clobber (TREE_TYPE (var));
2130 	clobber_stmt = gimple_build_assign (var, clobber);
2131 
2132 	i = gsi_for_stmt (stmt);
2133 	gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
2134       }
2135     else if (gimple_code (stmt) == GIMPLE_PHI)
2136       {
2137 	if (!*visited)
2138 	  *visited = new gimple_htab (10);
2139 
2140 	slot = (*visited)->find_slot (stmt, INSERT);
2141 	if (*slot != NULL)
2142 	  continue;
2143 
2144 	*slot = stmt;
2145 	insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
2146 					     visited);
2147       }
2148     else if (gimple_assign_ssa_name_copy_p (stmt))
2149       insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
2150 					   visited);
2151 }
2152 
2153 /* Advance the iterator to the previous non-debug gimple statement in the same
2154    or dominating basic block.  */
2155 
2156 static inline void
2157 gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
2158 {
2159   basic_block dom;
2160 
2161   gsi_prev_nondebug (i);
2162   while (gsi_end_p (*i))
2163     {
2164       dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
2165       if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2166 	return;
2167 
2168       *i = gsi_last_bb (dom);
2169     }
2170 }
2171 
2172 /* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
2173    a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
2174 
2175    It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when
2176    a previous pass (such as DOM) duplicated it along multiple paths to a BB.
2177    In that case the function gives up without inserting the clobbers.  */
2178 
2179 static void
2180 insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
2181 {
2182   gimple *stmt;
2183   tree saved_val;
2184   gimple_htab *visited = NULL;
2185 
2186   for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
2187     {
2188       stmt = gsi_stmt (i);
2189 
2190       if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
2191 	continue;
2192 
2193       saved_val = gimple_call_lhs (stmt);
2194       if (saved_val == NULL_TREE)
2195 	continue;
2196 
2197       insert_clobber_before_stack_restore (saved_val, var, &visited);
2198       break;
2199     }
2200 
2201   delete visited;
2202 }
2203 
2204 /* Detect a __builtin_alloca_with_align call with a constant size argument.
2205    If found, declare a fixed-size array and return its address; otherwise
2206    return NULL_TREE.  */
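     /* For instance (a hypothetical fragment, assuming the size passes the
        threshold check below):

          p_1 = __builtin_alloca_with_align (16, 64);

        can be folded into the address of a 16-byte temporary whose DECL_ALIGN
        is 64 bits, roughly as if the source had declared

          char tmp[16] __attribute__ ((aligned (8)));

        and used &tmp instead of the alloca result.  */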
2207 
2208 static tree
2209 fold_builtin_alloca_with_align (gimple *stmt)
2210 {
2211   unsigned HOST_WIDE_INT size, threshold, n_elem;
2212   tree lhs, arg, block, var, elem_type, array_type;
2213 
2214   /* Get lhs.  */
2215   lhs = gimple_call_lhs (stmt);
2216   if (lhs == NULL_TREE)
2217     return NULL_TREE;
2218 
2219   /* Detect constant argument.  */
2220   arg = get_constant_value (gimple_call_arg (stmt, 0));
2221   if (arg == NULL_TREE
2222       || TREE_CODE (arg) != INTEGER_CST
2223       || !tree_fits_uhwi_p (arg))
2224     return NULL_TREE;
2225 
2226   size = tree_to_uhwi (arg);
2227 
2228   /* Heuristic: don't fold large allocas.  */
2229   threshold = (unsigned HOST_WIDE_INT)param_large_stack_frame;
2230   /* In case the alloca is located at function entry, it has the same lifetime
2231      as a declared array, so we allow a larger size.  */
2232   block = gimple_block (stmt);
2233   if (!(cfun->after_inlining
2234 	&& block
2235         && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
2236     threshold /= 10;
2237   if (size > threshold)
2238     return NULL_TREE;
2239 
2240   /* We have to be able to move points-to info.  We used to assert
2241      that we can, but IPA PTA might end up with two UIDs here
2242      as it might need to handle more than one instance being
2243      live at the same time.  Instead of trying to detect this case
2244      (using the first UID would be OK) just give up for now.  */
2245   struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
2246   unsigned uid = 0;
2247   if (pi != NULL
2248       && !pi->pt.anything
2249       && !pt_solution_singleton_or_null_p (&pi->pt, &uid))
2250     return NULL_TREE;
2251 
2252   /* Declare array.  */
2253   elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
2254   n_elem = size * 8 / BITS_PER_UNIT;
2255   array_type = build_array_type_nelts (elem_type, n_elem);
2256 
2257   if (tree ssa_name = SSA_NAME_IDENTIFIER (lhs))
2258     {
2259       /* Give the temporary a name derived from the name of the VLA
2260 	 declaration so it can be referenced in diagnostics.  */
2261       const char *name = IDENTIFIER_POINTER (ssa_name);
2262       var = create_tmp_var (array_type, name);
2263     }
2264   else
2265     var = create_tmp_var (array_type);
2266 
2267   if (gimple *lhsdef = SSA_NAME_DEF_STMT (lhs))
2268     {
2269       /* Set the temporary's location to that of the VLA declaration
2270 	 so it can be pointed to in diagnostics.  */
2271       location_t loc = gimple_location (lhsdef);
2272       DECL_SOURCE_LOCATION (var) = loc;
2273     }
2274 
2275   SET_DECL_ALIGN (var, TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
2276   if (uid != 0)
2277     SET_DECL_PT_UID (var, uid);
2278 
2279   /* Fold alloca to the address of the array.  */
2280   return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
2281 }
2282 
2283 /* Fold the stmt at *GSI with CCP-specific information that propagating
2284    and regular folding does not catch.  */
2285 
2286 bool
2287 ccp_folder::fold_stmt (gimple_stmt_iterator *gsi)
2288 {
2289   gimple *stmt = gsi_stmt (*gsi);
2290 
2291   switch (gimple_code (stmt))
2292     {
2293     case GIMPLE_COND:
2294       {
2295 	gcond *cond_stmt = as_a <gcond *> (stmt);
2296 	ccp_prop_value_t val;
2297 	/* Statement evaluation will handle type mismatches in constants
2298 	   more gracefully than the final propagation.  This allows us to
2299 	   fold more conditionals here.  */
2300 	val = evaluate_stmt (stmt);
2301 	if (val.lattice_val != CONSTANT
2302 	    || val.mask != 0)
2303 	  return false;
2304 
2305 	if (dump_file)
2306 	  {
2307 	    fprintf (dump_file, "Folding predicate ");
2308 	    print_gimple_expr (dump_file, stmt, 0);
2309 	    fprintf (dump_file, " to ");
2310 	    print_generic_expr (dump_file, val.value);
2311 	    fprintf (dump_file, "\n");
2312 	  }
2313 
2314 	if (integer_zerop (val.value))
2315 	  gimple_cond_make_false (cond_stmt);
2316 	else
2317 	  gimple_cond_make_true (cond_stmt);
2318 
2319 	return true;
2320       }
2321 
2322     case GIMPLE_CALL:
2323       {
2324 	tree lhs = gimple_call_lhs (stmt);
2325 	int flags = gimple_call_flags (stmt);
2326 	tree val;
2327 	tree argt;
2328 	bool changed = false;
2329 	unsigned i;
2330 
2331 	/* If the call was folded into a constant, make sure it goes
2332 	   away even if we cannot propagate into all uses because of
2333 	   type issues.  */
2334 	if (lhs
2335 	    && TREE_CODE (lhs) == SSA_NAME
2336 	    && (val = get_constant_value (lhs))
2337 	    /* Don't optimize away calls that have side-effects.  */
2338 	    && (flags & (ECF_CONST|ECF_PURE)) != 0
2339 	    && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
2340 	  {
2341 	    tree new_rhs = unshare_expr (val);
2342 	    bool res;
2343 	    if (!useless_type_conversion_p (TREE_TYPE (lhs),
2344 					    TREE_TYPE (new_rhs)))
2345 	      new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
2346 	    res = update_call_from_tree (gsi, new_rhs);
2347 	    gcc_assert (res);
2348 	    return true;
2349 	  }
2350 
2351 	/* Internal calls provide no argument types, so the extra laxity
2352 	   for normal calls does not apply.  */
2353 	if (gimple_call_internal_p (stmt))
2354 	  return false;
2355 
2356         /* The heuristic of fold_builtin_alloca_with_align differs before and
2357 	   after inlining, so we don't require the arg to have been replaced by
2358 	   a constant operand; it only needs to be known to be constant.  */
2359         if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN)
2360 	    || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX))
2361           {
2362             tree new_rhs = fold_builtin_alloca_with_align (stmt);
2363             if (new_rhs)
2364 	      {
2365 		bool res = update_call_from_tree (gsi, new_rhs);
2366 		tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0), 0);
2367 		gcc_assert (res);
2368 		insert_clobbers_for_var (*gsi, var);
2369 		return true;
2370 	      }
2371           }
2372 
2373 	/* If there's no extra info from an assume_aligned call,
2374 	   drop it so it doesn't act as an otherwise useless dataflow
2375 	   barrier.  */
2376 	if (gimple_call_builtin_p (stmt, BUILT_IN_ASSUME_ALIGNED))
2377 	  {
2378 	    tree ptr = gimple_call_arg (stmt, 0);
2379 	    ccp_prop_value_t ptrval = get_value_for_expr (ptr, true);
2380 	    if (ptrval.lattice_val == CONSTANT
2381 		&& TREE_CODE (ptrval.value) == INTEGER_CST
2382 		&& ptrval.mask != 0)
2383 	      {
2384 		ccp_prop_value_t val
2385 		  = bit_value_assume_aligned (stmt, NULL_TREE, ptrval, false);
2386 		unsigned int ptralign = least_bit_hwi (ptrval.mask.to_uhwi ());
2387 		unsigned int align = least_bit_hwi (val.mask.to_uhwi ());
2388 		if (ptralign == align
2389 		    && ((TREE_INT_CST_LOW (ptrval.value) & (align - 1))
2390 			== (TREE_INT_CST_LOW (val.value) & (align - 1))))
2391 		  {
2392 		    bool res = update_call_from_tree (gsi, ptr);
2393 		    gcc_assert (res);
2394 		    return true;
2395 		  }
2396 	      }
2397 	  }
2398 
2399 	/* Propagate into the call arguments.  Compared to replace_uses_in
2400 	   this can use the argument slot types for type verification
2401 	   instead of the current argument type.  We also can safely
2402 	   drop qualifiers here as we are dealing with constants anyway.  */
2403 	argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
2404 	for (i = 0; i < gimple_call_num_args (stmt) && argt;
2405 	     ++i, argt = TREE_CHAIN (argt))
2406 	  {
2407 	    tree arg = gimple_call_arg (stmt, i);
2408 	    if (TREE_CODE (arg) == SSA_NAME
2409 		&& (val = get_constant_value (arg))
2410 		&& useless_type_conversion_p
2411 		     (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
2412 		      TYPE_MAIN_VARIANT (TREE_TYPE (val))))
2413 	      {
2414 		gimple_call_set_arg (stmt, i, unshare_expr (val));
2415 		changed = true;
2416 	      }
2417 	  }
2418 
2419 	return changed;
2420       }
2421 
2422     case GIMPLE_ASSIGN:
2423       {
2424 	tree lhs = gimple_assign_lhs (stmt);
2425 	tree val;
2426 
2427 	/* If we have a load that turned out to be constant, replace it,
2428 	   as we cannot propagate into all uses in all cases.  */
2429 	if (gimple_assign_single_p (stmt)
2430 	    && TREE_CODE (lhs) == SSA_NAME
2431 	    && (val = get_constant_value (lhs)))
2432 	  {
2433 	    tree rhs = unshare_expr (val);
2434 	    if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
2435 	      rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
2436 	    gimple_assign_set_rhs_from_tree (gsi, rhs);
2437 	    return true;
2438 	  }
2439 
2440 	return false;
2441       }
2442 
2443     default:
2444       return false;
2445     }
2446 }
2447 
2448 /* Visit the assignment statement STMT.  Set the value of its LHS to the
2449    value computed by the RHS and store LHS in *OUTPUT_P.  If STMT
2450    creates virtual definitions, set the value of each new name to that
2451    of the RHS (if we can derive a constant out of the RHS).
2452    Value-returning call statements also perform an assignment, and
2453    are handled here.  */
2454 
2455 static enum ssa_prop_result
2456 visit_assignment (gimple *stmt, tree *output_p)
2457 {
2458   ccp_prop_value_t val;
2459   enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING;
2460 
2461   tree lhs = gimple_get_lhs (stmt);
2462   if (TREE_CODE (lhs) == SSA_NAME)
2463     {
2464       /* Evaluate the statement, which could be
2465 	 either a GIMPLE_ASSIGN or a GIMPLE_CALL.  */
2466       val = evaluate_stmt (stmt);
2467 
2468       /* If STMT is an assignment to an SSA_NAME, we only have one
2469 	 value to set.  */
2470       if (set_lattice_value (lhs, &val))
2471 	{
2472 	  *output_p = lhs;
2473 	  if (val.lattice_val == VARYING)
2474 	    retval = SSA_PROP_VARYING;
2475 	  else
2476 	    retval = SSA_PROP_INTERESTING;
2477 	}
2478     }
2479 
2480   return retval;
2481 }
2482 
2483 
2484 /* Visit the conditional statement STMT.  Return SSA_PROP_INTERESTING
2485    if it can determine which edge will be taken.  Otherwise, return
2486    SSA_PROP_VARYING.  */
2487 
2488 static enum ssa_prop_result
2489 visit_cond_stmt (gimple *stmt, edge *taken_edge_p)
2490 {
2491   ccp_prop_value_t val;
2492   basic_block block;
2493 
2494   block = gimple_bb (stmt);
2495   val = evaluate_stmt (stmt);
2496   if (val.lattice_val != CONSTANT
2497       || val.mask != 0)
2498     return SSA_PROP_VARYING;
2499 
2500   /* Find which edge out of the conditional block will be taken and add it
2501      to the worklist.  If no single edge can be determined statically,
2502      return SSA_PROP_VARYING to feed all the outgoing edges to the
2503      propagation engine.  */
2504   *taken_edge_p = find_taken_edge (block, val.value);
2505   if (*taken_edge_p)
2506     return SSA_PROP_INTERESTING;
2507   else
2508     return SSA_PROP_VARYING;
2509 }
2510 
2511 
2512 /* Evaluate statement STMT.  If the statement produces an output value and
2513    its evaluation changes the lattice value of its output, return
2514    SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2515    output value.
2516 
2517    If STMT is a conditional branch and we can determine its truth
2518    value, set *TAKEN_EDGE_P accordingly.  If STMT produces a varying
2519    value, return SSA_PROP_VARYING.  */
2520 
2521 enum ssa_prop_result
2522 ccp_propagate::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
2523 {
2524   tree def;
2525   ssa_op_iter iter;
2526 
2527   if (dump_file && (dump_flags & TDF_DETAILS))
2528     {
2529       fprintf (dump_file, "\nVisiting statement:\n");
2530       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2531     }
2532 
2533   switch (gimple_code (stmt))
2534     {
2535       case GIMPLE_ASSIGN:
2536         /* If the statement is an assignment that produces a single
2537            output value, evaluate its RHS to see if the lattice value of
2538            its output has changed.  */
2539         return visit_assignment (stmt, output_p);
2540 
2541       case GIMPLE_CALL:
2542         /* A value-returning call also performs an assignment.  */
2543         if (gimple_call_lhs (stmt) != NULL_TREE)
2544           return visit_assignment (stmt, output_p);
2545         break;
2546 
2547       case GIMPLE_COND:
2548       case GIMPLE_SWITCH:
2549         /* If STMT is a conditional branch, see if we can determine
2550            which branch will be taken.   */
2551         /* FIXME.  It appears that we should be able to optimize
2552            computed GOTOs here as well.  */
2553         return visit_cond_stmt (stmt, taken_edge_p);
2554 
2555       default:
2556         break;
2557     }
2558 
2559   /* Any other kind of statement is not interesting for constant
2560      propagation and, therefore, not worth simulating.  */
2561   if (dump_file && (dump_flags & TDF_DETAILS))
2562     fprintf (dump_file, "No interesting values produced.  Marked VARYING.\n");
2563 
2564   /* Definitions made by statements other than assignments to
2565      SSA_NAMEs represent unknown modifications to their outputs.
2566      Mark them VARYING.  */
2567   FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
2568     set_value_varying (def);
2569 
2570   return SSA_PROP_VARYING;
2571 }
2572 
2573 
2574 /* Main entry point for SSA Conditional Constant Propagation.  If NONZERO_P,
2575    record nonzero bits.  */
2576 
2577 static unsigned int
2578 do_ssa_ccp (bool nonzero_p)
2579 {
2580   unsigned int todo = 0;
2581   calculate_dominance_info (CDI_DOMINATORS);
2582 
2583   ccp_initialize ();
2584   class ccp_propagate ccp_propagate;
2585   ccp_propagate.ssa_propagate ();
2586   if (ccp_finalize (nonzero_p || flag_ipa_bit_cp))
2587     {
2588       todo = (TODO_cleanup_cfg | TODO_update_ssa);
2589 
2590       /* ccp_finalize does not preserve loop-closed ssa.  */
2591       loops_state_clear (LOOP_CLOSED_SSA);
2592     }
2593 
2594   free_dominance_info (CDI_DOMINATORS);
2595   return todo;
2596 }
2597 
2598 
2599 namespace {
2600 
2601 const pass_data pass_data_ccp =
2602 {
2603   GIMPLE_PASS, /* type */
2604   "ccp", /* name */
2605   OPTGROUP_NONE, /* optinfo_flags */
2606   TV_TREE_CCP, /* tv_id */
2607   ( PROP_cfg | PROP_ssa ), /* properties_required */
2608   0, /* properties_provided */
2609   0, /* properties_destroyed */
2610   0, /* todo_flags_start */
2611   TODO_update_address_taken, /* todo_flags_finish */
2612 };
2613 
2614 class pass_ccp : public gimple_opt_pass
2615 {
2616 public:
2617   pass_ccp (gcc::context *ctxt)
2618     : gimple_opt_pass (pass_data_ccp, ctxt), nonzero_p (false)
2619   {}
2620 
2621   /* opt_pass methods: */
2622   opt_pass * clone () { return new pass_ccp (m_ctxt); }
2623   void set_pass_param (unsigned int n, bool param)
2624     {
2625       gcc_assert (n == 0);
2626       nonzero_p = param;
2627     }
2628   virtual bool gate (function *) { return flag_tree_ccp != 0; }
2629   virtual unsigned int execute (function *) { return do_ssa_ccp (nonzero_p); }
2630 
2631  private:
2632   /* Determines whether the pass instance records nonzero bits.  */
2633   bool nonzero_p;
2634 }; // class pass_ccp
2635 
2636 } // anon namespace
2637 
2638 gimple_opt_pass *
2639 make_pass_ccp (gcc::context *ctxt)
2640 {
2641   return new pass_ccp (ctxt);
2642 }
2643 
2644 
2645 
2646 /* Try to optimize out __builtin_stack_restore.  Optimize it out
2647    if there is another __builtin_stack_restore in the same basic
2648    block and no calls or ASM_EXPRs are in between, or if this block's
2649    only outgoing edge is to EXIT_BLOCK and there are no calls or
2650    ASM_EXPRs after this __builtin_stack_restore call.  */
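     /* A sketch of the first case (hypothetical, condensed GIMPLE):

          t_1 = __builtin_stack_save ();
          ...
          __builtin_stack_restore (t_1);   <- removable
          ... no calls or ASM_EXPRs ...
          __builtin_stack_restore (t_2);

        Nothing between the two restores can observe the stack pointer, so
        the earlier restore has no effect and can be deleted.  */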
2651 
2652 static tree
2653 optimize_stack_restore (gimple_stmt_iterator i)
2654 {
2655   tree callee;
2656   gimple *stmt;
2657 
2658   basic_block bb = gsi_bb (i);
2659   gimple *call = gsi_stmt (i);
2660 
2661   if (gimple_code (call) != GIMPLE_CALL
2662       || gimple_call_num_args (call) != 1
2663       || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
2664       || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
2665     return NULL_TREE;
2666 
2667   for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
2668     {
2669       stmt = gsi_stmt (i);
2670       if (gimple_code (stmt) == GIMPLE_ASM)
2671 	return NULL_TREE;
2672       if (gimple_code (stmt) != GIMPLE_CALL)
2673 	continue;
2674 
2675       callee = gimple_call_fndecl (stmt);
2676       if (!callee
2677 	  || !fndecl_built_in_p (callee, BUILT_IN_NORMAL)
2678 	  /* All regular builtins are ok, just obviously not alloca.  */
2679 	  || ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (callee)))
2680 	return NULL_TREE;
2681 
2682       if (fndecl_built_in_p (callee, BUILT_IN_STACK_RESTORE))
2683 	goto second_stack_restore;
2684     }
2685 
2686   if (!gsi_end_p (i))
2687     return NULL_TREE;
2688 
2689   /* Allow a single successor leading to the exit block, or no successors.  */
2690   switch (EDGE_COUNT (bb->succs))
2691     {
2692     case 0:
2693       break;
2694     case 1:
2695       if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2696 	return NULL_TREE;
2697       break;
2698     default:
2699       return NULL_TREE;
2700     }
2701  second_stack_restore:
2702 
2703   /* If there's exactly one use, then zap the call to __builtin_stack_save.
2704      If there are multiple uses, then the last one should remove the call.
2705      In any case, whether the call to __builtin_stack_save can be removed
2706      or not is irrelevant to removing the call to __builtin_stack_restore.  */
2707   if (has_single_use (gimple_call_arg (call, 0)))
2708     {
2709       gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
2710       if (is_gimple_call (stack_save))
2711 	{
2712 	  callee = gimple_call_fndecl (stack_save);
2713 	  if (callee && fndecl_built_in_p (callee, BUILT_IN_STACK_SAVE))
2714 	    {
2715 	      gimple_stmt_iterator stack_save_gsi;
2716 	      tree rhs;
2717 
2718 	      stack_save_gsi = gsi_for_stmt (stack_save);
2719 	      rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
2720 	      update_call_from_tree (&stack_save_gsi, rhs);
2721 	    }
2722 	}
2723     }
2724 
2725   /* No effect, so the statement will be deleted.  */
2726   return integer_zero_node;
2727 }
2728 
2729 /* If the va_list type is a simple pointer and nothing special is needed,
2730    optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
2731    optimize __builtin_va_end (&ap) out as a NOP, and turn __builtin_va_copy
2732    into a simple pointer assignment.  */
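     /* For example (hypothetical GIMPLE, assuming a target whose va_list is
        a plain character pointer):

          __builtin_va_copy (&dst, src_1);

        is turned into the ordinary pointer assignment

          dst = (char *) src_1;  */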
2733 
2734 static tree
2735 optimize_stdarg_builtin (gimple *call)
2736 {
2737   tree callee, lhs, rhs, cfun_va_list;
2738   bool va_list_simple_ptr;
2739   location_t loc = gimple_location (call);
2740 
2741   callee = gimple_call_fndecl (call);
2742 
2743   cfun_va_list = targetm.fn_abi_va_list (callee);
2744   va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
2745 		       && (TREE_TYPE (cfun_va_list) == void_type_node
2746 			   || TREE_TYPE (cfun_va_list) == char_type_node);
2747 
2748   switch (DECL_FUNCTION_CODE (callee))
2749     {
2750     case BUILT_IN_VA_START:
2751       if (!va_list_simple_ptr
2752 	  || targetm.expand_builtin_va_start != NULL
2753 	  || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
2754 	return NULL_TREE;
2755 
2756       if (gimple_call_num_args (call) != 2)
2757 	return NULL_TREE;
2758 
2759       lhs = gimple_call_arg (call, 0);
2760       if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2761 	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2762 	     != TYPE_MAIN_VARIANT (cfun_va_list))
2763 	return NULL_TREE;
2764 
2765       lhs = build_fold_indirect_ref_loc (loc, lhs);
2766       rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
2767                              1, integer_zero_node);
2768       rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2769       return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2770 
2771     case BUILT_IN_VA_COPY:
2772       if (!va_list_simple_ptr)
2773 	return NULL_TREE;
2774 
2775       if (gimple_call_num_args (call) != 2)
2776 	return NULL_TREE;
2777 
2778       lhs = gimple_call_arg (call, 0);
2779       if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2780 	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2781 	     != TYPE_MAIN_VARIANT (cfun_va_list))
2782 	return NULL_TREE;
2783 
2784       lhs = build_fold_indirect_ref_loc (loc, lhs);
2785       rhs = gimple_call_arg (call, 1);
2786       if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
2787 	  != TYPE_MAIN_VARIANT (cfun_va_list))
2788 	return NULL_TREE;
2789 
2790       rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2791       return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2792 
2793     case BUILT_IN_VA_END:
2794       /* No effect, so the statement will be deleted.  */
2795       return integer_zero_node;
2796 
2797     default:
2798       gcc_unreachable ();
2799     }
2800 }
2801 
2802 /* Attempt to make the block of the __builtin_unreachable call at I unreachable
2803    by changing the incoming jumps.  Return true if at least one jump was changed.  */
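     /* For example (a hypothetical CFG fragment): if a block containing only

          __builtin_unreachable ();

        is reached from

          if (x_1 > 10) goto <unreachable bb>; else goto <other bb>;

        the condition is rewritten to be always false, so the block loses the
        incoming edge and can be removed by a later CFG cleanup.  */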
2804 
2805 static bool
2806 optimize_unreachable (gimple_stmt_iterator i)
2807 {
2808   basic_block bb = gsi_bb (i);
2809   gimple_stmt_iterator gsi;
2810   gimple *stmt;
2811   edge_iterator ei;
2812   edge e;
2813   bool ret;
2814 
2815   if (flag_sanitize & SANITIZE_UNREACHABLE)
2816     return false;
2817 
2818   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2819     {
2820       stmt = gsi_stmt (gsi);
2821 
2822       if (is_gimple_debug (stmt))
2823        continue;
2824 
2825       if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
2826 	{
2827 	  /* Verify we do not need to preserve the label.  */
2828 	  if (FORCED_LABEL (gimple_label_label (label_stmt)))
2829 	    return false;
2830 
2831 	  continue;
2832 	}
2833 
2834       /* Only handle the case that __builtin_unreachable is the first statement
2835 	 in the block.  We rely on DCE to remove stmts without side-effects
2836 	 before __builtin_unreachable.  */
2837       if (gsi_stmt (gsi) != gsi_stmt (i))
2838         return false;
2839     }
2840 
2841   ret = false;
2842   FOR_EACH_EDGE (e, ei, bb->preds)
2843     {
2844       gsi = gsi_last_bb (e->src);
2845       if (gsi_end_p (gsi))
2846 	continue;
2847 
2848       stmt = gsi_stmt (gsi);
2849       if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
2850 	{
2851 	  if (e->flags & EDGE_TRUE_VALUE)
2852 	    gimple_cond_make_false (cond_stmt);
2853 	  else if (e->flags & EDGE_FALSE_VALUE)
2854 	    gimple_cond_make_true (cond_stmt);
2855 	  else
2856 	    gcc_unreachable ();
2857 	  update_stmt (cond_stmt);
2858 	}
2859       else
2860 	{
2861 	  /* Todo: handle other cases.  Note that unreachable switch case
2862 	     statements have already been removed.  */
2863 	  continue;
2864 	}
2865 
2866       ret = true;
2867     }
2868 
2869   return ret;
2870 }
2871 
2872 /* Optimize
2873      mask_2 = 1 << cnt_1;
2874      _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
2875      _5 = _4 & mask_2;
2876    to
2877      _4 = ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);
2878      _5 = _4;
2879    If _5 is only used in _5 != 0 or _5 == 0 comparisons, 1
2880    is passed instead of 0, and the builtin just returns a zero
2881    or 1 value instead of the actual bit.
2882    Similarly for __sync_fetch_and_or_* (without the ", _3" part
2883    in there), and/or if mask_2 is a power of 2 constant.
2884    Similarly for xor instead of or, use ATOMIC_BIT_TEST_AND_COMPLEMENT
2885    in that case.  And similarly for and instead of or, except that
2886    the second argument to the builtin needs to be one's complement
2887    of the mask instead of mask.  */
2888 
2889 static void
2890 optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
2891 			      enum internal_fn fn, bool has_model_arg,
2892 			      bool after)
2893 {
2894   gimple *call = gsi_stmt (*gsip);
2895   tree lhs = gimple_call_lhs (call);
2896   use_operand_p use_p;
2897   gimple *use_stmt;
2898   tree mask, bit;
2899   optab optab;
2900 
2901   if (!flag_inline_atomics
2902       || optimize_debug
2903       || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)
2904       || !lhs
2905       || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2906       || !single_imm_use (lhs, &use_p, &use_stmt)
2907       || !is_gimple_assign (use_stmt)
2908       || gimple_assign_rhs_code (use_stmt) != BIT_AND_EXPR
2909       || !gimple_vdef (call))
2910     return;
2911 
2912   switch (fn)
2913     {
2914     case IFN_ATOMIC_BIT_TEST_AND_SET:
2915       optab = atomic_bit_test_and_set_optab;
2916       break;
2917     case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT:
2918       optab = atomic_bit_test_and_complement_optab;
2919       break;
2920     case IFN_ATOMIC_BIT_TEST_AND_RESET:
2921       optab = atomic_bit_test_and_reset_optab;
2922       break;
2923     default:
2924       return;
2925     }
2926 
2927   if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs))) == CODE_FOR_nothing)
2928     return;
2929 
2930   mask = gimple_call_arg (call, 1);
2931   tree use_lhs = gimple_assign_lhs (use_stmt);
2932   if (!use_lhs)
2933     return;
2934 
2935   if (TREE_CODE (mask) == INTEGER_CST)
2936     {
2937       if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
2938 	mask = const_unop (BIT_NOT_EXPR, TREE_TYPE (mask), mask);
2939       mask = fold_convert (TREE_TYPE (lhs), mask);
2940       int ibit = tree_log2 (mask);
2941       if (ibit < 0)
2942 	return;
2943       bit = build_int_cst (TREE_TYPE (lhs), ibit);
2944     }
2945   else if (TREE_CODE (mask) == SSA_NAME)
2946     {
2947       gimple *g = SSA_NAME_DEF_STMT (mask);
2948       if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
2949 	{
2950 	  if (!is_gimple_assign (g)
2951 	      || gimple_assign_rhs_code (g) != BIT_NOT_EXPR)
2952 	    return;
2953 	  mask = gimple_assign_rhs1 (g);
2954 	  if (TREE_CODE (mask) != SSA_NAME)
2955 	    return;
2956 	  g = SSA_NAME_DEF_STMT (mask);
2957 	}
2958       if (!is_gimple_assign (g)
2959 	  || gimple_assign_rhs_code (g) != LSHIFT_EXPR
2960 	  || !integer_onep (gimple_assign_rhs1 (g)))
2961 	return;
2962       bit = gimple_assign_rhs2 (g);
2963     }
2964   else
2965     return;
2966 
2967   if (gimple_assign_rhs1 (use_stmt) == lhs)
2968     {
2969       if (!operand_equal_p (gimple_assign_rhs2 (use_stmt), mask, 0))
2970 	return;
2971     }
2972   else if (gimple_assign_rhs2 (use_stmt) != lhs
2973 	   || !operand_equal_p (gimple_assign_rhs1 (use_stmt), mask, 0))
2974     return;
2975 
2976   bool use_bool = true;
2977   bool has_debug_uses = false;
2978   imm_use_iterator iter;
2979   gimple *g;
2980 
2981   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))
2982     use_bool = false;
2983   FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
2984     {
2985       enum tree_code code = ERROR_MARK;
2986       tree op0 = NULL_TREE, op1 = NULL_TREE;
2987       if (is_gimple_debug (g))
2988 	{
2989 	  has_debug_uses = true;
2990 	  continue;
2991 	}
2992       else if (is_gimple_assign (g))
2993 	switch (gimple_assign_rhs_code (g))
2994 	  {
2995 	  case COND_EXPR:
2996 	    op1 = gimple_assign_rhs1 (g);
2997 	    code = TREE_CODE (op1);
2998 	    op0 = TREE_OPERAND (op1, 0);
2999 	    op1 = TREE_OPERAND (op1, 1);
3000 	    break;
3001 	  case EQ_EXPR:
3002 	  case NE_EXPR:
3003 	    code = gimple_assign_rhs_code (g);
3004 	    op0 = gimple_assign_rhs1 (g);
3005 	    op1 = gimple_assign_rhs2 (g);
3006 	    break;
3007 	  default:
3008 	    break;
3009 	  }
3010       else if (gimple_code (g) == GIMPLE_COND)
3011 	{
3012 	  code = gimple_cond_code (g);
3013 	  op0 = gimple_cond_lhs (g);
3014 	  op1 = gimple_cond_rhs (g);
3015 	}
3016 
3017       if ((code == EQ_EXPR || code == NE_EXPR)
3018 	  && op0 == use_lhs
3019 	  && integer_zerop (op1))
3020 	{
3021 	  use_operand_p use_p;
3022 	  int n = 0;
3023 	  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3024 	    n++;
3025 	  if (n == 1)
3026 	    continue;
3027 	}
3028 
3029       use_bool = false;
3030       BREAK_FROM_IMM_USE_STMT (iter);
3031     }
3032 
3033   tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
3034   tree flag = build_int_cst (TREE_TYPE (lhs), use_bool);
3035   if (has_model_arg)
3036     g = gimple_build_call_internal (fn, 4, gimple_call_arg (call, 0),
3037 				    bit, flag, gimple_call_arg (call, 2));
3038   else
3039     g = gimple_build_call_internal (fn, 3, gimple_call_arg (call, 0),
3040 				    bit, flag);
3041   gimple_call_set_lhs (g, new_lhs);
3042   gimple_set_location (g, gimple_location (call));
3043   gimple_move_vops (g, call);
3044   bool throws = stmt_can_throw_internal (cfun, call);
3045   gimple_call_set_nothrow (as_a <gcall *> (g),
3046 			   gimple_call_nothrow_p (as_a <gcall *> (call)));
3047   gimple_stmt_iterator gsi = *gsip;
3048   gsi_insert_after (&gsi, g, GSI_NEW_STMT);
3049   edge e = NULL;
3050   if (throws)
3051     {
3052       maybe_clean_or_replace_eh_stmt (call, g);
3053       if (after || (use_bool && has_debug_uses))
3054 	e = find_fallthru_edge (gsi_bb (gsi)->succs);
3055     }
3056   if (after)
3057     {
3058       /* The internal function returns the value of the specified bit
3059 	 before the atomic operation.  If we are interested in the value
3060 	 of the specified bit after the atomic operation (this only makes
3061 	 sense for xor; otherwise the bit's content is known at compile time),
3062 	 we need to invert the bit.  */
3063       g = gimple_build_assign (make_ssa_name (TREE_TYPE (lhs)),
3064 			       BIT_XOR_EXPR, new_lhs,
3065 			       use_bool ? build_int_cst (TREE_TYPE (lhs), 1)
3066 					: mask);
3067       new_lhs = gimple_assign_lhs (g);
3068       if (throws)
3069 	{
3070 	  gsi_insert_on_edge_immediate (e, g);
3071 	  gsi = gsi_for_stmt (g);
3072 	}
3073       else
3074 	gsi_insert_after (&gsi, g, GSI_NEW_STMT);
3075     }
3076   if (use_bool && has_debug_uses)
3077     {
3078       tree temp = NULL_TREE;
3079       if (!throws || after || single_pred_p (e->dest))
3080 	{
3081 	  temp = make_node (DEBUG_EXPR_DECL);
3082 	  DECL_ARTIFICIAL (temp) = 1;
3083 	  TREE_TYPE (temp) = TREE_TYPE (lhs);
3084 	  SET_DECL_MODE (temp, TYPE_MODE (TREE_TYPE (lhs)));
3085 	  tree t = build2 (LSHIFT_EXPR, TREE_TYPE (lhs), new_lhs, bit);
3086 	  g = gimple_build_debug_bind (temp, t, g);
3087 	  if (throws && !after)
3088 	    {
3089 	      gsi = gsi_after_labels (e->dest);
3090 	      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
3091 	    }
3092 	  else
3093 	    gsi_insert_after (&gsi, g, GSI_NEW_STMT);
3094 	}
3095       FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
3096 	if (is_gimple_debug (g))
3097 	  {
3098 	    use_operand_p use_p;
3099 	    if (temp == NULL_TREE)
3100 	      gimple_debug_bind_reset_value (g);
3101 	    else
3102 	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3103 		SET_USE (use_p, temp);
3104 	    update_stmt (g);
3105 	  }
3106     }
3107   SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_lhs)
3108     = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs);
3109   replace_uses_by (use_lhs, new_lhs);
3110   gsi = gsi_for_stmt (use_stmt);
3111   gsi_remove (&gsi, true);
3112   release_defs (use_stmt);
3113   gsi_remove (gsip, true);
3114   release_ssa_name (lhs);
3115 }
3116 
3117 /* Optimize
3118    a = {};
3119    b = a;
3120    into
3121    a = {};
3122    b = {};
3123    Similarly for memset (&a, ..., sizeof (a)); instead of a = {};
3124    and/or memcpy (&b, &a, sizeof (a)); instead of b = a;  */
3125 
3126 static void
3127 optimize_memcpy (gimple_stmt_iterator *gsip, tree dest, tree src, tree len)
3128 {
3129   gimple *stmt = gsi_stmt (*gsip);
3130   if (gimple_has_volatile_ops (stmt))
3131     return;
3132 
3133   tree vuse = gimple_vuse (stmt);
3134   if (vuse == NULL)
3135     return;
3136 
3137   gimple *defstmt = SSA_NAME_DEF_STMT (vuse);
3138   tree src2 = NULL_TREE, len2 = NULL_TREE;
3139   poly_int64 offset, offset2;
3140   tree val = integer_zero_node;
3141   if (gimple_store_p (defstmt)
3142       && gimple_assign_single_p (defstmt)
3143       && TREE_CODE (gimple_assign_rhs1 (defstmt)) == CONSTRUCTOR
3144       && !gimple_clobber_p (defstmt))
3145     src2 = gimple_assign_lhs (defstmt);
3146   else if (gimple_call_builtin_p (defstmt, BUILT_IN_MEMSET)
3147 	   && TREE_CODE (gimple_call_arg (defstmt, 0)) == ADDR_EXPR
3148 	   && TREE_CODE (gimple_call_arg (defstmt, 1)) == INTEGER_CST)
3149     {
3150       src2 = TREE_OPERAND (gimple_call_arg (defstmt, 0), 0);
3151       len2 = gimple_call_arg (defstmt, 2);
3152       val = gimple_call_arg (defstmt, 1);
3153       /* For non-0 val, we'd have to transform stmt from assignment
3154 	 into memset (only if dest is addressable).  */
3155       if (!integer_zerop (val) && is_gimple_assign (stmt))
3156 	src2 = NULL_TREE;
3157     }
3158 
3159   if (src2 == NULL_TREE)
3160     return;
3161 
3162   if (len == NULL_TREE)
3163     len = (TREE_CODE (src) == COMPONENT_REF
3164 	   ? DECL_SIZE_UNIT (TREE_OPERAND (src, 1))
3165 	   : TYPE_SIZE_UNIT (TREE_TYPE (src)));
3166   if (len2 == NULL_TREE)
3167     len2 = (TREE_CODE (src2) == COMPONENT_REF
3168 	    ? DECL_SIZE_UNIT (TREE_OPERAND (src2, 1))
3169 	    : TYPE_SIZE_UNIT (TREE_TYPE (src2)));
3170   if (len == NULL_TREE
3171       || !poly_int_tree_p (len)
3172       || len2 == NULL_TREE
3173       || !poly_int_tree_p (len2))
3174     return;
3175 
3176   src = get_addr_base_and_unit_offset (src, &offset);
3177   src2 = get_addr_base_and_unit_offset (src2, &offset2);
3178   if (src == NULL_TREE
3179       || src2 == NULL_TREE
3180       || maybe_lt (offset, offset2))
3181     return;
3182 
3183   if (!operand_equal_p (src, src2, 0))
3184     return;
3185 
3186   /* [ src + offset2, src + offset2 + len2 - 1 ] is set to val.
3187      Make sure that
3188      [ src + offset, src + offset + len - 1 ] is a subset of that.  */
3189   if (maybe_gt (wi::to_poly_offset (len) + (offset - offset2),
3190 		wi::to_poly_offset (len2)))
3191     return;
3192 
3193   if (dump_file && (dump_flags & TDF_DETAILS))
3194     {
3195       fprintf (dump_file, "Simplified\n  ");
3196       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3197       fprintf (dump_file, "after previous\n  ");
3198       print_gimple_stmt (dump_file, defstmt, 0, dump_flags);
3199     }
3200 
3201   /* For simplicity, don't change the kind of the stmt,
3202      turn dest = src; into dest = {}; and memcpy (&dest, &src, len);
3203      into memset (&dest, val, len);
3204      In theory we could change dest = src into memset if dest
3205      is addressable (maybe beneficial if val is not 0), or
3206      memcpy (&dest, &src, len) into dest = {} if len is the size
3207      of dest and dest isn't volatile.  */
3208   if (is_gimple_assign (stmt))
3209     {
3210       tree ctor = build_constructor (TREE_TYPE (dest), NULL);
3211       gimple_assign_set_rhs_from_tree (gsip, ctor);
3212       update_stmt (stmt);
3213     }
3214   else /* If stmt is memcpy, transform it into memset.  */
3215     {
3216       gcall *call = as_a <gcall *> (stmt);
3217       tree fndecl = builtin_decl_implicit (BUILT_IN_MEMSET);
3218       gimple_call_set_fndecl (call, fndecl);
3219       gimple_call_set_fntype (call, TREE_TYPE (fndecl));
3220       gimple_call_set_arg (call, 1, val);
3221       update_stmt (stmt);
3222     }
3223 
3224   if (dump_file && (dump_flags & TDF_DETAILS))
3225     {
3226       fprintf (dump_file, "into\n  ");
3227       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3228     }
3229 }
3230 
3231 /* A simple pass that attempts to fold all builtin functions.  This pass
3232    is run after we've propagated as many constants as we can.  */
3233 
3234 namespace {
3235 
3236 const pass_data pass_data_fold_builtins =
3237 {
3238   GIMPLE_PASS, /* type */
3239   "fab", /* name */
3240   OPTGROUP_NONE, /* optinfo_flags */
3241   TV_NONE, /* tv_id */
3242   ( PROP_cfg | PROP_ssa ), /* properties_required */
3243   0, /* properties_provided */
3244   0, /* properties_destroyed */
3245   0, /* todo_flags_start */
3246   TODO_update_ssa, /* todo_flags_finish */
3247 };
3248 
3249 class pass_fold_builtins : public gimple_opt_pass
3250 {
3251 public:
3252   pass_fold_builtins (gcc::context *ctxt)
3253     : gimple_opt_pass (pass_data_fold_builtins, ctxt)
3254   {}
3255 
3256   /* opt_pass methods: */
3257   opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
3258   virtual unsigned int execute (function *);
3259 
3260 }; // class pass_fold_builtins
3261 
3262 unsigned int
3263 pass_fold_builtins::execute (function *fun)
3264 {
3265   bool cfg_changed = false;
3266   basic_block bb;
3267   unsigned int todoflags = 0;
3268 
3269   FOR_EACH_BB_FN (bb, fun)
3270     {
3271       gimple_stmt_iterator i;
3272       for (i = gsi_start_bb (bb); !gsi_end_p (i); )
3273 	{
3274 	  gimple *stmt, *old_stmt;
3275 	  tree callee;
3276 	  enum built_in_function fcode;
3277 
3278 	  stmt = gsi_stmt (i);
3279 
3280           if (gimple_code (stmt) != GIMPLE_CALL)
3281 	    {
3282 	      /* Remove all *ssaname_N ={v} {CLOBBER}; stmts; after
3283 		 the last GIMPLE DSE they aren't needed and might
3284 		 unnecessarily keep the SSA_NAMEs live.  */
3285 	      if (gimple_clobber_p (stmt))
3286 		{
3287 		  tree lhs = gimple_assign_lhs (stmt);
3288 		  if (TREE_CODE (lhs) == MEM_REF
3289 		      && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
3290 		    {
3291 		      unlink_stmt_vdef (stmt);
3292 		      gsi_remove (&i, true);
3293 		      release_defs (stmt);
3294 		      continue;
3295 		    }
3296 		}
3297 	      else if (gimple_assign_load_p (stmt) && gimple_store_p (stmt))
3298 		optimize_memcpy (&i, gimple_assign_lhs (stmt),
3299 				 gimple_assign_rhs1 (stmt), NULL_TREE);
3300 	      gsi_next (&i);
3301 	      continue;
3302 	    }
3303 
3304 	  callee = gimple_call_fndecl (stmt);
3305 	  if (!callee || !fndecl_built_in_p (callee, BUILT_IN_NORMAL))
3306 	    {
3307 	      gsi_next (&i);
3308 	      continue;
3309 	    }
3310 
3311 	  fcode = DECL_FUNCTION_CODE (callee);
3312 	  if (fold_stmt (&i))
3313 	    ;
3314 	  else
3315 	    {
3316 	      tree result = NULL_TREE;
3317 	      switch (DECL_FUNCTION_CODE (callee))
3318 		{
3319 		case BUILT_IN_CONSTANT_P:
3320 		  /* Resolve __builtin_constant_p.  If it hasn't been
3321 		     folded to integer_one_node by now, it's fairly
3322 		     certain that the value simply isn't constant.  */
3323 		  result = integer_zero_node;
3324 		  break;
3325 
3326 		case BUILT_IN_ASSUME_ALIGNED:
3327 		  /* Remove __builtin_assume_aligned.  */
3328 		  result = gimple_call_arg (stmt, 0);
3329 		  break;
3330 
3331 		case BUILT_IN_STACK_RESTORE:
3332 		  result = optimize_stack_restore (i);
3333 		  if (result)
3334 		    break;
3335 		  gsi_next (&i);
3336 		  continue;
3337 
3338 		case BUILT_IN_UNREACHABLE:
3339 		  if (optimize_unreachable (i))
3340 		    cfg_changed = true;
3341 		  break;
3342 
3343 		case BUILT_IN_ATOMIC_FETCH_OR_1:
3344 		case BUILT_IN_ATOMIC_FETCH_OR_2:
3345 		case BUILT_IN_ATOMIC_FETCH_OR_4:
3346 		case BUILT_IN_ATOMIC_FETCH_OR_8:
3347 		case BUILT_IN_ATOMIC_FETCH_OR_16:
3348 		  optimize_atomic_bit_test_and (&i,
3349 						IFN_ATOMIC_BIT_TEST_AND_SET,
3350 						true, false);
3351 		  break;
3352 		case BUILT_IN_SYNC_FETCH_AND_OR_1:
3353 		case BUILT_IN_SYNC_FETCH_AND_OR_2:
3354 		case BUILT_IN_SYNC_FETCH_AND_OR_4:
3355 		case BUILT_IN_SYNC_FETCH_AND_OR_8:
3356 		case BUILT_IN_SYNC_FETCH_AND_OR_16:
3357 		  optimize_atomic_bit_test_and (&i,
3358 						IFN_ATOMIC_BIT_TEST_AND_SET,
3359 						false, false);
3360 		  break;
3361 
3362 		case BUILT_IN_ATOMIC_FETCH_XOR_1:
3363 		case BUILT_IN_ATOMIC_FETCH_XOR_2:
3364 		case BUILT_IN_ATOMIC_FETCH_XOR_4:
3365 		case BUILT_IN_ATOMIC_FETCH_XOR_8:
3366 		case BUILT_IN_ATOMIC_FETCH_XOR_16:
3367 		  optimize_atomic_bit_test_and
3368 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, false);
3369 		  break;
3370 		case BUILT_IN_SYNC_FETCH_AND_XOR_1:
3371 		case BUILT_IN_SYNC_FETCH_AND_XOR_2:
3372 		case BUILT_IN_SYNC_FETCH_AND_XOR_4:
3373 		case BUILT_IN_SYNC_FETCH_AND_XOR_8:
3374 		case BUILT_IN_SYNC_FETCH_AND_XOR_16:
3375 		  optimize_atomic_bit_test_and
3376 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, false);
3377 		  break;
3378 
3379 		case BUILT_IN_ATOMIC_XOR_FETCH_1:
3380 		case BUILT_IN_ATOMIC_XOR_FETCH_2:
3381 		case BUILT_IN_ATOMIC_XOR_FETCH_4:
3382 		case BUILT_IN_ATOMIC_XOR_FETCH_8:
3383 		case BUILT_IN_ATOMIC_XOR_FETCH_16:
3384 		  optimize_atomic_bit_test_and
3385 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, true);
3386 		  break;
3387 		case BUILT_IN_SYNC_XOR_AND_FETCH_1:
3388 		case BUILT_IN_SYNC_XOR_AND_FETCH_2:
3389 		case BUILT_IN_SYNC_XOR_AND_FETCH_4:
3390 		case BUILT_IN_SYNC_XOR_AND_FETCH_8:
3391 		case BUILT_IN_SYNC_XOR_AND_FETCH_16:
3392 		  optimize_atomic_bit_test_and
3393 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, true);
3394 		  break;
3395 
3396 		case BUILT_IN_ATOMIC_FETCH_AND_1:
3397 		case BUILT_IN_ATOMIC_FETCH_AND_2:
3398 		case BUILT_IN_ATOMIC_FETCH_AND_4:
3399 		case BUILT_IN_ATOMIC_FETCH_AND_8:
3400 		case BUILT_IN_ATOMIC_FETCH_AND_16:
3401 		  optimize_atomic_bit_test_and (&i,
3402 						IFN_ATOMIC_BIT_TEST_AND_RESET,
3403 						true, false);
3404 		  break;
3405 		case BUILT_IN_SYNC_FETCH_AND_AND_1:
3406 		case BUILT_IN_SYNC_FETCH_AND_AND_2:
3407 		case BUILT_IN_SYNC_FETCH_AND_AND_4:
3408 		case BUILT_IN_SYNC_FETCH_AND_AND_8:
3409 		case BUILT_IN_SYNC_FETCH_AND_AND_16:
3410 		  optimize_atomic_bit_test_and (&i,
3411 						IFN_ATOMIC_BIT_TEST_AND_RESET,
3412 						false, false);
3413 		  break;
3414 
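		/* Rough sketch: a memcpy (&dst, &src, n) with constant
		   operands may be rewritten by optimize_memcpy as a direct
		   store when the contents of src are known at this point
		   (the precise conditions live in that helper).  */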
3415 		case BUILT_IN_MEMCPY:
3416 		  if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)
3417 		      && TREE_CODE (gimple_call_arg (stmt, 0)) == ADDR_EXPR
3418 		      && TREE_CODE (gimple_call_arg (stmt, 1)) == ADDR_EXPR
3419 		      && TREE_CODE (gimple_call_arg (stmt, 2)) == INTEGER_CST)
3420 		    {
3421 		      tree dest = TREE_OPERAND (gimple_call_arg (stmt, 0), 0);
3422 		      tree src = TREE_OPERAND (gimple_call_arg (stmt, 1), 0);
3423 		      tree len = gimple_call_arg (stmt, 2);
3424 		      optimize_memcpy (&i, dest, src, len);
3425 		    }
3426 		  break;
3427 
3428 		case BUILT_IN_VA_START:
3429 		case BUILT_IN_VA_END:
3430 		case BUILT_IN_VA_COPY:
3431 		  /* These shouldn't be folded before pass_stdarg.  */
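		  /* Rough sketch (target-dependent): on targets where
		     va_list is a plain pointer, va_end (ap) reduces to
		     nothing and va_copy (d, s) to d = s, which is the kind
		     of lowering optimize_stdarg_builtin produces here.  */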
3432 		  result = optimize_stdarg_builtin (stmt);
3433 		  break;
3434 
3435 		default:;
3436 		}
3437 
3438 	      if (!result)
3439 		{
3440 		  gsi_next (&i);
3441 		  continue;
3442 		}
3443 
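	      /* Substitute RESULT for the call; if it is not already in a
		 form that can replace the call directly, gimplify it
		 first.  */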
3444 	      if (!update_call_from_tree (&i, result))
3445 		gimplify_and_update_call_from_tree (&i, result);
3446 	    }
3447 
3448 	  todoflags |= TODO_update_address_taken;
3449 
3450 	  if (dump_file && (dump_flags & TDF_DETAILS))
3451 	    {
3452 	      fprintf (dump_file, "Simplified\n  ");
3453 	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3454 	    }
3455 
3456           old_stmt = stmt;
3457 	  stmt = gsi_stmt (i);
3458 	  update_stmt (stmt);
3459 
3460 	  if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
3461 	      && gimple_purge_dead_eh_edges (bb))
3462 	    cfg_changed = true;
3463 
3464 	  if (dump_file && (dump_flags & TDF_DETAILS))
3465 	    {
3466 	      fprintf (dump_file, "to\n  ");
3467 	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3468 	      fprintf (dump_file, "\n");
3469 	    }
3470 
3471 	  /* Retry the same statement if it changed into another
3472 	     builtin; there might be new opportunities now.  */
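	  /* For instance (hypothetical), a strcpy (d, "ab") folded above into
	     memcpy (d, "ab", 3) can be examined again by the memcpy handling
	     on the next iteration.  */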
3473           if (gimple_code (stmt) != GIMPLE_CALL)
3474 	    {
3475 	      gsi_next (&i);
3476 	      continue;
3477 	    }
3478 	  callee = gimple_call_fndecl (stmt);
3479 	  if (!callee
3480 	      || !fndecl_built_in_p (callee, fcode))
3481 	    gsi_next (&i);
3482 	}
3483     }
3484 
3485   /* Delete unreachable blocks.  */
3486   if (cfg_changed)
3487     todoflags |= TODO_cleanup_cfg;
3488 
3489   return todoflags;
3490 }
3491 
3492 } // anon namespace
3493 
3494 gimple_opt_pass *
3495 make_pass_fold_builtins (gcc::context *ctxt)
3496 {
3497   return new pass_fold_builtins (ctxt);
3498 }
3499 
3500 /* A simple pass that emits some warnings post IPA.  */
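/* For example (illustrative), given
     extern void use (void *) __attribute__ ((nonnull));
     void f (void) { use (0); }
   the pass below emits "argument 1 null where non-null expected" once IPA
   transformations have had a chance to expose such calls.  */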
3501 
3502 namespace {
3503 
3504 const pass_data pass_data_post_ipa_warn =
3505 {
3506   GIMPLE_PASS, /* type */
3507   "post_ipa_warn", /* name */
3508   OPTGROUP_NONE, /* optinfo_flags */
3509   TV_NONE, /* tv_id */
3510   ( PROP_cfg | PROP_ssa ), /* properties_required */
3511   0, /* properties_provided */
3512   0, /* properties_destroyed */
3513   0, /* todo_flags_start */
3514   0, /* todo_flags_finish */
3515 };
3516 
3517 class pass_post_ipa_warn : public gimple_opt_pass
3518 {
3519 public:
3520   pass_post_ipa_warn (gcc::context *ctxt)
3521     : gimple_opt_pass (pass_data_post_ipa_warn, ctxt)
3522   {}
3523 
3524   /* opt_pass methods: */
3525   opt_pass * clone () { return new pass_post_ipa_warn (m_ctxt); }
3526   virtual bool gate (function *) { return warn_nonnull != 0; }
3527   virtual unsigned int execute (function *);
3528 
3529 }; // class pass_post_ipa_warn
3530 
3531 unsigned int
3532 pass_post_ipa_warn::execute (function *fun)
3533 {
3534   basic_block bb;
3535 
3536   FOR_EACH_BB_FN (bb, fun)
3537     {
3538       gimple_stmt_iterator gsi;
3539       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3540 	{
3541 	  gimple *stmt = gsi_stmt (gsi);
3542 	  if (!is_gimple_call (stmt) || gimple_no_warning_p (stmt))
3543 	    continue;
3544 
3545 	  if (warn_nonnull)
3546 	    {
3547 	      bitmap nonnullargs
3548 		= get_nonnull_args (gimple_call_fntype (stmt));
3549 	      if (nonnullargs)
3550 		{
3551 		  for (unsigned i = 0; i < gimple_call_num_args (stmt); i++)
3552 		    {
3553 		      tree arg = gimple_call_arg (stmt, i);
3554 		      if (TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE)
3555 			continue;
3556 		      if (!integer_zerop (arg))
3557 			continue;
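		      /* An empty bitmap from get_nonnull_args means the
			 nonnull attribute listed no positions, i.e. all
			 pointer arguments are expected to be non-null.  */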
3558 		      if (!bitmap_empty_p (nonnullargs)
3559 			  && !bitmap_bit_p (nonnullargs, i))
3560 			continue;
3561 
3562 		      location_t loc = gimple_location (stmt);
3563 		      auto_diagnostic_group d;
3564 		      if (warning_at (loc, OPT_Wnonnull,
3565 				      "%Gargument %u null where non-null "
3566 				      "expected", stmt, i + 1))
3567 			{
3568 			  tree fndecl = gimple_call_fndecl (stmt);
3569 			  if (fndecl && DECL_IS_BUILTIN (fndecl))
3570 			    inform (loc, "in a call to built-in function %qD",
3571 				    fndecl);
3572 			  else if (fndecl)
3573 			    inform (DECL_SOURCE_LOCATION (fndecl),
3574 				    "in a call to function %qD declared here",
3575 				    fndecl);
3576 
3577 			}
3578 		    }
3579 		  BITMAP_FREE (nonnullargs);
3580 		}
3581 	    }
3582 	}
3583     }
3584   return 0;
3585 }
3586 
3587 } // anon namespace
3588 
3589 gimple_opt_pass *
3590 make_pass_post_ipa_warn (gcc::context *ctxt)
3591 {
3592   return new pass_post_ipa_warn (ctxt);
3593 }
3594 
3595 #if defined(__NetBSD__) && defined(NETBSD_NATIVE)
3596 /*
3597  * This is a big, ugly, temporary hack:
3598  *    http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59958
3599  * To make sure we have configured all our targets correctly, mimic the
3600  * #ifdef cascade from src/lib/libc/stdlib/jemalloc.c here and assert at
3601  * compile time that the resulting value matches gcc's MALLOC_ABI_ALIGNMENT.
3602  */
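/* Worked example (amd64): JEMALLOC_TINY_MIN_2POW is 3 there, so the
   assertion below checks that 8 << 3 == 64 bits (an 8-byte minimum
   allocation alignment) matches GCC's MALLOC_ABI_ALIGNMENT, which is
   expressed in bits.  */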
3603 
3604 #if defined(__hppa__)
3605 #define	JEMALLOC_TINY_MIN_2POW	4
3606 #elif defined(__alpha__) || defined(__amd64__) || defined(__sparc64__)	\
3607      ||	(defined(__arm__) && defined(__ARM_EABI__)) \
3608      || defined(__ia64__) || defined(__powerpc__) \
3609      || defined(__aarch64__) \
3610      || ((defined(__mips__) || defined(__riscv__)) && defined(_LP64))
3611 #define	JEMALLOC_TINY_MIN_2POW	3
3612 #endif
3613 
3614 #ifndef JEMALLOC_TINY_MIN_2POW
3615 #define	JEMALLOC_TINY_MIN_2POW	2
3616 #endif
3617 
3618 /* Make sure we test the (native) 64-bit variant for targets supporting -m32.  */
3619 #undef	TARGET_64BIT
3620 #ifdef _LP64
3621 #define	TARGET_64BIT	1
3622 #else
3623 #ifdef __sh__
3624 #undef UNITS_PER_WORD
3625 #define	UNITS_PER_WORD	4	/* The original definition varies depending on the CPU.  */
3626 #endif
3627 #define	TARGET_64BIT	0
3628 #endif
3629 
3630 /* ARM has a non-constant MALLOC_ABI_ALIGNMENT since GCC 5.  */
3631 #if !defined(__arm__)
3632 #ifdef __CTASSERT
3633 __CTASSERT((8<<JEMALLOC_TINY_MIN_2POW) == MALLOC_ABI_ALIGNMENT);
3634 #else
3635 #error compiling on an older NetBSD version?
3636 #endif
3637 #endif
3638 
3639 #endif
3640