xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/tree-ssa-ccp.c (revision cef8759bd76c1b621f8eab8faa6f208faabc2e15)
1 /* Conditional constant propagation pass for the GNU compiler.
2    Copyright (C) 2000-2017 Free Software Foundation, Inc.
3    Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
4    Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
5 
6 This file is part of GCC.
7 
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by the
10 Free Software Foundation; either version 3, or (at your option) any
11 later version.
12 
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16 for more details.
17 
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3.  If not see
20 <http://www.gnu.org/licenses/>.  */
21 
22 /* Conditional constant propagation (CCP) is based on the SSA
23    propagation engine (tree-ssa-propagate.c).  Constant assignments of
24    the form VAR = CST are propagated from the assignments into uses of
25    VAR, which in turn may generate new constants.  The simulation uses
26    a four level lattice to keep track of constant values associated
27    with SSA names.  Given an SSA name V_i, it may take one of the
28    following values:
29 
30 	UNINITIALIZED   ->  the initial state of the value.  This value
31 			    is replaced with a correct initial value
32 			    the first time the value is used, so the
33 			    rest of the pass does not need to care about
34 			    it.  Using this value simplifies initialization
35 			    of the pass, and prevents us from needlessly
36 			    scanning statements that are never reached.
37 
38 	UNDEFINED	->  V_i is a local variable whose definition
39 			    has not been processed yet.  Therefore we
40 			    don't yet know if its value is a constant
41 			    or not.
42 
43 	CONSTANT	->  V_i has been found to hold a constant
44 			    value C.
45 
46 	VARYING		->  V_i cannot take a constant value, or if it
47 			    does, it is not possible to determine it
48 			    at compile time.
49 
50    The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:
51 
52    1- In ccp_visit_stmt, we are interested in assignments whose RHS
53       evaluates to a constant and conditional jumps whose predicate
54       evaluates to a boolean true or false.  When an assignment of
55       the form V_i = CONST is found, V_i's lattice value is set to
56       CONSTANT and CONST is associated with it.  This causes the
57       propagation engine to add all the SSA edges coming out of the
58       assignment into the worklists, so that statements that use V_i
59       can be visited.
60 
61       If the statement is a conditional with a constant predicate, we
62       mark the outgoing edges as executable or not executable
63       depending on the predicate's value.  This is then used when
64       visiting PHI nodes to know when a PHI argument can be ignored.
65 
66 
67    2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
68       same constant C, then the LHS of the PHI is set to C.  This
69       evaluation is known as the "meet operation".  Since one of the
70       goals of this evaluation is to optimistically return constant
71       values as often as possible, it uses two main short cuts:
72 
73       - If an argument is flowing in through a non-executable edge, it
74 	is ignored.  This is useful in cases like this:
75 
76 			if (PRED)
77 			  a_9 = 3;
78 			else
79 			  a_10 = 100;
80 			a_11 = PHI (a_9, a_10)
81 
82 	If PRED is known to always evaluate to false, then we can
83 	assume that a_11 will always take its value from a_10, meaning
84 	that instead of considering it VARYING (a_9 and a_10 have
85 	different values), we can consider it CONSTANT 100.
86 
87       - If an argument has an UNDEFINED value, then it does not affect
88 	the outcome of the meet operation.  If a variable V_i has an
89 	UNDEFINED value, it means that either its defining statement
90 	hasn't been visited yet or V_i has no defining statement, in
91 	which case the original symbol 'V' is being used
92 	uninitialized.  Since 'V' is a local variable, the compiler
93 	may assume any initial value for it.
94 
95 
96    After propagation, every variable V_i that ends up with a lattice
97    value of CONSTANT will have the associated constant value in the
98    array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
99    final substitution and folding.
100 
101    This algorithm uses wide-ints at the max precision of the target.
102    This means that, with one uninteresting exception, variables with
103    UNSIGNED types never go to VARYING because the bits above the
104    precision of the type of the variable are always zero.  The
105    uninteresting case is a variable of UNSIGNED type that has the
106    maximum precision of the target.  Such variables can go to VARYING,
107    but this causes no loss of information since these variables will
108    never be extended.
109 
110    References:
111 
112      Constant propagation with conditional branches,
113      Wegman and Zadeck, ACM TOPLAS 13(2):181-210.
114 
115      Building an Optimizing Compiler,
116      Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.
117 
118      Advanced Compiler Design and Implementation,
119      Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */
120 
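/* Worked example (editorial addition, not part of the original
   sources).  For the GIMPLE input

			a_1 = 10;
			b_2 = a_1 + 5;
			if (b_2 > 20)
			  c_3 = 1;
			else
			  c_4 = 2;
			c_5 = PHI (c_3, c_4)

   the pass computes b_2 = CONSTANT 15, folds the predicate 15 > 20 to
   false and marks the true edge not executable, so the PHI meets only
   c_4 and c_5 becomes CONSTANT 2.  substitute_and_fold then replaces
   the uses and the dead branch can be removed.  */
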
121 #include "config.h"
122 #include "system.h"
123 #include "coretypes.h"
124 #include "backend.h"
125 #include "target.h"
126 #include "tree.h"
127 #include "gimple.h"
128 #include "tree-pass.h"
129 #include "ssa.h"
130 #include "gimple-pretty-print.h"
131 #include "fold-const.h"
132 #include "gimple-fold.h"
133 #include "tree-eh.h"
134 #include "gimplify.h"
135 #include "gimple-iterator.h"
136 #include "tree-cfg.h"
137 #include "tree-ssa-propagate.h"
138 #include "dbgcnt.h"
139 #include "params.h"
140 #include "builtins.h"
141 #include "tree-chkp.h"
142 #include "cfgloop.h"
143 #include "stor-layout.h"
144 #include "optabs-query.h"
145 #include "tree-ssa-ccp.h"
146 #include "tree-dfa.h"
147 #include "diagnostic-core.h"
148 
149 /* Possible lattice values.  */
150 typedef enum
151 {
152   UNINITIALIZED,
153   UNDEFINED,
154   CONSTANT,
155   VARYING
156 } ccp_lattice_t;
157 
158 struct ccp_prop_value_t {
159     /* Lattice value.  */
160     ccp_lattice_t lattice_val;
161 
162     /* Propagated value.  */
163     tree value;
164 
165     /* Mask that applies to the propagated value during CCP.  For X
166        with a CONSTANT lattice value, X & ~mask == value & ~mask.  The
167        zero bits in the mask cover constant values.  The one bits mean no
168        information.  */
169     widest_int mask;
170 };
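
/* Worked example (editorial addition): a CONSTANT lattice value with
   value == 0x10 and mask == 0xf describes a run-time value whose low
   four bits are unknown and whose remaining bits match 0x10, i.e. any
   value in [0x10, 0x1f]; value & ~mask == 0x10 holds for all of them.  */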
171 
172 /* Array of propagated constant values.  After propagation,
173    CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  */
178 static ccp_prop_value_t *const_val;
179 static unsigned n_const_val;
180 
181 static void canonicalize_value (ccp_prop_value_t *);
182 static bool ccp_fold_stmt (gimple_stmt_iterator *);
183 static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *);
184 
185 /* Dump constant propagation value VAL to file OUTF prefixed by PREFIX.  */
186 
187 static void
188 dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
189 {
190   switch (val.lattice_val)
191     {
192     case UNINITIALIZED:
193       fprintf (outf, "%sUNINITIALIZED", prefix);
194       break;
195     case UNDEFINED:
196       fprintf (outf, "%sUNDEFINED", prefix);
197       break;
198     case VARYING:
199       fprintf (outf, "%sVARYING", prefix);
200       break;
201     case CONSTANT:
202       if (TREE_CODE (val.value) != INTEGER_CST
203 	  || val.mask == 0)
204 	{
205 	  fprintf (outf, "%sCONSTANT ", prefix);
206 	  print_generic_expr (outf, val.value, dump_flags);
207 	}
208       else
209 	{
210 	  widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
211 					     val.mask);
212 	  fprintf (outf, "%sCONSTANT ", prefix);
213 	  print_hex (cval, outf);
214 	  fprintf (outf, " (");
215 	  print_hex (val.mask, outf);
216 	  fprintf (outf, ")");
217 	}
218       break;
219     default:
220       gcc_unreachable ();
221     }
222 }
223 
224 
225 /* Print lattice value VAL to stderr.  */
226 
227 void debug_lattice_value (ccp_prop_value_t val);
228 
229 DEBUG_FUNCTION void
230 debug_lattice_value (ccp_prop_value_t val)
231 {
232   dump_lattice_value (stderr, "", val);
233   fprintf (stderr, "\n");
234 }
235 
236 /* Extend NONZERO_BITS to a full mask, based on SGN.  */
237 
238 static widest_int
239 extend_mask (const wide_int &nonzero_bits, signop sgn)
240 {
241   return widest_int::from (nonzero_bits, sgn);
242 }
243 
244 /* Compute a default value for variable VAR and store it in the
245    CONST_VAL array.  The following rules are used to get default
246    values:
247 
248    1- Global and static variables that are declared constant are
249       considered CONSTANT.
250 
251    2- Any other value is considered UNDEFINED.  This is useful when
252       considering PHI nodes.  PHI arguments that are undefined do not
253       change the constant value of the PHI node, which allows for more
254       constants to be propagated.
255 
256    3- Variables defined by statements other than assignments and PHI
257       nodes are considered VARYING.
258 
259    4- Initial values of variables that are not GIMPLE registers are
260       considered VARYING.  */
261 
262 static ccp_prop_value_t
263 get_default_value (tree var)
264 {
265   ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
266   gimple *stmt;
267 
268   stmt = SSA_NAME_DEF_STMT (var);
269 
270   if (gimple_nop_p (stmt))
271     {
272       /* Variables defined by an empty statement are those used
273 	 before being initialized.  If VAR is a local variable, we
274 	 can assume initially that it is UNDEFINED, otherwise we must
275 	 consider it VARYING.  */
276       if (!virtual_operand_p (var)
277 	  && SSA_NAME_VAR (var)
278 	  && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
279 	val.lattice_val = UNDEFINED;
280       else
281 	{
282 	  val.lattice_val = VARYING;
283 	  val.mask = -1;
284 	  if (flag_tree_bit_ccp)
285 	    {
286 	      wide_int nonzero_bits = get_nonzero_bits (var);
287 	      if (nonzero_bits != -1)
288 		{
289 		  val.lattice_val = CONSTANT;
290 		  val.value = build_zero_cst (TREE_TYPE (var));
291 		  val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (var)));
292 		}
293 	    }
294 	}
295     }
296   else if (is_gimple_assign (stmt))
297     {
298       tree cst;
299       if (gimple_assign_single_p (stmt)
300 	  && DECL_P (gimple_assign_rhs1 (stmt))
301 	  && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
302 	{
303 	  val.lattice_val = CONSTANT;
304 	  val.value = cst;
305 	}
306       else
307 	{
308 	  /* Any other variable defined by an assignment is considered
309 	     UNDEFINED.  */
310 	  val.lattice_val = UNDEFINED;
311 	}
312     }
313   else if ((is_gimple_call (stmt)
314 	    && gimple_call_lhs (stmt) != NULL_TREE)
315 	   || gimple_code (stmt) == GIMPLE_PHI)
316     {
317       /* A variable defined by a call or a PHI node is considered
318 	 UNDEFINED.  */
319       val.lattice_val = UNDEFINED;
320     }
321   else
322     {
323       /* Otherwise, VAR will never take on a constant value.  */
324       val.lattice_val = VARYING;
325       val.mask = -1;
326     }
327 
328   return val;
329 }
330 
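/* Illustration of rule 1 above (editorial addition): for

     static const int ten = 10;
     ...
     x_1 = ten;

   get_symbol_constant_value returns the initializer 10 for the read
   of 'ten', so x_1 starts out CONSTANT 10 instead of UNDEFINED.  */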
331 
332 /* Get the constant value associated with variable VAR.  */
333 
334 static inline ccp_prop_value_t *
335 get_value (tree var)
336 {
337   ccp_prop_value_t *val;
338 
339   if (const_val == NULL
340       || SSA_NAME_VERSION (var) >= n_const_val)
341     return NULL;
342 
343   val = &const_val[SSA_NAME_VERSION (var)];
344   if (val->lattice_val == UNINITIALIZED)
345     *val = get_default_value (var);
346 
347   canonicalize_value (val);
348 
349   return val;
350 }
351 
352 /* Return the constant tree value associated with VAR.  */
353 
354 static inline tree
355 get_constant_value (tree var)
356 {
357   ccp_prop_value_t *val;
358   if (TREE_CODE (var) != SSA_NAME)
359     {
360       if (is_gimple_min_invariant (var))
361         return var;
362       return NULL_TREE;
363     }
364   val = get_value (var);
365   if (val
366       && val->lattice_val == CONSTANT
367       && (TREE_CODE (val->value) != INTEGER_CST
368 	  || val->mask == 0))
369     return val->value;
370   return NULL_TREE;
371 }
372 
373 /* Sets the value associated with VAR to VARYING.  */
374 
375 static inline void
376 set_value_varying (tree var)
377 {
378   ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];
379 
380   val->lattice_val = VARYING;
381   val->value = NULL_TREE;
382   val->mask = -1;
383 }
384 
385 /* For integer constants, make sure to drop TREE_OVERFLOW.  */
386 
387 static void
388 canonicalize_value (ccp_prop_value_t *val)
389 {
390   if (val->lattice_val != CONSTANT)
391     return;
392 
393   if (TREE_OVERFLOW_P (val->value))
394     val->value = drop_tree_overflow (val->value);
395 }
396 
397 /* Return whether the lattice transition is valid.  */
398 
399 static bool
400 valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val)
401 {
402   /* Lattice transitions must always be monotonically increasing in
403      value.  */
404   if (old_val.lattice_val < new_val.lattice_val)
405     return true;
406 
407   if (old_val.lattice_val != new_val.lattice_val)
408     return false;
409 
410   if (!old_val.value && !new_val.value)
411     return true;
412 
413   /* Now both lattice values are CONSTANT.  */
414 
415   /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
416      when only a single copy edge is executable.  */
417   if (TREE_CODE (old_val.value) == SSA_NAME
418       && TREE_CODE (new_val.value) == SSA_NAME)
419     return true;
420 
421   /* Allow transitioning from a constant to a copy.  */
422   if (is_gimple_min_invariant (old_val.value)
423       && TREE_CODE (new_val.value) == SSA_NAME)
424     return true;
425 
426   /* Allow transitioning from PHI <&x, not executable> == &x
427      to PHI <&x, &y> == common alignment.  */
428   if (TREE_CODE (old_val.value) != INTEGER_CST
429       && TREE_CODE (new_val.value) == INTEGER_CST)
430     return true;
431 
432   /* Bit-lattices have to agree in the still valid bits.  */
433   if (TREE_CODE (old_val.value) == INTEGER_CST
434       && TREE_CODE (new_val.value) == INTEGER_CST)
435     return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
436 	    == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));
437 
438   /* Otherwise constant values have to agree.  */
439   if (operand_equal_p (old_val.value, new_val.value, 0))
440     return true;
441 
442   /* At least the kinds and types should agree now.  */
443   if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value)
444       || !types_compatible_p (TREE_TYPE (old_val.value),
445 			      TREE_TYPE (new_val.value)))
446     return false;
447 
448   /* For floats and !HONOR_NANS allow transitions from (partial) NaN
449      to non-NaN.  */
450   tree type = TREE_TYPE (new_val.value);
451   if (SCALAR_FLOAT_TYPE_P (type)
452       && !HONOR_NANS (type))
453     {
454       if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value)))
455 	return true;
456     }
457   else if (VECTOR_FLOAT_TYPE_P (type)
458 	   && !HONOR_NANS (type))
459     {
460       for (unsigned i = 0; i < VECTOR_CST_NELTS (old_val.value); ++i)
461 	if (!REAL_VALUE_ISNAN
462 	       (TREE_REAL_CST (VECTOR_CST_ELT (old_val.value, i)))
463 	    && !operand_equal_p (VECTOR_CST_ELT (old_val.value, i),
464 				 VECTOR_CST_ELT (new_val.value, i), 0))
465 	  return false;
466       return true;
467     }
468   else if (COMPLEX_FLOAT_TYPE_P (type)
469 	   && !HONOR_NANS (type))
470     {
471       if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value)))
472 	  && !operand_equal_p (TREE_REALPART (old_val.value),
473 			       TREE_REALPART (new_val.value), 0))
474 	return false;
475       if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value)))
476 	  && !operand_equal_p (TREE_IMAGPART (old_val.value),
477 			       TREE_IMAGPART (new_val.value), 0))
478 	return false;
479       return true;
480     }
481   return false;
482 }
483 
484 /* Set the value for variable VAR to NEW_VAL.  Return true if the new
485    value is different from VAR's previous value.  */
486 
487 static bool
488 set_lattice_value (tree var, ccp_prop_value_t *new_val)
489 {
490   /* We can deal with old UNINITIALIZED values just fine here.  */
491   ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];
492 
493   canonicalize_value (new_val);
494 
495   /* We have to be careful to not go up the bitwise lattice
496      represented by the mask.  Instead of dropping to VARYING
497      use the meet operator to retain a conservative value.
498      Missed optimizations like PR65851 makes this necessary.
499      It also ensures we converge to a stable lattice solution.  */
500   if (old_val->lattice_val != UNINITIALIZED)
501     ccp_lattice_meet (new_val, old_val);
502 
503   gcc_checking_assert (valid_lattice_transition (*old_val, *new_val));
504 
505   /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
506      caller that this was a non-transition.  */
507   if (old_val->lattice_val != new_val->lattice_val
508       || (new_val->lattice_val == CONSTANT
509 	  && (TREE_CODE (new_val->value) != TREE_CODE (old_val->value)
510 	      || (TREE_CODE (new_val->value) == INTEGER_CST
511 		  && (new_val->mask != old_val->mask
512 		      || (wi::bit_and_not (wi::to_widest (old_val->value),
513 					   new_val->mask)
514 			  != wi::bit_and_not (wi::to_widest (new_val->value),
515 					      new_val->mask))))
516 	      || (TREE_CODE (new_val->value) != INTEGER_CST
517 		  && !operand_equal_p (new_val->value, old_val->value, 0)))))
518     {
519       /* ???  We would like to delay creation of INTEGER_CSTs from
520 	 partially constants here.  */
521 
522       if (dump_file && (dump_flags & TDF_DETAILS))
523 	{
524 	  dump_lattice_value (dump_file, "Lattice value changed to ", *new_val);
525 	  fprintf (dump_file, ".  Adding SSA edges to worklist.\n");
526 	}
527 
528       *old_val = *new_val;
529 
530       gcc_assert (new_val->lattice_val != UNINITIALIZED);
531       return true;
532     }
533 
534   return false;
535 }
536 
537 static ccp_prop_value_t get_value_for_expr (tree, bool);
538 static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
539 void bit_value_binop (enum tree_code, signop, int, widest_int *, widest_int *,
540 		      signop, int, const widest_int &, const widest_int &,
541 		      signop, int, const widest_int &, const widest_int &);
542 
543 /* Return a widest_int that can be used for bitwise simplifications
544    from VAL.  */
545 
546 static widest_int
547 value_to_wide_int (ccp_prop_value_t val)
548 {
549   if (val.value
550       && TREE_CODE (val.value) == INTEGER_CST)
551     return wi::to_widest (val.value);
552 
553   return 0;
554 }
555 
556 /* Return the value for the address expression EXPR based on alignment
557    information.  */
558 
559 static ccp_prop_value_t
560 get_value_from_alignment (tree expr)
561 {
562   tree type = TREE_TYPE (expr);
563   ccp_prop_value_t val;
564   unsigned HOST_WIDE_INT bitpos;
565   unsigned int align;
566 
567   gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
568 
569   get_pointer_alignment_1 (expr, &align, &bitpos);
570   val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
571 	      ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
572 	      : -1).and_not (align / BITS_PER_UNIT - 1);
573   val.lattice_val
574     = wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT;
575   if (val.lattice_val == CONSTANT)
576     val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
577   else
578     val.value = NULL_TREE;
579 
580   return val;
581 }
582 
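/* Worked example (editorial addition): suppose get_pointer_alignment_1
   reports a 16-byte alignment (ALIGN == 128 bits) and BITPOS == 32 for
   some ADDR_EXPR.  Then align / BITS_PER_UNIT - 1 == 15, so the mask
   gets its low four bits cleared (those bits are known) while all
   higher bits stay unknown, and the value is 32 / 8 == 4: the address
   is known to be congruent to 4 modulo 16.  */
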
583 /* Return the value for the tree operand EXPR.  If FOR_BITS_P is true
584    return constant bits extracted from alignment information for
585    invariant addresses.  */
586 
587 static ccp_prop_value_t
588 get_value_for_expr (tree expr, bool for_bits_p)
589 {
590   ccp_prop_value_t val;
591 
592   if (TREE_CODE (expr) == SSA_NAME)
593     {
594       ccp_prop_value_t *val_ = get_value (expr);
595       if (val_)
596 	val = *val_;
597       else
598 	{
599 	  val.lattice_val = VARYING;
600 	  val.value = NULL_TREE;
601 	  val.mask = -1;
602 	}
603       if (for_bits_p
604 	  && val.lattice_val == CONSTANT
605 	  && TREE_CODE (val.value) == ADDR_EXPR)
606 	val = get_value_from_alignment (val.value);
607       /* Fall back to a copy value.  */
608       if (!for_bits_p
609 	  && val.lattice_val == VARYING
610 	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr))
611 	{
612 	  val.lattice_val = CONSTANT;
613 	  val.value = expr;
614 	  val.mask = -1;
615 	}
616     }
617   else if (is_gimple_min_invariant (expr)
618 	   && (!for_bits_p || TREE_CODE (expr) != ADDR_EXPR))
619     {
620       val.lattice_val = CONSTANT;
621       val.value = expr;
622       val.mask = 0;
623       canonicalize_value (&val);
624     }
625   else if (TREE_CODE (expr) == ADDR_EXPR)
626     val = get_value_from_alignment (expr);
627   else
628     {
629       val.lattice_val = VARYING;
630       val.mask = -1;
631       val.value = NULL_TREE;
632     }
633 
634   if (val.lattice_val == VARYING
635       && TYPE_UNSIGNED (TREE_TYPE (expr)))
636     val.mask = wi::zext (val.mask, TYPE_PRECISION (TREE_TYPE (expr)));
637 
638   return val;
639 }
640 
641 /* Return the likely CCP lattice value for STMT.
642 
643    If STMT has no operands, then return CONSTANT.
644 
645    Else if undefinedness of operands of STMT cause its value to be
646    undefined, then return UNDEFINED.
647 
648    Else if any operands of STMT are constants, then return CONSTANT.
649 
650    Else return VARYING.  */
651 
652 static ccp_lattice_t
653 likely_value (gimple *stmt)
654 {
655   bool has_constant_operand, has_undefined_operand, all_undefined_operands;
656   bool has_nsa_operand;
657   tree use;
658   ssa_op_iter iter;
659   unsigned i;
660 
661   enum gimple_code code = gimple_code (stmt);
662 
663   /* This function appears to be called only for assignments, calls,
664      conditionals, and switches, due to the logic in visit_stmt.  */
665   gcc_assert (code == GIMPLE_ASSIGN
666               || code == GIMPLE_CALL
667               || code == GIMPLE_COND
668               || code == GIMPLE_SWITCH);
669 
670   /* If the statement has volatile operands, it won't fold to a
671      constant value.  */
672   if (gimple_has_volatile_ops (stmt))
673     return VARYING;
674 
675   /* Arrive here for more complex cases.  */
676   has_constant_operand = false;
677   has_undefined_operand = false;
678   all_undefined_operands = true;
679   has_nsa_operand = false;
680   FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
681     {
682       ccp_prop_value_t *val = get_value (use);
683 
684       if (val && val->lattice_val == UNDEFINED)
685 	has_undefined_operand = true;
686       else
687 	all_undefined_operands = false;
688 
689       if (val && val->lattice_val == CONSTANT)
690 	has_constant_operand = true;
691 
692       if (SSA_NAME_IS_DEFAULT_DEF (use)
693 	  || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use)))
694 	has_nsa_operand = true;
695     }
696 
697   /* There may be constants in regular rhs operands.  For calls we
698      have to ignore lhs, fndecl and static chain, otherwise only
699      the lhs.  */
700   for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
701        i < gimple_num_ops (stmt); ++i)
702     {
703       tree op = gimple_op (stmt, i);
704       if (!op || TREE_CODE (op) == SSA_NAME)
705 	continue;
706       if (is_gimple_min_invariant (op))
707 	has_constant_operand = true;
708     }
709 
710   if (has_constant_operand)
711     all_undefined_operands = false;
712 
713   if (has_undefined_operand
714       && code == GIMPLE_CALL
715       && gimple_call_internal_p (stmt))
716     switch (gimple_call_internal_fn (stmt))
717       {
718 	/* These 3 builtins use the first argument just as a magic
719 	   way to find out a decl uid.  */
720       case IFN_GOMP_SIMD_LANE:
721       case IFN_GOMP_SIMD_VF:
722       case IFN_GOMP_SIMD_LAST_LANE:
723 	has_undefined_operand = false;
724 	break;
725       default:
726 	break;
727       }
728 
729   /* If the operation combines operands like COMPLEX_EXPR, make sure to
730      not mark the result UNDEFINED if only one part of the result is
731      undefined.  */
732   if (has_undefined_operand && all_undefined_operands)
733     return UNDEFINED;
734   else if (code == GIMPLE_ASSIGN && has_undefined_operand)
735     {
736       switch (gimple_assign_rhs_code (stmt))
737 	{
738 	/* Unary operators are handled with all_undefined_operands.  */
739 	case PLUS_EXPR:
740 	case MINUS_EXPR:
741 	case POINTER_PLUS_EXPR:
742 	case BIT_XOR_EXPR:
743 	  /* Not MIN_EXPR, MAX_EXPR.  One VARYING operand may be selected.
744 	     Not bitwise operators, one VARYING operand may specify the
745 	     result completely.
746 	     Not logical operators for the same reason, apart from XOR.
747 	     Not COMPLEX_EXPR as one VARYING operand makes the result partly
748 	     not UNDEFINED.  Not *DIV_EXPR, comparisons and shifts because
749 	     the undefined operand may be promoted.  */
750 	  return UNDEFINED;
751 
752 	case ADDR_EXPR:
753 	  /* If any part of an address is UNDEFINED, like the index
754 	     of an ARRAY_EXPR, then treat the result as UNDEFINED.  */
755 	  return UNDEFINED;
756 
757 	default:
758 	  ;
759 	}
760     }
761   /* If there was an UNDEFINED operand but the result may not be UNDEFINED,
762      fall back to CONSTANT.  During iteration UNDEFINED may still drop
763      to CONSTANT.  */
764   if (has_undefined_operand)
765     return CONSTANT;
766 
767   /* We do not consider virtual operands here -- load from read-only
768      memory may have only VARYING virtual operands, but still be
769      constant.  Also we can combine the stmt with definitions from
770      operands whose definitions are not simulated again.  */
771   if (has_constant_operand
772       || has_nsa_operand
773       || gimple_references_memory_p (stmt))
774     return CONSTANT;
775 
776   return VARYING;
777 }
778 
779 /* Returns true if STMT cannot be constant.  */
780 
781 static bool
782 surely_varying_stmt_p (gimple *stmt)
783 {
784   /* If the statement has operands that we cannot handle, it cannot be
785      constant.  */
786   if (gimple_has_volatile_ops (stmt))
787     return true;
788 
789   /* If it is a call that does not return a value, or a direct call
790      to a function that is neither a builtin nor declared with an
791      assume_aligned/alloc_align attribute, it is varying.  */
792   if (is_gimple_call (stmt))
793     {
794       tree fndecl, fntype = gimple_call_fntype (stmt);
795       if (!gimple_call_lhs (stmt)
796 	  || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
797 	      && !DECL_BUILT_IN (fndecl)
798 	      && !lookup_attribute ("assume_aligned",
799 				    TYPE_ATTRIBUTES (fntype))
800 	      && !lookup_attribute ("alloc_align",
801 				    TYPE_ATTRIBUTES (fntype))))
802 	return true;
803     }
804 
805   /* Any other store operation is not interesting.  */
806   else if (gimple_vdef (stmt))
807     return true;
808 
809   /* Anything other than assignments and conditional jumps are not
810      interesting for CCP.  */
811   if (gimple_code (stmt) != GIMPLE_ASSIGN
812       && gimple_code (stmt) != GIMPLE_COND
813       && gimple_code (stmt) != GIMPLE_SWITCH
814       && gimple_code (stmt) != GIMPLE_CALL)
815     return true;
816 
817   return false;
818 }
819 
820 /* Initialize local data structures for CCP.  */
821 
822 static void
823 ccp_initialize (void)
824 {
825   basic_block bb;
826 
827   n_const_val = num_ssa_names;
828   const_val = XCNEWVEC (ccp_prop_value_t, n_const_val);
829 
830   /* Initialize simulation flags for PHI nodes and statements.  */
831   FOR_EACH_BB_FN (bb, cfun)
832     {
833       gimple_stmt_iterator i;
834 
835       for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
836         {
837 	  gimple *stmt = gsi_stmt (i);
838 	  bool is_varying;
839 
840 	  /* If the statement is a control insn, we always want to
841 	     simulate it at least once; otherwise the outgoing edges
842 	     will never get added.  */
843 	  if (stmt_ends_bb_p (stmt))
844 	    is_varying = false;
845 	  else
846 	    is_varying = surely_varying_stmt_p (stmt);
847 
848 	  if (is_varying)
849 	    {
850 	      tree def;
851 	      ssa_op_iter iter;
852 
853 	      /* If the statement will not produce a constant, mark
854 		 all its outputs VARYING.  */
855 	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
856 		set_value_varying (def);
857 	    }
858           prop_set_simulate_again (stmt, !is_varying);
859 	}
860     }
861 
862   /* Now process PHI nodes.  We never clear the simulate_again flag on
863      phi nodes, since we do not know which edges are executable yet,
864      except for phi nodes for virtual operands, which are never simulated.  */
865   FOR_EACH_BB_FN (bb, cfun)
866     {
867       gphi_iterator i;
868 
869       for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
870         {
871           gphi *phi = i.phi ();
872 
873 	  if (virtual_operand_p (gimple_phi_result (phi)))
874             prop_set_simulate_again (phi, false);
875 	  else
876             prop_set_simulate_again (phi, true);
877 	}
878     }
879 }
880 
881 /* Debug count support.  Reset the values of SSA names to VARYING
882    once the total number of SSA names analyzed exceeds the
883    specified debug count.  */
884 
885 static void
886 do_dbg_cnt (void)
887 {
888   unsigned i;
889   for (i = 0; i < num_ssa_names; i++)
890     {
891       if (!dbg_cnt (ccp))
892         {
893           const_val[i].lattice_val = VARYING;
894 	  const_val[i].mask = -1;
895           const_val[i].value = NULL_TREE;
896         }
897     }
898 }
899 
900 
901 /* Do final substitution of propagated values, cleanup the flowgraph and
902    free allocated storage.  If NONZERO_P, record nonzero bits.
903 
904    Return TRUE when something was optimized.  */
905 
906 static bool
907 ccp_finalize (bool nonzero_p)
908 {
909   bool something_changed;
910   unsigned i;
911   tree name;
912 
913   do_dbg_cnt ();
914 
915   /* Derive alignment and misalignment information from partially
916      constant pointers in the lattice or nonzero bits from partially
917      constant integers.  */
918   FOR_EACH_SSA_NAME (i, name, cfun)
919     {
920       ccp_prop_value_t *val;
921       unsigned int tem, align;
922 
923       if (!POINTER_TYPE_P (TREE_TYPE (name))
924 	  && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
925 	      /* Don't record nonzero bits before IPA to avoid
926 		 using too much memory.  */
927 	      || !nonzero_p))
928 	continue;
929 
930       val = get_value (name);
931       if (val->lattice_val != CONSTANT
932 	  || TREE_CODE (val->value) != INTEGER_CST
933 	  || val->mask == 0)
934 	continue;
935 
936       if (POINTER_TYPE_P (TREE_TYPE (name)))
937 	{
938 	  /* Trailing mask bits specify the alignment, trailing value
939 	     bits the misalignment.  */
940 	  tem = val->mask.to_uhwi ();
941 	  align = least_bit_hwi (tem);
942 	  if (align > 1)
943 	    set_ptr_info_alignment (get_ptr_info (name), align,
944 				    (TREE_INT_CST_LOW (val->value)
945 				     & (align - 1)));
946 	}
947       else
948 	{
949 	  unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
950 	  wide_int nonzero_bits = wide_int::from (val->mask, precision,
951 						  UNSIGNED) | val->value;
952 	  nonzero_bits &= get_nonzero_bits (name);
953 	  set_nonzero_bits (name, nonzero_bits);
954 	}
955     }
956 
957   /* Perform substitutions based on the known constant values.  */
958   something_changed = substitute_and_fold (get_constant_value, ccp_fold_stmt);
959 
960   free (const_val);
961   const_val = NULL;
962   return something_changed;
963 }
964 
965 
966 /* Compute the meet operator between *VAL1 and *VAL2.  Store the result
967    in VAL1.
968 
969    		any  M UNDEFINED   = any
970 		any  M VARYING     = VARYING
971 		Ci   M Cj	   = Ci		if (i == j)
972 		Ci   M Cj	   = VARYING	if (i != j)
973    */
974 
975 static void
976 ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
977 {
978   if (val1->lattice_val == UNDEFINED
979       /* For UNDEFINED M SSA we can't always use the SSA name because
980          its definition may not dominate the PHI node.  Doing optimistic
981 	 copy propagation also causes a lot of gcc.dg/uninit-pred*.c FAILs.  */
982       && (val2->lattice_val != CONSTANT
983 	  || TREE_CODE (val2->value) != SSA_NAME))
984     {
985       /* UNDEFINED M any = any   */
986       *val1 = *val2;
987     }
988   else if (val2->lattice_val == UNDEFINED
989 	   /* See above.  */
990 	   && (val1->lattice_val != CONSTANT
991 	       || TREE_CODE (val1->value) != SSA_NAME))
992     {
993       /* any M UNDEFINED = any
994          Nothing to do.  VAL1 already contains the value we want.  */
995       ;
996     }
997   else if (val1->lattice_val == VARYING
998            || val2->lattice_val == VARYING)
999     {
1000       /* any M VARYING = VARYING.  */
1001       val1->lattice_val = VARYING;
1002       val1->mask = -1;
1003       val1->value = NULL_TREE;
1004     }
1005   else if (val1->lattice_val == CONSTANT
1006 	   && val2->lattice_val == CONSTANT
1007 	   && TREE_CODE (val1->value) == INTEGER_CST
1008 	   && TREE_CODE (val2->value) == INTEGER_CST)
1009     {
1010       /* Ci M Cj = Ci		if (i == j)
1011 	 Ci M Cj = VARYING	if (i != j)
1012 
1013          For INTEGER_CSTs mask unequal bits.  If no equal bits remain,
1014 	 drop to varying.  */
1015       val1->mask = (val1->mask | val2->mask
1016 		    | (wi::to_widest (val1->value)
1017 		       ^ wi::to_widest (val2->value)));
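      /* Worked example (editorial addition): meeting the fully known
	 constants 5 (0b101) and 7 (0b111) yields mask 0b010, their XOR:
	 bit 1 becomes unknown while bits 0 and 2 remain known to be 1.
	 If no known bits remained, the sign-extended mask would be -1
	 and we would drop to VARYING below.  */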
1018       if (wi::sext (val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1)
1019 	{
1020 	  val1->lattice_val = VARYING;
1021 	  val1->value = NULL_TREE;
1022 	}
1023     }
1024   else if (val1->lattice_val == CONSTANT
1025 	   && val2->lattice_val == CONSTANT
1026 	   && operand_equal_p (val1->value, val2->value, 0))
1027     {
1028       /* Ci M Cj = Ci		if (i == j)
1029 	 Ci M Cj = VARYING	if (i != j)
1030 
1031          VAL1 already contains the value we want for equivalent values.  */
1032     }
1033   else if (val1->lattice_val == CONSTANT
1034 	   && val2->lattice_val == CONSTANT
1035 	   && (TREE_CODE (val1->value) == ADDR_EXPR
1036 	       || TREE_CODE (val2->value) == ADDR_EXPR))
1037     {
1038       /* When unequal addresses are involved, try meeting for
1039 	 alignment.  */
1040       ccp_prop_value_t tem = *val2;
1041       if (TREE_CODE (val1->value) == ADDR_EXPR)
1042 	*val1 = get_value_for_expr (val1->value, true);
1043       if (TREE_CODE (val2->value) == ADDR_EXPR)
1044 	tem = get_value_for_expr (val2->value, true);
1045       ccp_lattice_meet (val1, &tem);
1046     }
1047   else
1048     {
1049       /* Any other combination is VARYING.  */
1050       val1->lattice_val = VARYING;
1051       val1->mask = -1;
1052       val1->value = NULL_TREE;
1053     }
1054 }
1055 
1056 
1057 /* Loop through the PHI_NODE's parameters for BLOCK and compare their
1058    lattice values to determine PHI_NODE's lattice value.  The value of a
1059    PHI node is determined by calling ccp_lattice_meet with all the arguments
1060    of the PHI node that are incoming via executable edges.  */
1061 
1062 static enum ssa_prop_result
1063 ccp_visit_phi_node (gphi *phi)
1064 {
1065   unsigned i;
1066   ccp_prop_value_t new_val;
1067 
1068   if (dump_file && (dump_flags & TDF_DETAILS))
1069     {
1070       fprintf (dump_file, "\nVisiting PHI node: ");
1071       print_gimple_stmt (dump_file, phi, 0, dump_flags);
1072     }
1073 
1074   new_val.lattice_val = UNDEFINED;
1075   new_val.value = NULL_TREE;
1076   new_val.mask = 0;
1077 
1078   bool first = true;
1079   bool non_exec_edge = false;
1080   for (i = 0; i < gimple_phi_num_args (phi); i++)
1081     {
1082       /* Compute the meet operator over all the PHI arguments flowing
1083 	 through executable edges.  */
1084       edge e = gimple_phi_arg_edge (phi, i);
1085 
1086       if (dump_file && (dump_flags & TDF_DETAILS))
1087 	{
1088 	  fprintf (dump_file,
1089 	      "\n    Argument #%d (%d -> %d %sexecutable)\n",
1090 	      i, e->src->index, e->dest->index,
1091 	      (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
1092 	}
1093 
1094       /* If the incoming edge is executable, compute the meet operator for
1095 	 the existing value of the PHI node and the current PHI argument.  */
1096       if (e->flags & EDGE_EXECUTABLE)
1097 	{
1098 	  tree arg = gimple_phi_arg (phi, i)->def;
1099 	  ccp_prop_value_t arg_val = get_value_for_expr (arg, false);
1100 
1101 	  if (first)
1102 	    {
1103 	      new_val = arg_val;
1104 	      first = false;
1105 	    }
1106 	  else
1107 	    ccp_lattice_meet (&new_val, &arg_val);
1108 
1109 	  if (dump_file && (dump_flags & TDF_DETAILS))
1110 	    {
1111 	      fprintf (dump_file, "\t");
1112 	      print_generic_expr (dump_file, arg, dump_flags);
1113 	      dump_lattice_value (dump_file, "\tValue: ", arg_val);
1114 	      fprintf (dump_file, "\n");
1115 	    }
1116 
1117 	  if (new_val.lattice_val == VARYING)
1118 	    break;
1119 	}
1120       else
1121 	non_exec_edge = true;
1122     }
1123 
1124   /* In case there were non-executable edges and the value is a copy,
1125      make sure its definition dominates the PHI node.  */
1126   if (non_exec_edge
1127       && new_val.lattice_val == CONSTANT
1128       && TREE_CODE (new_val.value) == SSA_NAME
1129       && ! SSA_NAME_IS_DEFAULT_DEF (new_val.value)
1130       && ! dominated_by_p (CDI_DOMINATORS, gimple_bb (phi),
1131 			   gimple_bb (SSA_NAME_DEF_STMT (new_val.value))))
1132     {
1133       new_val.lattice_val = VARYING;
1134       new_val.value = NULL_TREE;
1135       new_val.mask = -1;
1136     }
1137 
1138   if (dump_file && (dump_flags & TDF_DETAILS))
1139     {
1140       dump_lattice_value (dump_file, "\n    PHI node value: ", new_val);
1141       fprintf (dump_file, "\n\n");
1142     }
1143 
1144   /* Make the transition to the new value.  */
1145   if (set_lattice_value (gimple_phi_result (phi), &new_val))
1146     {
1147       if (new_val.lattice_val == VARYING)
1148 	return SSA_PROP_VARYING;
1149       else
1150 	return SSA_PROP_INTERESTING;
1151     }
1152   else
1153     return SSA_PROP_NOT_INTERESTING;
1154 }
1155 
1156 /* Return the constant value for OP or OP otherwise.  */
1157 
1158 static tree
1159 valueize_op (tree op)
1160 {
1161   if (TREE_CODE (op) == SSA_NAME)
1162     {
1163       tree tem = get_constant_value (op);
1164       if (tem)
1165 	return tem;
1166     }
1167   return op;
1168 }
1169 
1170 /* Return the constant value for OP, but signal to not follow SSA
1171    edges if the definition may be simulated again.  */
1172 
1173 static tree
1174 valueize_op_1 (tree op)
1175 {
1176   if (TREE_CODE (op) == SSA_NAME)
1177     {
1178       /* If the definition may be simulated again we cannot follow
1179          this SSA edge as the SSA propagator does not necessarily
1180 	 re-visit the use.  */
1181       gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1182       if (!gimple_nop_p (def_stmt)
1183 	  && prop_simulate_again_p (def_stmt))
1184 	return NULL_TREE;
1185       tree tem = get_constant_value (op);
1186       if (tem)
1187 	return tem;
1188     }
1189   return op;
1190 }
1191 
1192 /* CCP specific front-end to the non-destructive constant folding
1193    routines.
1194 
1195    Attempt to simplify the RHS of STMT knowing that one or more
1196    operands are constants.
1197 
1198    If simplification is possible, return the simplified RHS,
1199    otherwise return the original RHS or NULL_TREE.  */
1200 
1201 static tree
1202 ccp_fold (gimple *stmt)
1203 {
1204   location_t loc = gimple_location (stmt);
1205   switch (gimple_code (stmt))
1206     {
1207     case GIMPLE_COND:
1208       {
1209         /* Handle comparison operators that can appear in GIMPLE form.  */
1210         tree op0 = valueize_op (gimple_cond_lhs (stmt));
1211         tree op1 = valueize_op (gimple_cond_rhs (stmt));
1212         enum tree_code code = gimple_cond_code (stmt);
1213         return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
1214       }
1215 
1216     case GIMPLE_SWITCH:
1217       {
1218 	/* Return the constant switch index.  */
1219         return valueize_op (gimple_switch_index (as_a <gswitch *> (stmt)));
1220       }
1221 
1222     case GIMPLE_ASSIGN:
1223     case GIMPLE_CALL:
1224       return gimple_fold_stmt_to_constant_1 (stmt,
1225 					     valueize_op, valueize_op_1);
1226 
1227     default:
1228       gcc_unreachable ();
1229     }
1230 }
1231 
1232 /* Apply the operation CODE in type TYPE to the value, mask pair
1233    RVAL and RMASK representing a value of type RTYPE and set
1234    the value, mask pair *VAL and *MASK to the result.  */
1235 
1236 void
1237 bit_value_unop (enum tree_code code, signop type_sgn, int type_precision,
1238 		widest_int *val, widest_int *mask,
1239 		signop rtype_sgn, int rtype_precision,
1240 		const widest_int &rval, const widest_int &rmask)
1241 {
1242   switch (code)
1243     {
1244     case BIT_NOT_EXPR:
1245       *mask = rmask;
1246       *val = ~rval;
1247       break;
1248 
1249     case NEGATE_EXPR:
1250       {
1251 	widest_int temv, temm;
1252 	/* Return ~rval + 1.  */
1253 	bit_value_unop (BIT_NOT_EXPR, type_sgn, type_precision, &temv, &temm,
1254 			type_sgn, type_precision, rval, rmask);
1255 	bit_value_binop (PLUS_EXPR, type_sgn, type_precision, val, mask,
1256 			 type_sgn, type_precision, temv, temm,
1257 			 type_sgn, type_precision, 1, 0);
1258 	break;
1259       }
1260 
1261     CASE_CONVERT:
1262       {
1263 	/* First extend mask and value according to the original type.  */
1264 	*mask = wi::ext (rmask, rtype_precision, rtype_sgn);
1265 	*val = wi::ext (rval, rtype_precision, rtype_sgn);
1266 
1267 	/* Then extend mask and value according to the target type.  */
1268 	*mask = wi::ext (*mask, type_precision, type_sgn);
1269 	*val = wi::ext (*val, type_precision, type_sgn);
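	/* Editorial note: sign-extension replicates an unknown sign bit.
	   E.g. widening a signed 8-bit value whose bit 7 is unknown
	   (RMASK has bit 7 set) leaves bits 7 and above of the result
	   unknown as well.  */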
1270 	break;
1271       }
1272 
1273     default:
1274       *mask = -1;
1275       break;
1276     }
1277 }
1278 
1279 /* Apply the operation CODE in type TYPE to the value, mask pairs
1280    R1VAL, R1MASK and R2VAL, R2MASK representing values of types R1TYPE
1281    and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */
1282 
1283 void
1284 bit_value_binop (enum tree_code code, signop sgn, int width,
1285 		 widest_int *val, widest_int *mask,
1286 		 signop r1type_sgn, int r1type_precision,
1287 		 const widest_int &r1val, const widest_int &r1mask,
1288 		 signop r2type_sgn, int r2type_precision,
1289 		 const widest_int &r2val, const widest_int &r2mask)
1290 {
1291   bool swap_p = false;
1292 
1293   /* Assume we'll get a constant result.  Use an initial non-varying
1294      value; we fall back to varying in the end if necessary.  */
1295   *mask = -1;
1296 
1297   switch (code)
1298     {
1299     case BIT_AND_EXPR:
1300       /* The mask is constant where there is a known not
1301 	 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
1302       *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
1303       *val = r1val & r2val;
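      /* Worked example (editorial addition): for r1 fully known 0b0101
	 and r2 == 0b00x1 (r2val == 0b0001, r2mask == 0b0010), the
	 formula yields mask == 0 and value == 0b0001: the unknown bit
	 of r2 is ANDed with a known zero bit of r1, so the result is
	 fully known.  */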
1304       break;
1305 
1306     case BIT_IOR_EXPR:
1307       /* The mask is constant where there is a known
1308 	 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)).  */
1309       *mask = (r1mask | r2mask)
1310 	      .and_not (r1val.and_not (r1mask) | r2val.and_not (r2mask));
1311       *val = r1val | r2val;
1312       break;
1313 
1314     case BIT_XOR_EXPR:
1315       /* m1 | m2  */
1316       *mask = r1mask | r2mask;
1317       *val = r1val ^ r2val;
1318       break;
1319 
1320     case LROTATE_EXPR:
1321     case RROTATE_EXPR:
1322       if (r2mask == 0)
1323 	{
1324 	  widest_int shift = r2val;
1325 	  if (shift == 0)
1326 	    {
1327 	      *mask = r1mask;
1328 	      *val = r1val;
1329 	    }
1330 	  else
1331 	    {
1332 	      if (wi::neg_p (shift))
1333 		{
1334 		  shift = -shift;
1335 		  if (code == RROTATE_EXPR)
1336 		    code = LROTATE_EXPR;
1337 		  else
1338 		    code = RROTATE_EXPR;
1339 		}
1340 	      if (code == RROTATE_EXPR)
1341 		{
1342 		  *mask = wi::rrotate (r1mask, shift, width);
1343 		  *val = wi::rrotate (r1val, shift, width);
1344 		}
1345 	      else
1346 		{
1347 		  *mask = wi::lrotate (r1mask, shift, width);
1348 		  *val = wi::lrotate (r1val, shift, width);
1349 		}
1350 	    }
1351 	}
1352       break;
1353 
1354     case LSHIFT_EXPR:
1355     case RSHIFT_EXPR:
1356       /* ???  We can handle partially known shift counts if we know
1357 	 their sign.  That way we can tell that (x << (y | 8)) & 255
1358 	 is zero.  */
1359       if (r2mask == 0)
1360 	{
1361 	  widest_int shift = r2val;
1362 	  if (shift == 0)
1363 	    {
1364 	      *mask = r1mask;
1365 	      *val = r1val;
1366 	    }
1367 	  else
1368 	    {
1369 	      if (wi::neg_p (shift))
1370 		{
1371 		  shift = -shift;
1372 		  if (code == RSHIFT_EXPR)
1373 		    code = LSHIFT_EXPR;
1374 		  else
1375 		    code = RSHIFT_EXPR;
1376 		}
1377 	      if (code == RSHIFT_EXPR)
1378 		{
1379 		  *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
1380 		  *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
1381 		}
1382 	      else
1383 		{
1384 		  *mask = wi::ext (r1mask << shift, width, sgn);
1385 		  *val = wi::ext (r1val << shift, width, sgn);
1386 		}
1387 	    }
1388 	}
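      /* Worked example (editorial addition): for WIDTH == 8, r1val == 1,
	 r1mask == 4 (r1 is 1 or 5) and a known left-shift count of 1,
	 the result is val == 2, mask == 8: the two possible results,
	 2 and 10, agree everywhere except bit 3.  */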
1389       break;
1390 
1391     case PLUS_EXPR:
1392     case POINTER_PLUS_EXPR:
1393       {
1394 	/* Do the addition with unknown bits set to zero, to give carry-ins of
1395 	   zero wherever possible.  */
1396 	widest_int lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
1397 	lo = wi::ext (lo, width, sgn);
1398 	/* Do the addition with unknown bits set to one, to give carry-ins of
1399 	   one wherever possible.  */
1400 	widest_int hi = (r1val | r1mask) + (r2val | r2mask);
1401 	hi = wi::ext (hi, width, sgn);
1402 	/* Each bit in the result is known if (a) the corresponding bits in
1403 	   both inputs are known, and (b) the carry-in to that bit position
1404 	   is known.  We can check condition (b) by seeing if we got the same
1405 	   result with minimised carries as with maximised carries.  */
1406 	*mask = r1mask | r2mask | (lo ^ hi);
1407 	*mask = wi::ext (*mask, width, sgn);
1408 	/* It shouldn't matter whether we choose lo or hi here.  */
1409 	*val = lo;
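	/* Worked example (editorial addition): r1val == 4, r1mask == 1
	   (r1 is 4 or 5) plus a fully known 1 gives lo == 5, hi == 6,
	   so lo ^ hi == 3 and mask == 3: the sums 5 and 6 agree in all
	   bits above bit 1.  */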
1410 	break;
1411       }
1412 
1413     case MINUS_EXPR:
1414       {
1415 	widest_int temv, temm;
1416 	bit_value_unop (NEGATE_EXPR, r2type_sgn, r2type_precision, &temv, &temm,
1417 			  r2type_sgn, r2type_precision, r2val, r2mask);
1418 	bit_value_binop (PLUS_EXPR, sgn, width, val, mask,
1419 			 r1type_sgn, r1type_precision, r1val, r1mask,
1420 			 r2type_sgn, r2type_precision, temv, temm);
1421 	break;
1422       }
1423 
1424     case MULT_EXPR:
1425       {
1426 	/* Just track trailing zeros in both operands and transfer
1427 	   them to the other.  */
1428 	int r1tz = wi::ctz (r1val | r1mask);
1429 	int r2tz = wi::ctz (r2val | r2mask);
1430 	if (r1tz + r2tz >= width)
1431 	  {
1432 	    *mask = 0;
1433 	    *val = 0;
1434 	  }
1435 	else if (r1tz + r2tz > 0)
1436 	  {
1437 	    *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
1438 			     width, sgn);
1439 	    *val = 0;
1440 	  }
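	/* Worked example (editorial addition): if r1 is known to be a
	   multiple of 4 (r1tz == 2) and r2 a multiple of 2 (r2tz == 1),
	   the product is a multiple of 8: the low three bits become
	   known zeros while all higher bits stay unknown.  */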
1441 	break;
1442       }
1443 
1444     case EQ_EXPR:
1445     case NE_EXPR:
1446       {
1447 	widest_int m = r1mask | r2mask;
1448 	if (r1val.and_not (m) != r2val.and_not (m))
1449 	  {
1450 	    *mask = 0;
1451 	    *val = ((code == EQ_EXPR) ? 0 : 1);
1452 	  }
1453 	else
1454 	  {
1455 	    /* We know the result of a comparison is always one or zero.  */
1456 	    *mask = 1;
1457 	    *val = 0;
1458 	  }
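	/* Worked example (editorial addition): r1val == 0b10 with
	   r1mask == 0b01 (r1 is 2 or 3) compared against a fully known
	   1 differs in bit 1, which is known on both sides, so EQ_EXPR
	   folds to 0 and NE_EXPR folds to 1.  */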
1459 	break;
1460       }
1461 
1462     case GE_EXPR:
1463     case GT_EXPR:
1464       swap_p = true;
1465       code = swap_tree_comparison (code);
1466       /* Fall through.  */
1467     case LT_EXPR:
1468     case LE_EXPR:
1469       {
1470 	int minmax, maxmin;
1471 
1472 	const widest_int &o1val = swap_p ? r2val : r1val;
1473 	const widest_int &o1mask = swap_p ? r2mask : r1mask;
1474 	const widest_int &o2val = swap_p ? r1val : r2val;
1475 	const widest_int &o2mask = swap_p ? r1mask : r2mask;
1476 
1477 	/* If the most significant bits are not known we know nothing.  */
1478 	if (wi::neg_p (o1mask) || wi::neg_p (o2mask))
1479 	  break;
1480 
1481 	/* For comparisons the signedness is in the comparison operands.  */
1482 	sgn = r1type_sgn;
1483 
1484 	/* If we know the most significant bits we know the value
1485 	   ranges by means of treating varying bits as zero
1486 	   or one.  Do a cross comparison of the max/min pairs.  */
1487 	maxmin = wi::cmp (o1val | o1mask, o2val.and_not (o2mask), sgn);
1488 	minmax = wi::cmp (o1val.and_not (o1mask), o2val | o2mask, sgn);
1489 	if (maxmin < 0)  /* o1 is less than o2.  */
1490 	  {
1491 	    *mask = 0;
1492 	    *val = 1;
1493 	  }
1494 	else if (minmax > 0)  /* o1 is not less or equal to o2.  */
1495 	  {
1496 	    *mask = 0;
1497 	    *val = 0;
1498 	  }
1499 	else if (maxmin == minmax)  /* o1 and o2 are equal.  */
1500 	  {
1501 	    /* This probably should never happen as we'd have
1502 	       folded the thing during fully constant value folding.  */
1503 	    *mask = 0;
1504 	    *val = (code == LE_EXPR ? 1 : 0);
1505 	  }
1506 	else
1507 	  {
1508 	    /* We know the result of a comparison is always one or zero.  */
1509 	    *mask = 1;
1510 	    *val = 0;
1511 	  }
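	/* Worked example (editorial addition): for unsigned operands
	   o1 in {2, 3} (o1val == 2, o1mask == 1) and o2 in {8 .. 11}
	   (o2val == 8, o2mask == 3), the maximum of o1 (3) is below the
	   minimum of o2 (8), so MAXMIN < 0 and o1 < o2 folds to 1.  */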
1512 	break;
1513       }
1514 
1515     default:;
1516     }
1517 }
1518 
1519 /* Return the propagation value when applying the operation CODE to
1520    the value RHS yielding type TYPE.  */
1521 
1522 static ccp_prop_value_t
1523 bit_value_unop (enum tree_code code, tree type, tree rhs)
1524 {
1525   ccp_prop_value_t rval = get_value_for_expr (rhs, true);
1526   widest_int value, mask;
1527   ccp_prop_value_t val;
1528 
1529   if (rval.lattice_val == UNDEFINED)
1530     return rval;
1531 
1532   gcc_assert ((rval.lattice_val == CONSTANT
1533 	       && TREE_CODE (rval.value) == INTEGER_CST)
1534 	      || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1);
1535   bit_value_unop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1536 		  TYPE_SIGN (TREE_TYPE (rhs)), TYPE_PRECISION (TREE_TYPE (rhs)),
1537 		  value_to_wide_int (rval), rval.mask);
1538   if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1539     {
1540       val.lattice_val = CONSTANT;
1541       val.mask = mask;
1542       /* ???  Delay building trees here.  */
1543       val.value = wide_int_to_tree (type, value);
1544     }
1545   else
1546     {
1547       val.lattice_val = VARYING;
1548       val.value = NULL_TREE;
1549       val.mask = -1;
1550     }
1551   return val;
1552 }
1553 
1554 /* Return the propagation value when applying the operation CODE to
1555    the values RHS1 and RHS2 yielding type TYPE.  */
1556 
1557 static ccp_prop_value_t
1558 bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
1559 {
1560   ccp_prop_value_t r1val = get_value_for_expr (rhs1, true);
1561   ccp_prop_value_t r2val = get_value_for_expr (rhs2, true);
1562   widest_int value, mask;
1563   ccp_prop_value_t val;
1564 
1565   if (r1val.lattice_val == UNDEFINED
1566       || r2val.lattice_val == UNDEFINED)
1567     {
1568       val.lattice_val = VARYING;
1569       val.value = NULL_TREE;
1570       val.mask = -1;
1571       return val;
1572     }
1573 
1574   gcc_assert ((r1val.lattice_val == CONSTANT
1575 	       && TREE_CODE (r1val.value) == INTEGER_CST)
1576 	      || wi::sext (r1val.mask,
1577 			   TYPE_PRECISION (TREE_TYPE (rhs1))) == -1);
1578   gcc_assert ((r2val.lattice_val == CONSTANT
1579 	       && TREE_CODE (r2val.value) == INTEGER_CST)
1580 	      || wi::sext (r2val.mask,
1581 			   TYPE_PRECISION (TREE_TYPE (rhs2))) == -1);
1582   bit_value_binop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1583 		   TYPE_SIGN (TREE_TYPE (rhs1)), TYPE_PRECISION (TREE_TYPE (rhs1)),
1584 		   value_to_wide_int (r1val), r1val.mask,
1585 		   TYPE_SIGN (TREE_TYPE (rhs2)), TYPE_PRECISION (TREE_TYPE (rhs2)),
1586 		   value_to_wide_int (r2val), r2val.mask);
1587 
1588   if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1589     {
1590       val.lattice_val = CONSTANT;
1591       val.mask = mask;
1592       /* ???  Delay building trees here.  */
1593       val.value = wide_int_to_tree (type, value);
1594     }
1595   else
1596     {
1597       val.lattice_val = VARYING;
1598       val.value = NULL_TREE;
1599       val.mask = -1;
1600     }
1601   return val;
1602 }
1603 
1604 /* Return the propagation value for __builtin_assume_aligned
1605    and functions with the assume_aligned or alloc_align attribute.
1606    For __builtin_assume_aligned, ATTR is NULL_TREE,
1607    for the assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
1608    is false, and for the alloc_align attribute ATTR is non-NULL and
1609    ALLOC_ALIGNED is true.  */
1610 
1611 static ccp_prop_value_t
1612 bit_value_assume_aligned (gimple *stmt, tree attr, ccp_prop_value_t ptrval,
1613 			  bool alloc_aligned)
1614 {
1615   tree align, misalign = NULL_TREE, type;
1616   unsigned HOST_WIDE_INT aligni, misaligni = 0;
1617   ccp_prop_value_t alignval;
1618   widest_int value, mask;
1619   ccp_prop_value_t val;
1620 
1621   if (attr == NULL_TREE)
1622     {
1623       tree ptr = gimple_call_arg (stmt, 0);
1624       type = TREE_TYPE (ptr);
1625       ptrval = get_value_for_expr (ptr, true);
1626     }
1627   else
1628     {
1629       tree lhs = gimple_call_lhs (stmt);
1630       type = TREE_TYPE (lhs);
1631     }
1632 
1633   if (ptrval.lattice_val == UNDEFINED)
1634     return ptrval;
1635   gcc_assert ((ptrval.lattice_val == CONSTANT
1636 	       && TREE_CODE (ptrval.value) == INTEGER_CST)
1637 	      || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1);
1638   if (attr == NULL_TREE)
1639     {
1640       /* Get aligni and misaligni from __builtin_assume_aligned.  */
1641       align = gimple_call_arg (stmt, 1);
1642       if (!tree_fits_uhwi_p (align))
1643 	return ptrval;
1644       aligni = tree_to_uhwi (align);
1645       if (gimple_call_num_args (stmt) > 2)
1646 	{
1647 	  misalign = gimple_call_arg (stmt, 2);
1648 	  if (!tree_fits_uhwi_p (misalign))
1649 	    return ptrval;
1650 	  misaligni = tree_to_uhwi (misalign);
1651 	}
1652     }
1653   else
1654     {
1655       /* Get aligni and misaligni from assume_aligned or
1656 	 alloc_align attributes.  */
1657       if (TREE_VALUE (attr) == NULL_TREE)
1658 	return ptrval;
1659       attr = TREE_VALUE (attr);
1660       align = TREE_VALUE (attr);
1661       if (!tree_fits_uhwi_p (align))
1662 	return ptrval;
1663       aligni = tree_to_uhwi (align);
1664       if (alloc_aligned)
1665 	{
1666 	  if (aligni == 0 || aligni > gimple_call_num_args (stmt))
1667 	    return ptrval;
1668 	  align = gimple_call_arg (stmt, aligni - 1);
1669 	  if (!tree_fits_uhwi_p (align))
1670 	    return ptrval;
1671 	  aligni = tree_to_uhwi (align);
1672 	}
1673       else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr)))
1674 	{
1675 	  misalign = TREE_VALUE (TREE_CHAIN (attr));
1676 	  if (!tree_fits_uhwi_p (misalign))
1677 	    return ptrval;
1678 	  misaligni = tree_to_uhwi (misalign);
1679 	}
1680     }
1681   if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni)
1682     return ptrval;
1683 
1684   align = build_int_cst_type (type, -aligni);
1685   alignval = get_value_for_expr (align, true);
1686   bit_value_binop (BIT_AND_EXPR, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1687 		   TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (ptrval), ptrval.mask,
1688 		   TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (alignval), alignval.mask);
1689 
1690   if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1691     {
1692       val.lattice_val = CONSTANT;
1693       val.mask = mask;
1694       gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
1695       gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
1696       value |= misaligni;
1697       /* ???  Delay building trees here.  */
1698       val.value = wide_int_to_tree (type, value);
1699     }
1700   else
1701     {
1702       val.lattice_val = VARYING;
1703       val.value = NULL_TREE;
1704       val.mask = -1;
1705     }
1706   return val;
1707 }
1708 
1709 /* Evaluate statement STMT.
1710    Valid only for assignments, calls, conditionals, and switches. */
1711 
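/* Editorial sketch of the lattice values this returns: CONSTANT with
   mask 0 is a fully known constant; CONSTANT with a nonzero mask is a
   partially known value whose set mask bits are unknown and whose
   remaining bits are given by value (e.g. value 0 with mask ~7 means
   "some multiple of 8"); UNDEFINED and VARYING are as usual.  */
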
1712 static ccp_prop_value_t
1713 evaluate_stmt (gimple *stmt)
1714 {
1715   ccp_prop_value_t val;
1716   tree simplified = NULL_TREE;
1717   ccp_lattice_t likelyvalue = likely_value (stmt);
1718   bool is_constant = false;
1719   unsigned int align;
1720 
1721   if (dump_file && (dump_flags & TDF_DETAILS))
1722     {
1723       fprintf (dump_file, "which is likely ");
1724       switch (likelyvalue)
1725 	{
1726 	case CONSTANT:
1727 	  fprintf (dump_file, "CONSTANT");
1728 	  break;
1729 	case UNDEFINED:
1730 	  fprintf (dump_file, "UNDEFINED");
1731 	  break;
1732 	case VARYING:
1733 	  fprintf (dump_file, "VARYING");
1734 	  break;
1735 	default:;
1736 	}
1737       fprintf (dump_file, "\n");
1738     }
1739 
1740   /* If the statement is likely to have a CONSTANT result, then try
1741      to fold the statement to determine the constant value.  */
1742   /* FIXME.  This is the only place that we call ccp_fold.
1743      Since likely_value never returns CONSTANT for calls, we will not
1744      attempt to fold them, not even builtins that could profit from it.  */
1745   if (likelyvalue == CONSTANT)
1746     {
1747       fold_defer_overflow_warnings ();
1748       simplified = ccp_fold (stmt);
1749       if (simplified
1750 	  && TREE_CODE (simplified) == SSA_NAME)
1751 	{
1752 	  /* We must not use values of something that may be simulated again;
1753 	     see valueize_op_1.  */
1754 	  if (SSA_NAME_IS_DEFAULT_DEF (simplified)
1755 	      || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (simplified)))
1756 	    {
1757 	      ccp_prop_value_t *val = get_value (simplified);
1758 	      if (val && val->lattice_val != VARYING)
1759 		{
1760 		  fold_undefer_overflow_warnings (true, stmt, 0);
1761 		  return *val;
1762 		}
1763 	    }
1764 	  else
1765 	    /* We must also not place a non-valueized copy in the lattice,
1766 	       as that might become stale if we never re-visit this stmt.  */
1767 	    simplified = NULL_TREE;
1768 	}
1769       is_constant = simplified && is_gimple_min_invariant (simplified);
1770       fold_undefer_overflow_warnings (is_constant, stmt, 0);
1771       if (is_constant)
1772 	{
1773 	  /* The statement produced a constant value.  */
1774 	  val.lattice_val = CONSTANT;
1775 	  val.value = simplified;
1776 	  val.mask = 0;
1777 	  return val;
1778 	}
1779     }
1780   /* If the statement is likely to have a VARYING result, then do not
1781      bother folding the statement.  */
1782   else if (likelyvalue == VARYING)
1783     {
1784       enum gimple_code code = gimple_code (stmt);
1785       if (code == GIMPLE_ASSIGN)
1786         {
1787           enum tree_code subcode = gimple_assign_rhs_code (stmt);
1788 
1789           /* Other cases cannot satisfy is_gimple_min_invariant
1790              without folding.  */
1791           if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
1792             simplified = gimple_assign_rhs1 (stmt);
1793         }
1794       else if (code == GIMPLE_SWITCH)
1795         simplified = gimple_switch_index (as_a <gswitch *> (stmt));
1796       else
1797 	/* These cannot satisfy is_gimple_min_invariant without folding.  */
1798 	gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
1799       is_constant = simplified && is_gimple_min_invariant (simplified);
1800       if (is_constant)
1801 	{
1802 	  /* The statement produced a constant value.  */
1803 	  val.lattice_val = CONSTANT;
1804 	  val.value = simplified;
1805 	  val.mask = 0;
1806 	}
1807     }
1808   /* If the statement result is likely UNDEFINED, make it so.  */
1809   else if (likelyvalue == UNDEFINED)
1810     {
1811       val.lattice_val = UNDEFINED;
1812       val.value = NULL_TREE;
1813       val.mask = 0;
1814       return val;
1815     }
1816 
1817   /* Resort to simplification for bitwise tracking.  */
1818   if (flag_tree_bit_ccp
1819       && (likelyvalue == CONSTANT || is_gimple_call (stmt)
1820 	  || (gimple_assign_single_p (stmt)
1821 	      && gimple_assign_rhs_code (stmt) == ADDR_EXPR))
1822       && !is_constant)
1823     {
1824       enum gimple_code code = gimple_code (stmt);
1825       val.lattice_val = VARYING;
1826       val.value = NULL_TREE;
1827       val.mask = -1;
1828       if (code == GIMPLE_ASSIGN)
1829 	{
1830 	  enum tree_code subcode = gimple_assign_rhs_code (stmt);
1831 	  tree rhs1 = gimple_assign_rhs1 (stmt);
1832 	  tree lhs = gimple_assign_lhs (stmt);
1833 	  if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
1834 	       || POINTER_TYPE_P (TREE_TYPE (lhs)))
1835 	      && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1836 		  || POINTER_TYPE_P (TREE_TYPE (rhs1))))
1837 	    switch (get_gimple_rhs_class (subcode))
1838 	      {
1839 	      case GIMPLE_SINGLE_RHS:
1840 	        val = get_value_for_expr (rhs1, true);
1841 		break;
1842 
1843 	      case GIMPLE_UNARY_RHS:
1844 		val = bit_value_unop (subcode, TREE_TYPE (lhs), rhs1);
1845 		break;
1846 
1847 	      case GIMPLE_BINARY_RHS:
1848 		val = bit_value_binop (subcode, TREE_TYPE (lhs), rhs1,
1849 				       gimple_assign_rhs2 (stmt));
1850 		break;
1851 
1852 	      default:;
1853 	      }
1854 	}
1855       else if (code == GIMPLE_COND)
1856 	{
1857 	  enum tree_code code = gimple_cond_code (stmt);
1858 	  tree rhs1 = gimple_cond_lhs (stmt);
1859 	  tree rhs2 = gimple_cond_rhs (stmt);
1860 	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1861 	      || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1862 	    val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
1863 	}
1864       else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
1865 	{
1866 	  tree fndecl = gimple_call_fndecl (stmt);
1867 	  switch (DECL_FUNCTION_CODE (fndecl))
1868 	    {
1869 	    case BUILT_IN_MALLOC:
1870 	    case BUILT_IN_REALLOC:
1871 	    case BUILT_IN_CALLOC:
1872 	    case BUILT_IN_STRDUP:
1873 	    case BUILT_IN_STRNDUP:
1874 	      val.lattice_val = CONSTANT;
1875 	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1876 	      val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
1877 			   / BITS_PER_UNIT - 1);
1878 	      break;
1879 
1880 	    case BUILT_IN_ALLOCA:
1881 	    case BUILT_IN_ALLOCA_WITH_ALIGN:
1882 	      align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN
1883 		       ? TREE_INT_CST_LOW (gimple_call_arg (stmt, 1))
1884 		       : BIGGEST_ALIGNMENT);
1885 	      val.lattice_val = CONSTANT;
1886 	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1887 	      val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
1888 	      break;
1889 
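	    /* Editorial sketch: for p_1 = malloc (n) the case above
	       yields value 0 and, e.g., mask ~15 on a target whose
	       MALLOC_ABI_ALIGNMENT is 128 bits: the low four bits of
	       p_1 are known to be zero, everything else is unknown.  */
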
1890 	    /* These builtins return their first argument, unmodified.  */
1891 	    case BUILT_IN_MEMCPY:
1892 	    case BUILT_IN_MEMMOVE:
1893 	    case BUILT_IN_MEMSET:
1894 	    case BUILT_IN_STRCPY:
1895 	    case BUILT_IN_STRNCPY:
1896 	    case BUILT_IN_MEMCPY_CHK:
1897 	    case BUILT_IN_MEMMOVE_CHK:
1898 	    case BUILT_IN_MEMSET_CHK:
1899 	    case BUILT_IN_STRCPY_CHK:
1900 	    case BUILT_IN_STRNCPY_CHK:
1901 	      val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
1902 	      break;
1903 
1904 	    case BUILT_IN_ASSUME_ALIGNED:
1905 	      val = bit_value_assume_aligned (stmt, NULL_TREE, val, false);
1906 	      break;
1907 
1908 	    case BUILT_IN_ALIGNED_ALLOC:
1909 	      {
1910 		tree align = get_constant_value (gimple_call_arg (stmt, 0));
1911 		if (align
1912 		    && tree_fits_uhwi_p (align))
1913 		  {
1914 		    unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
1915 		    if (aligni > 1
1916 			/* align must be power-of-two */
1917 			&& (aligni & (aligni - 1)) == 0)
1918 		      {
1919 			val.lattice_val = CONSTANT;
1920 			val.value = build_int_cst (ptr_type_node, 0);
1921 			val.mask = -aligni;
1922 		      }
1923 		  }
1924 		break;
1925 	      }
1926 
1927 	    default:;
1928 	    }
1929 	}
1930       if (is_gimple_call (stmt) && gimple_call_lhs (stmt))
1931 	{
1932 	  tree fntype = gimple_call_fntype (stmt);
1933 	  if (fntype)
1934 	    {
1935 	      tree attrs = lookup_attribute ("assume_aligned",
1936 					     TYPE_ATTRIBUTES (fntype));
1937 	      if (attrs)
1938 		val = bit_value_assume_aligned (stmt, attrs, val, false);
1939 	      attrs = lookup_attribute ("alloc_align",
1940 					TYPE_ATTRIBUTES (fntype));
1941 	      if (attrs)
1942 		val = bit_value_assume_aligned (stmt, attrs, val, true);
1943 	    }
1944 	}
1945       is_constant = (val.lattice_val == CONSTANT);
1946     }
1947 
1948   if (flag_tree_bit_ccp
1949       && ((is_constant && TREE_CODE (val.value) == INTEGER_CST)
1950 	  || !is_constant)
1951       && gimple_get_lhs (stmt)
1952       && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME)
1953     {
1954       tree lhs = gimple_get_lhs (stmt);
1955       wide_int nonzero_bits = get_nonzero_bits (lhs);
1956       if (nonzero_bits != -1)
1957 	{
1958 	  if (!is_constant)
1959 	    {
1960 	      val.lattice_val = CONSTANT;
1961 	      val.value = build_zero_cst (TREE_TYPE (lhs));
1962 	      val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (lhs)));
1963 	      is_constant = true;
1964 	    }
1965 	  else
1966 	    {
1967 	      if (wi::bit_and_not (val.value, nonzero_bits) != 0)
1968 		val.value = wide_int_to_tree (TREE_TYPE (lhs),
1969 					      nonzero_bits & val.value);
1970 	      if (nonzero_bits == 0)
1971 		val.mask = 0;
1972 	      else
1973 		val.mask = val.mask & extend_mask (nonzero_bits,
1974 						   TYPE_SIGN (TREE_TYPE (lhs)));
1975 	    }
1976 	}
1977     }
1978 
1979   /* The statement produced a nonconstant value.  */
1980   if (!is_constant)
1981     {
1982       /* The statement produced a copy.  */
1983       if (simplified && TREE_CODE (simplified) == SSA_NAME
1984 	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified))
1985 	{
1986 	  val.lattice_val = CONSTANT;
1987 	  val.value = simplified;
1988 	  val.mask = -1;
1989 	}
1990       /* The statement is VARYING.  */
1991       else
1992 	{
1993 	  val.lattice_val = VARYING;
1994 	  val.value = NULL_TREE;
1995 	  val.mask = -1;
1996 	}
1997     }
1998 
1999   return val;
2000 }
2001 
2002 typedef hash_table<nofree_ptr_hash<gimple> > gimple_htab;
2003 
2004 /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
2005    each matching BUILT_IN_STACK_RESTORE.  Mark visited phis in VISITED.  */
2006 
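/* Editorial sketch of the transformation (SSA names illustrative):

     saved_1 = __builtin_stack_save ();
     ...
     __builtin_stack_restore (saved_1);

   becomes

     saved_1 = __builtin_stack_save ();
     ...
     var ={v} {CLOBBER};
     __builtin_stack_restore (saved_1);

   so the lifetime of VAR (an alloca folded into a local array) visibly
   ends before the stack is restored.  */
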
2007 static void
2008 insert_clobber_before_stack_restore (tree saved_val, tree var,
2009 				     gimple_htab **visited)
2010 {
2011   gimple *stmt;
2012   gassign *clobber_stmt;
2013   tree clobber;
2014   imm_use_iterator iter;
2015   gimple_stmt_iterator i;
2016   gimple **slot;
2017 
2018   FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
2019     if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
2020       {
2021 	clobber = build_constructor (TREE_TYPE (var),
2022 				     NULL);
2023 	TREE_THIS_VOLATILE (clobber) = 1;
2024 	clobber_stmt = gimple_build_assign (var, clobber);
2025 
2026 	i = gsi_for_stmt (stmt);
2027 	gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
2028       }
2029     else if (gimple_code (stmt) == GIMPLE_PHI)
2030       {
2031 	if (!*visited)
2032 	  *visited = new gimple_htab (10);
2033 
2034 	slot = (*visited)->find_slot (stmt, INSERT);
2035 	if (*slot != NULL)
2036 	  continue;
2037 
2038 	*slot = stmt;
2039 	insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
2040 					     visited);
2041       }
2042     else if (gimple_assign_ssa_name_copy_p (stmt))
2043       insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
2044 					   visited);
2045     else if (chkp_gimple_call_builtin_p (stmt, BUILT_IN_CHKP_BNDRET))
2046       continue;
2047     else
2048       gcc_assert (is_gimple_debug (stmt));
2049 }
2050 
2051 /* Advance the iterator to the previous non-debug gimple statement in the same
2052    or dominating basic block.  */
2053 
2054 static inline void
2055 gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
2056 {
2057   basic_block dom;
2058 
2059   gsi_prev_nondebug (i);
2060   while (gsi_end_p (*i))
2061     {
2062       dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
2063       if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2064 	return;
2065 
2066       *i = gsi_last_bb (dom);
2067     }
2068 }
2069 
2070 /* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
2071    a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
2072 
2073    It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when a
2074    previous pass (such as DOM) duplicated it along multiple paths to a BB.  In
2075    that case the function gives up without inserting the clobbers.  */
2076 
2077 static void
2078 insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
2079 {
2080   gimple *stmt;
2081   tree saved_val;
2082   gimple_htab *visited = NULL;
2083 
2084   for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
2085     {
2086       stmt = gsi_stmt (i);
2087 
2088       if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
2089 	continue;
2090 
2091       saved_val = gimple_call_lhs (stmt);
2092       if (saved_val == NULL_TREE)
2093 	continue;
2094 
2095       insert_clobber_before_stack_restore (saved_val, var, &visited);
2096       break;
2097     }
2098 
2099   delete visited;
2100 }
2101 
2102 /* Detect a __builtin_alloca_with_align with a constant size argument.
2103    If found, declare a fixed-size array in its place and return the
2104    address of the array; otherwise return NULL_TREE.  */
2105 
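/* Editorial sketch (the declaration name is made up):

     p_7 = __builtin_alloca_with_align (16, 64);

   may be folded to

     p_7 = &D.1234;

   where D.1234 is a local 16-byte array whose DECL_ALIGN is taken from
   the second argument, an alignment in bits.  */
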
2106 static tree
2107 fold_builtin_alloca_with_align (gimple *stmt)
2108 {
2109   unsigned HOST_WIDE_INT size, threshold, n_elem;
2110   tree lhs, arg, block, var, elem_type, array_type;
2111 
2112   /* Get lhs.  */
2113   lhs = gimple_call_lhs (stmt);
2114   if (lhs == NULL_TREE)
2115     return NULL_TREE;
2116 
2117   /* Detect constant argument.  */
2118   arg = get_constant_value (gimple_call_arg (stmt, 0));
2119   if (arg == NULL_TREE
2120       || TREE_CODE (arg) != INTEGER_CST
2121       || !tree_fits_uhwi_p (arg))
2122     return NULL_TREE;
2123 
2124   size = tree_to_uhwi (arg);
2125 
2126   /* Heuristic: don't fold large allocas.  */
2127   threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME);
2128   /* In case the alloca is located at function entry, it has the same lifetime
2129      as a declared array, so we allow a larger size.  */
2130   block = gimple_block (stmt);
2131   if (!(cfun->after_inlining
2132 	&& block
2133         && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
2134     threshold /= 10;
2135   if (size > threshold)
2136     return NULL_TREE;
2137 
2138   /* Declare array.  */
2139   elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
2140   n_elem = size * 8 / BITS_PER_UNIT;
2141   array_type = build_array_type_nelts (elem_type, n_elem);
2142   var = create_tmp_var (array_type);
2143   SET_DECL_ALIGN (var, TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
2144   {
2145     struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
2146     if (pi != NULL && !pi->pt.anything)
2147       {
2148 	bool singleton_p;
2149 	unsigned uid;
2150 	singleton_p = pt_solution_singleton_or_null_p (&pi->pt, &uid);
2151 	gcc_assert (singleton_p);
2152 	SET_DECL_PT_UID (var, uid);
2153       }
2154   }
2155 
2156   /* Fold alloca to the address of the array.  */
2157   return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
2158 }
2159 
2160 /* Fold the stmt at *GSI with CCP specific information that propagating
2161    and regular folding does not catch.  */
2162 
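/* Editorial sketch: if the lattice proved i_5 to be CONSTANT 3 with
   mask 0, a predicate such as

     if (i_5 > 2) goto <bb 4>; else goto <bb 5>;

   is rewritten via gimple_cond_make_true, and CFG cleanup later
   removes the dead arm.  */
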
2163 static bool
2164 ccp_fold_stmt (gimple_stmt_iterator *gsi)
2165 {
2166   gimple *stmt = gsi_stmt (*gsi);
2167 
2168   switch (gimple_code (stmt))
2169     {
2170     case GIMPLE_COND:
2171       {
2172 	gcond *cond_stmt = as_a <gcond *> (stmt);
2173 	ccp_prop_value_t val;
2174 	/* Statement evaluation will handle type mismatches in constants
2175 	   more gracefully than the final propagation.  This allows us to
2176 	   fold more conditionals here.  */
2177 	val = evaluate_stmt (stmt);
2178 	if (val.lattice_val != CONSTANT
2179 	    || val.mask != 0)
2180 	  return false;
2181 
2182 	if (dump_file)
2183 	  {
2184 	    fprintf (dump_file, "Folding predicate ");
2185 	    print_gimple_expr (dump_file, stmt, 0, 0);
2186 	    fprintf (dump_file, " to ");
2187 	    print_generic_expr (dump_file, val.value, 0);
2188 	    fprintf (dump_file, "\n");
2189 	  }
2190 
2191 	if (integer_zerop (val.value))
2192 	  gimple_cond_make_false (cond_stmt);
2193 	else
2194 	  gimple_cond_make_true (cond_stmt);
2195 
2196 	return true;
2197       }
2198 
2199     case GIMPLE_CALL:
2200       {
2201 	tree lhs = gimple_call_lhs (stmt);
2202 	int flags = gimple_call_flags (stmt);
2203 	tree val;
2204 	tree argt;
2205 	bool changed = false;
2206 	unsigned i;
2207 
2208 	/* If the call was folded into a constant, make sure it goes
2209 	   away even if we cannot propagate it into all uses because
2210 	   of type issues.  */
2211 	if (lhs
2212 	    && TREE_CODE (lhs) == SSA_NAME
2213 	    && (val = get_constant_value (lhs))
2214 	    /* Don't optimize away calls that have side-effects.  */
2215 	    && (flags & (ECF_CONST|ECF_PURE)) != 0
2216 	    && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
2217 	  {
2218 	    tree new_rhs = unshare_expr (val);
2219 	    bool res;
2220 	    if (!useless_type_conversion_p (TREE_TYPE (lhs),
2221 					    TREE_TYPE (new_rhs)))
2222 	      new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
2223 	    res = update_call_from_tree (gsi, new_rhs);
2224 	    gcc_assert (res);
2225 	    return true;
2226 	  }
2227 
2228 	/* Internal calls provide no argument types, so the extra laxity
2229 	   for normal calls does not apply.  */
2230 	if (gimple_call_internal_p (stmt))
2231 	  return false;
2232 
2233         /* The heuristic of fold_builtin_alloca_with_align differs before and
2234 	   after inlining, so we don't require the argument to have just been
2235 	   changed into a constant by propagation; it merely needs to be constant.  */
2236         if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
2237           {
2238             tree new_rhs = fold_builtin_alloca_with_align (stmt);
2239             if (new_rhs)
2240 	      {
2241 		bool res = update_call_from_tree (gsi, new_rhs);
2242 		tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0),0);
2243 		gcc_assert (res);
2244 		insert_clobbers_for_var (*gsi, var);
2245 		return true;
2246 	      }
2247           }
2248 
2249 	/* Propagate into the call arguments.  Compared to replace_uses_in
2250 	   this can use the argument slot types for type verification
2251 	   instead of the current argument type.  We also can safely
2252 	   drop qualifiers here as we are dealing with constants anyway.  */
2253 	argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
2254 	for (i = 0; i < gimple_call_num_args (stmt) && argt;
2255 	     ++i, argt = TREE_CHAIN (argt))
2256 	  {
2257 	    tree arg = gimple_call_arg (stmt, i);
2258 	    if (TREE_CODE (arg) == SSA_NAME
2259 		&& (val = get_constant_value (arg))
2260 		&& useless_type_conversion_p
2261 		     (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
2262 		      TYPE_MAIN_VARIANT (TREE_TYPE (val))))
2263 	      {
2264 		gimple_call_set_arg (stmt, i, unshare_expr (val));
2265 		changed = true;
2266 	      }
2267 	  }
2268 
2269 	return changed;
2270       }
2271 
2272     case GIMPLE_ASSIGN:
2273       {
2274 	tree lhs = gimple_assign_lhs (stmt);
2275 	tree val;
2276 
2277 	/* If we have a load that turned out to be constant replace it
2278 	   as we cannot propagate into all uses in all cases.  */
2279 	if (gimple_assign_single_p (stmt)
2280 	    && TREE_CODE (lhs) == SSA_NAME
2281 	    && (val = get_constant_value (lhs)))
2282 	  {
2283 	    tree rhs = unshare_expr (val);
2284 	    if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
2285 	      rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
2286 	    gimple_assign_set_rhs_from_tree (gsi, rhs);
2287 	    return true;
2288 	  }
2289 
2290 	return false;
2291       }
2292 
2293     default:
2294       return false;
2295     }
2296 }
2297 
2298 /* Visit the assignment statement STMT.  Set the value of its LHS to the
2299    value computed by the RHS and store LHS in *OUTPUT_P.  If STMT
2300    creates virtual definitions, set the value of each new name to that
2301    of the RHS (if we can derive a constant out of the RHS).
2302    Value-returning call statements also perform an assignment, and
2303    are handled here.  */
2304 
2305 static enum ssa_prop_result
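/* Editorial sketch: visiting x_3 = y_4 * 2 when y_4 is known to be
   CONSTANT 5 sets the lattice value of x_3 to CONSTANT 10, stores x_3
   in *OUTPUT_P and returns SSA_PROP_INTERESTING, so users of x_3 are
   queued for simulation.  */
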
2306 visit_assignment (gimple *stmt, tree *output_p)
2307 {
2308   ccp_prop_value_t val;
2309   enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING;
2310 
2311   tree lhs = gimple_get_lhs (stmt);
2312   if (TREE_CODE (lhs) == SSA_NAME)
2313     {
2314       /* Evaluate the statement, which could be
2315 	 either a GIMPLE_ASSIGN or a GIMPLE_CALL.  */
2316       val = evaluate_stmt (stmt);
2317 
2318       /* If STMT is an assignment to an SSA_NAME, we only have one
2319 	 value to set.  */
2320       if (set_lattice_value (lhs, &val))
2321 	{
2322 	  *output_p = lhs;
2323 	  if (val.lattice_val == VARYING)
2324 	    retval = SSA_PROP_VARYING;
2325 	  else
2326 	    retval = SSA_PROP_INTERESTING;
2327 	}
2328     }
2329 
2330   return retval;
2331 }
2332 
2333 
2334 /* Visit the conditional statement STMT.  Return SSA_PROP_INTERESTING
2335    if it can determine which edge will be taken.  Otherwise, return
2336    SSA_PROP_VARYING.  */
2337 
2338 static enum ssa_prop_result
2339 visit_cond_stmt (gimple *stmt, edge *taken_edge_p)
2340 {
2341   ccp_prop_value_t val;
2342   basic_block block;
2343 
2344   block = gimple_bb (stmt);
2345   val = evaluate_stmt (stmt);
2346   if (val.lattice_val != CONSTANT
2347       || val.mask != 0)
2348     return SSA_PROP_VARYING;
2349 
2350   /* Find which edge out of the conditional block will be taken and add it
2351      to the worklist.  If no single edge can be determined statically,
2352      return SSA_PROP_VARYING to feed all the outgoing edges to the
2353      propagation engine.  */
2354   *taken_edge_p = find_taken_edge (block, val.value);
2355   if (*taken_edge_p)
2356     return SSA_PROP_INTERESTING;
2357   else
2358     return SSA_PROP_VARYING;
2359 }
2360 
2361 
2362 /* Evaluate statement STMT.  If the statement produces an output value and
2363    its evaluation changes the lattice value of its output, return
2364    SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2365    output value.
2366 
2367    If STMT is a conditional branch and we can determine its truth
2368    value, set *TAKEN_EDGE_P accordingly.  If STMT produces a varying
2369    value, return SSA_PROP_VARYING.  */
2370 
2371 static enum ssa_prop_result
2372 ccp_visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
2373 {
2374   tree def;
2375   ssa_op_iter iter;
2376 
2377   if (dump_file && (dump_flags & TDF_DETAILS))
2378     {
2379       fprintf (dump_file, "\nVisiting statement:\n");
2380       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2381     }
2382 
2383   switch (gimple_code (stmt))
2384     {
2385       case GIMPLE_ASSIGN:
2386         /* If the statement is an assignment that produces a single
2387            output value, evaluate its RHS to see if the lattice value of
2388            its output has changed.  */
2389         return visit_assignment (stmt, output_p);
2390 
2391       case GIMPLE_CALL:
2392         /* A value-returning call also performs an assignment.  */
2393         if (gimple_call_lhs (stmt) != NULL_TREE)
2394           return visit_assignment (stmt, output_p);
2395         break;
2396 
2397       case GIMPLE_COND:
2398       case GIMPLE_SWITCH:
2399         /* If STMT is a conditional branch, see if we can determine
2400            which branch will be taken.   */
2401         /* FIXME.  It appears that we should be able to optimize
2402            computed GOTOs here as well.  */
2403         return visit_cond_stmt (stmt, taken_edge_p);
2404 
2405       default:
2406         break;
2407     }
2408 
2409   /* Any other kind of statement is not interesting for constant
2410      propagation and, therefore, not worth simulating.  */
2411   if (dump_file && (dump_flags & TDF_DETAILS))
2412     fprintf (dump_file, "No interesting values produced.  Marked VARYING.\n");
2413 
2414   /* Definitions made by statements other than assignments to
2415      SSA_NAMEs represent unknown modifications to their outputs.
2416      Mark them VARYING.  */
2417   FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
2418     set_value_varying (def);
2419 
2420   return SSA_PROP_VARYING;
2421 }
2422 
2423 
2424 /* Main entry point for SSA Conditional Constant Propagation.  If NONZERO_P,
2425    record nonzero bits.  */
2426 
2427 static unsigned int
2428 do_ssa_ccp (bool nonzero_p)
2429 {
2430   unsigned int todo = 0;
2431   calculate_dominance_info (CDI_DOMINATORS);
2432 
2433   ccp_initialize ();
2434   ssa_propagate (ccp_visit_stmt, ccp_visit_phi_node);
2435   if (ccp_finalize (nonzero_p || flag_ipa_bit_cp))
2436     {
2437       todo = (TODO_cleanup_cfg | TODO_update_ssa);
2438 
2439       /* ccp_finalize does not preserve loop-closed ssa.  */
2440       loops_state_clear (LOOP_CLOSED_SSA);
2441     }
2442 
2443   free_dominance_info (CDI_DOMINATORS);
2444   return todo;
2445 }
2446 
2447 
2448 namespace {
2449 
2450 const pass_data pass_data_ccp =
2451 {
2452   GIMPLE_PASS, /* type */
2453   "ccp", /* name */
2454   OPTGROUP_NONE, /* optinfo_flags */
2455   TV_TREE_CCP, /* tv_id */
2456   ( PROP_cfg | PROP_ssa ), /* properties_required */
2457   0, /* properties_provided */
2458   0, /* properties_destroyed */
2459   0, /* todo_flags_start */
2460   TODO_update_address_taken, /* todo_flags_finish */
2461 };
2462 
2463 class pass_ccp : public gimple_opt_pass
2464 {
2465 public:
2466   pass_ccp (gcc::context *ctxt)
2467     : gimple_opt_pass (pass_data_ccp, ctxt), nonzero_p (false)
2468   {}
2469 
2470   /* opt_pass methods: */
2471   opt_pass * clone () { return new pass_ccp (m_ctxt); }
2472   void set_pass_param (unsigned int n, bool param)
2473     {
2474       gcc_assert (n == 0);
2475       nonzero_p = param;
2476     }
2477   virtual bool gate (function *) { return flag_tree_ccp != 0; }
2478   virtual unsigned int execute (function *) { return do_ssa_ccp (nonzero_p); }
2479 
2480  private:
2481   /* Determines whether the pass instance records nonzero bits.  */
2482   bool nonzero_p;
2483 }; // class pass_ccp
2484 
2485 } // anon namespace
2486 
2487 gimple_opt_pass *
2488 make_pass_ccp (gcc::context *ctxt)
2489 {
2490   return new pass_ccp (ctxt);
2491 }
2492 
2493 
2494 
2495 /* Try to optimize out __builtin_stack_restore.  Optimize it out
2496    if there is another __builtin_stack_restore in the same basic
2497    block and no calls or ASM_EXPRs are in between, or if this block's
2498    only outgoing edge is to EXIT_BLOCK and there are no calls or
2499    ASM_EXPRs after this __builtin_stack_restore.  */
2500 
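/* Editorial sketch (SSA names illustrative):

     t_1 = __builtin_stack_save ();
     ...
     __builtin_stack_restore (t_1);    <-- removable
     ... no calls or asm ...
     __builtin_stack_restore (t_1);

   The first restore has no observable effect because nothing that can
   move the stack pointer happens before the next restore.  */
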
2501 static tree
2502 optimize_stack_restore (gimple_stmt_iterator i)
2503 {
2504   tree callee;
2505   gimple *stmt;
2506 
2507   basic_block bb = gsi_bb (i);
2508   gimple *call = gsi_stmt (i);
2509 
2510   if (gimple_code (call) != GIMPLE_CALL
2511       || gimple_call_num_args (call) != 1
2512       || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
2513       || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
2514     return NULL_TREE;
2515 
2516   for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
2517     {
2518       stmt = gsi_stmt (i);
2519       if (gimple_code (stmt) == GIMPLE_ASM)
2520 	return NULL_TREE;
2521       if (gimple_code (stmt) != GIMPLE_CALL)
2522 	continue;
2523 
2524       callee = gimple_call_fndecl (stmt);
2525       if (!callee
2526 	  || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
2527 	  /* All regular builtins are ok, just obviously not alloca.  */
2528 	  || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA
2529 	  || DECL_FUNCTION_CODE (callee) == BUILT_IN_ALLOCA_WITH_ALIGN)
2530 	return NULL_TREE;
2531 
2532       if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_RESTORE)
2533 	goto second_stack_restore;
2534     }
2535 
2536   if (!gsi_end_p (i))
2537     return NULL_TREE;
2538 
2539   /* Allow a single successor to the exit block, or no successors at all.  */
2540   switch (EDGE_COUNT (bb->succs))
2541     {
2542     case 0:
2543       break;
2544     case 1:
2545       if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2546 	return NULL_TREE;
2547       break;
2548     default:
2549       return NULL_TREE;
2550     }
2551  second_stack_restore:
2552 
2553   /* If there's exactly one use, then zap the call to __builtin_stack_save.
2554      If there are multiple uses, then the last one should remove the call.
2555      In any case, whether the call to __builtin_stack_save can be removed
2556      or not is irrelevant to removing the call to __builtin_stack_restore.  */
2557   if (has_single_use (gimple_call_arg (call, 0)))
2558     {
2559       gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
2560       if (is_gimple_call (stack_save))
2561 	{
2562 	  callee = gimple_call_fndecl (stack_save);
2563 	  if (callee
2564 	      && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2565 	      && DECL_FUNCTION_CODE (callee) == BUILT_IN_STACK_SAVE)
2566 	    {
2567 	      gimple_stmt_iterator stack_save_gsi;
2568 	      tree rhs;
2569 
2570 	      stack_save_gsi = gsi_for_stmt (stack_save);
2571 	      rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
2572 	      update_call_from_tree (&stack_save_gsi, rhs);
2573 	    }
2574 	}
2575     }
2576 
2577   /* No effect, so the statement will be deleted.  */
2578   return integer_zero_node;
2579 }
2580 
2581 /* If the va_list type is a simple pointer and nothing special is needed,
2582    optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
2583    remove __builtin_va_end (&ap) as a no-op, and turn __builtin_va_copy
2584    into a simple pointer assignment.  */
2585 
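/* Editorial sketch, assuming a target whose va_list is a plain pointer:

     __builtin_va_start (&ap, 0)   ->  ap = __builtin_next_arg (0);
     __builtin_va_copy (&aq, ap)   ->  aq = ap;
     __builtin_va_end (&ap)        ->  removed as a no-op  */
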
2586 static tree
2587 optimize_stdarg_builtin (gimple *call)
2588 {
2589   tree callee, lhs, rhs, cfun_va_list;
2590   bool va_list_simple_ptr;
2591   location_t loc = gimple_location (call);
2592 
2593   if (gimple_code (call) != GIMPLE_CALL)
2594     return NULL_TREE;
2595 
2596   callee = gimple_call_fndecl (call);
2597 
2598   cfun_va_list = targetm.fn_abi_va_list (callee);
2599   va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
2600 		       && (TREE_TYPE (cfun_va_list) == void_type_node
2601 			   || TREE_TYPE (cfun_va_list) == char_type_node);
2602 
2603   switch (DECL_FUNCTION_CODE (callee))
2604     {
2605     case BUILT_IN_VA_START:
2606       if (!va_list_simple_ptr
2607 	  || targetm.expand_builtin_va_start != NULL
2608 	  || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
2609 	return NULL_TREE;
2610 
2611       if (gimple_call_num_args (call) != 2)
2612 	return NULL_TREE;
2613 
2614       lhs = gimple_call_arg (call, 0);
2615       if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2616 	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2617 	     != TYPE_MAIN_VARIANT (cfun_va_list))
2618 	return NULL_TREE;
2619 
2620       lhs = build_fold_indirect_ref_loc (loc, lhs);
2621       rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
2622                              1, integer_zero_node);
2623       rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2624       return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2625 
2626     case BUILT_IN_VA_COPY:
2627       if (!va_list_simple_ptr)
2628 	return NULL_TREE;
2629 
2630       if (gimple_call_num_args (call) != 2)
2631 	return NULL_TREE;
2632 
2633       lhs = gimple_call_arg (call, 0);
2634       if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2635 	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2636 	     != TYPE_MAIN_VARIANT (cfun_va_list))
2637 	return NULL_TREE;
2638 
2639       lhs = build_fold_indirect_ref_loc (loc, lhs);
2640       rhs = gimple_call_arg (call, 1);
2641       if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
2642 	  != TYPE_MAIN_VARIANT (cfun_va_list))
2643 	return NULL_TREE;
2644 
2645       rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2646       return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2647 
2648     case BUILT_IN_VA_END:
2649       /* No effect, so the statement will be deleted.  */
2650       return integer_zero_node;
2651 
2652     default:
2653       gcc_unreachable ();
2654     }
2655 }
2656 
2657 /* Attempt to make the block of the __builtin_unreachable call at I unreachable
2658    by changing the incoming jumps.  Return true if at least one jump was changed.  */
2659 
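/* Editorial sketch: for a predecessor block ending in

     if (cond_3) goto <bb 7>; else goto <bb 8>;

   where <bb 7> starts with __builtin_unreachable (), the condition is
   rewritten with gimple_cond_make_false so the edge into <bb 7> is
   never taken; CFG cleanup can then delete the block.  */
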
2660 static bool
2661 optimize_unreachable (gimple_stmt_iterator i)
2662 {
2663   basic_block bb = gsi_bb (i);
2664   gimple_stmt_iterator gsi;
2665   gimple *stmt;
2666   edge_iterator ei;
2667   edge e;
2668   bool ret;
2669 
2670   if (flag_sanitize & SANITIZE_UNREACHABLE)
2671     return false;
2672 
2673   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2674     {
2675       stmt = gsi_stmt (gsi);
2676 
2677       if (is_gimple_debug (stmt))
2678        continue;
2679 
2680       if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
2681 	{
2682 	  /* Verify we do not need to preserve the label.  */
2683 	  if (FORCED_LABEL (gimple_label_label (label_stmt)))
2684 	    return false;
2685 
2686 	  continue;
2687 	}
2688 
2689       /* Only handle the case that __builtin_unreachable is the first statement
2690 	 in the block.  We rely on DCE to remove stmts without side-effects
2691 	 before __builtin_unreachable.  */
2692       if (gsi_stmt (gsi) != gsi_stmt (i))
2693         return false;
2694     }
2695 
2696   ret = false;
2697   FOR_EACH_EDGE (e, ei, bb->preds)
2698     {
2699       gsi = gsi_last_bb (e->src);
2700       if (gsi_end_p (gsi))
2701 	continue;
2702 
2703       stmt = gsi_stmt (gsi);
2704       if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
2705 	{
2706 	  if (e->flags & EDGE_TRUE_VALUE)
2707 	    gimple_cond_make_false (cond_stmt);
2708 	  else if (e->flags & EDGE_FALSE_VALUE)
2709 	    gimple_cond_make_true (cond_stmt);
2710 	  else
2711 	    gcc_unreachable ();
2712 	  update_stmt (cond_stmt);
2713 	}
2714       else
2715 	{
2716 	  /* TODO: handle other cases, e.g. switch statements.  */
2717 	  continue;
2718 	}
2719 
2720       ret = true;
2721     }
2722 
2723   return ret;
2724 }
2725 
2726 /* Optimize
2727      mask_2 = 1 << cnt_1;
2728      _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
2729      _5 = _4 & mask_2;
2730    to
2731      _4 = ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);
2732      _5 = _4;
2733    If _5 is only used in _5 != 0 or _5 == 0 comparisons, 1
2734    is passed instead of 0, and the builtin just returns a zero
2735    or 1 value instead of the actual bit.
2736    Similarly for __sync_fetch_and_or_* (without the ", _3" part
2737    in there), and/or if mask_2 is a power of 2 constant.
2738    Similarly for xor instead of or, use ATOMIC_BIT_TEST_AND_COMPLEMENT
2739    in that case.  And similarly for and instead of or, except that
2740    the second argument to the builtin needs to be one's complement
2741    of the mask instead of mask.  */
2742 
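/* Editorial sketch at the source level, assuming a target that
   implements the atomic_bit_test_and_set optab (e.g. x86 lock bts):

     if (__atomic_fetch_or (&word, 1 << bit, __ATOMIC_SEQ_CST)
	 & (1 << bit))
       ...

   becomes a single ATOMIC_BIT_TEST_AND_SET internal call whose 0/1
   result feeds the comparison directly, so the full old value never
   has to be materialized.  */
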
2743 static void
2744 optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
2745 			      enum internal_fn fn, bool has_model_arg,
2746 			      bool after)
2747 {
2748   gimple *call = gsi_stmt (*gsip);
2749   tree lhs = gimple_call_lhs (call);
2750   use_operand_p use_p;
2751   gimple *use_stmt;
2752   tree mask, bit;
2753   optab optab;
2754 
2755   if (!flag_inline_atomics
2756       || optimize_debug
2757       || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)
2758       || !lhs
2759       || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2760       || !single_imm_use (lhs, &use_p, &use_stmt)
2761       || !is_gimple_assign (use_stmt)
2762       || gimple_assign_rhs_code (use_stmt) != BIT_AND_EXPR
2763       || !gimple_vdef (call))
2764     return;
2765 
2766   switch (fn)
2767     {
2768     case IFN_ATOMIC_BIT_TEST_AND_SET:
2769       optab = atomic_bit_test_and_set_optab;
2770       break;
2771     case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT:
2772       optab = atomic_bit_test_and_complement_optab;
2773       break;
2774     case IFN_ATOMIC_BIT_TEST_AND_RESET:
2775       optab = atomic_bit_test_and_reset_optab;
2776       break;
2777     default:
2778       return;
2779     }
2780 
2781   if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs))) == CODE_FOR_nothing)
2782     return;
2783 
2784   mask = gimple_call_arg (call, 1);
2785   tree use_lhs = gimple_assign_lhs (use_stmt);
2786   if (!use_lhs)
2787     return;
2788 
2789   if (TREE_CODE (mask) == INTEGER_CST)
2790     {
2791       if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
2792 	mask = const_unop (BIT_NOT_EXPR, TREE_TYPE (mask), mask);
2793       mask = fold_convert (TREE_TYPE (lhs), mask);
2794       int ibit = tree_log2 (mask);
2795       if (ibit < 0)
2796 	return;
2797       bit = build_int_cst (TREE_TYPE (lhs), ibit);
2798     }
2799   else if (TREE_CODE (mask) == SSA_NAME)
2800     {
2801       gimple *g = SSA_NAME_DEF_STMT (mask);
2802       if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
2803 	{
2804 	  if (!is_gimple_assign (g)
2805 	      || gimple_assign_rhs_code (g) != BIT_NOT_EXPR)
2806 	    return;
2807 	  mask = gimple_assign_rhs1 (g);
2808 	  if (TREE_CODE (mask) != SSA_NAME)
2809 	    return;
2810 	  g = SSA_NAME_DEF_STMT (mask);
2811 	}
2812       if (!is_gimple_assign (g)
2813 	  || gimple_assign_rhs_code (g) != LSHIFT_EXPR
2814 	  || !integer_onep (gimple_assign_rhs1 (g)))
2815 	return;
2816       bit = gimple_assign_rhs2 (g);
2817     }
2818   else
2819     return;
2820 
2821   if (gimple_assign_rhs1 (use_stmt) == lhs)
2822     {
2823       if (!operand_equal_p (gimple_assign_rhs2 (use_stmt), mask, 0))
2824 	return;
2825     }
2826   else if (gimple_assign_rhs2 (use_stmt) != lhs
2827 	   || !operand_equal_p (gimple_assign_rhs1 (use_stmt), mask, 0))
2828     return;
2829 
2830   bool use_bool = true;
2831   bool has_debug_uses = false;
2832   imm_use_iterator iter;
2833   gimple *g;
2834 
2835   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))
2836     use_bool = false;
2837   FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
2838     {
2839       enum tree_code code = ERROR_MARK;
2840       tree op0 = NULL_TREE, op1 = NULL_TREE;
2841       if (is_gimple_debug (g))
2842 	{
2843 	  has_debug_uses = true;
2844 	  continue;
2845 	}
2846       else if (is_gimple_assign (g))
2847 	switch (gimple_assign_rhs_code (g))
2848 	  {
2849 	  case COND_EXPR:
2850 	    op1 = gimple_assign_rhs1 (g);
2851 	    code = TREE_CODE (op1);
2852 	    op0 = TREE_OPERAND (op1, 0);
2853 	    op1 = TREE_OPERAND (op1, 1);
2854 	    break;
2855 	  case EQ_EXPR:
2856 	  case NE_EXPR:
2857 	    code = gimple_assign_rhs_code (g);
2858 	    op0 = gimple_assign_rhs1 (g);
2859 	    op1 = gimple_assign_rhs2 (g);
2860 	    break;
2861 	  default:
2862 	    break;
2863 	  }
2864       else if (gimple_code (g) == GIMPLE_COND)
2865 	{
2866 	  code = gimple_cond_code (g);
2867 	  op0 = gimple_cond_lhs (g);
2868 	  op1 = gimple_cond_rhs (g);
2869 	}
2870 
2871       if ((code == EQ_EXPR || code == NE_EXPR)
2872 	  && op0 == use_lhs
2873 	  && integer_zerop (op1))
2874 	{
2875 	  use_operand_p use_p;
2876 	  int n = 0;
2877 	  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2878 	    n++;
2879 	  if (n == 1)
2880 	    continue;
2881 	}
2882 
2883       use_bool = false;
2884       BREAK_FROM_IMM_USE_STMT (iter);
2885     }
2886 
2887   tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
2888   tree flag = build_int_cst (TREE_TYPE (lhs), use_bool);
2889   if (has_model_arg)
2890     g = gimple_build_call_internal (fn, 4, gimple_call_arg (call, 0),
2891 				    bit, flag, gimple_call_arg (call, 2));
2892   else
2893     g = gimple_build_call_internal (fn, 3, gimple_call_arg (call, 0),
2894 				    bit, flag);
2895   gimple_call_set_lhs (g, new_lhs);
2896   gimple_set_location (g, gimple_location (call));
2897   gimple_set_vuse (g, gimple_vuse (call));
2898   gimple_set_vdef (g, gimple_vdef (call));
2899   bool throws = stmt_can_throw_internal (call);
2900   gimple_call_set_nothrow (as_a <gcall *> (g),
2901 			   gimple_call_nothrow_p (as_a <gcall *> (call)));
2902   SSA_NAME_DEF_STMT (gimple_vdef (call)) = g;
2903   gimple_stmt_iterator gsi = *gsip;
2904   gsi_insert_after (&gsi, g, GSI_NEW_STMT);
2905   edge e = NULL;
2906   if (throws)
2907     {
2908       maybe_clean_or_replace_eh_stmt (call, g);
2909       if (after || (use_bool && has_debug_uses))
2910 	e = find_fallthru_edge (gsi_bb (gsi)->succs);
2911     }
2912   if (after)
2913     {
2914       /* The internal function returns the value of the specified bit
2915 	 before the atomic operation.  If we are interested in the value
2916 	 of the specified bit after the atomic operation (this only makes
2917 	 sense for xor; otherwise the bit content is known at compile time),
2918 	 we need to invert the bit.  */
2919       g = gimple_build_assign (make_ssa_name (TREE_TYPE (lhs)),
2920 			       BIT_XOR_EXPR, new_lhs,
2921 			       use_bool ? build_int_cst (TREE_TYPE (lhs), 1)
2922 					: mask);
2923       new_lhs = gimple_assign_lhs (g);
2924       if (throws)
2925 	{
2926 	  gsi_insert_on_edge_immediate (e, g);
2927 	  gsi = gsi_for_stmt (g);
2928 	}
2929       else
2930 	gsi_insert_after (&gsi, g, GSI_NEW_STMT);
2931     }
2932   if (use_bool && has_debug_uses)
2933     {
2934       tree temp = NULL_TREE;
2935       if (!throws || after || single_pred_p (e->dest))
2936 	{
2937 	  temp = make_node (DEBUG_EXPR_DECL);
2938 	  DECL_ARTIFICIAL (temp) = 1;
2939 	  TREE_TYPE (temp) = TREE_TYPE (lhs);
2940 	  SET_DECL_MODE (temp, TYPE_MODE (TREE_TYPE (lhs)));
2941 	  tree t = build2 (LSHIFT_EXPR, TREE_TYPE (lhs), new_lhs, bit);
2942 	  g = gimple_build_debug_bind (temp, t, g);
2943 	  if (throws && !after)
2944 	    {
2945 	      gsi = gsi_after_labels (e->dest);
2946 	      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
2947 	    }
2948 	  else
2949 	    gsi_insert_after (&gsi, g, GSI_NEW_STMT);
2950 	}
2951       FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
2952 	if (is_gimple_debug (g))
2953 	  {
2954 	    use_operand_p use_p;
2955 	    if (temp == NULL_TREE)
2956 	      gimple_debug_bind_reset_value (g);
2957 	    else
2958 	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2959 		SET_USE (use_p, temp);
2960 	    update_stmt (g);
2961 	  }
2962     }
2963   SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_lhs)
2964     = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs);
2965   replace_uses_by (use_lhs, new_lhs);
2966   gsi = gsi_for_stmt (use_stmt);
2967   gsi_remove (&gsi, true);
2968   release_defs (use_stmt);
2969   gsi_remove (gsip, true);
2970   release_ssa_name (lhs);
2971 }
2972 
2973 /* Optimize
2974    a = {};
2975    b = a;
2976    into
2977    a = {};
2978    b = {};
2979    Similarly for memset (&a, ..., sizeof (a)); instead of a = {};
2980    and/or memcpy (&b, &a, sizeof (a)); instead of b = a;  */
2981 
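/* Editorial sketch of the range check below (offsets illustrative):
   after

     memset (&a, 0, 32);

   a copy b = a.f; that reads bytes [8, 16) of a lies entirely inside
   the zeroed range [0, 32), so it may be rewritten to b = {};.  */
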
2982 static void
2983 optimize_memcpy (gimple_stmt_iterator *gsip, tree dest, tree src, tree len)
2984 {
2985   gimple *stmt = gsi_stmt (*gsip);
2986   if (gimple_has_volatile_ops (stmt))
2987     return;
2988 
2989   tree vuse = gimple_vuse (stmt);
2990   if (vuse == NULL)
2991     return;
2992 
2993   gimple *defstmt = SSA_NAME_DEF_STMT (vuse);
2994   tree src2 = NULL_TREE, len2 = NULL_TREE;
2995   HOST_WIDE_INT offset, offset2;
2996   tree val = integer_zero_node;
2997   if (gimple_store_p (defstmt)
2998       && gimple_assign_single_p (defstmt)
2999       && TREE_CODE (gimple_assign_rhs1 (defstmt)) == CONSTRUCTOR
3000       && !gimple_clobber_p (defstmt))
3001     src2 = gimple_assign_lhs (defstmt);
3002   else if (gimple_call_builtin_p (defstmt, BUILT_IN_MEMSET)
3003 	   && TREE_CODE (gimple_call_arg (defstmt, 0)) == ADDR_EXPR
3004 	   && TREE_CODE (gimple_call_arg (defstmt, 1)) == INTEGER_CST)
3005     {
3006       src2 = TREE_OPERAND (gimple_call_arg (defstmt, 0), 0);
3007       len2 = gimple_call_arg (defstmt, 2);
3008       val = gimple_call_arg (defstmt, 1);
3009       /* For non-0 val, we'd have to transform stmt from assignment
3010 	 into memset (only if dest is addressable).  */
3011       if (!integer_zerop (val) && is_gimple_assign (stmt))
3012 	src2 = NULL_TREE;
3013     }
3014 
3015   if (src2 == NULL_TREE)
3016     return;
3017 
3018   if (len == NULL_TREE)
3019     len = (TREE_CODE (src) == COMPONENT_REF
3020 	   ? DECL_SIZE_UNIT (TREE_OPERAND (src, 1))
3021 	   : TYPE_SIZE_UNIT (TREE_TYPE (src)));
3022   if (len2 == NULL_TREE)
3023     len2 = (TREE_CODE (src2) == COMPONENT_REF
3024 	    ? DECL_SIZE_UNIT (TREE_OPERAND (src2, 1))
3025 	    : TYPE_SIZE_UNIT (TREE_TYPE (src2)));
3026   if (len == NULL_TREE
3027       || TREE_CODE (len) != INTEGER_CST
3028       || len2 == NULL_TREE
3029       || TREE_CODE (len2) != INTEGER_CST)
3030     return;
3031 
3032   src = get_addr_base_and_unit_offset (src, &offset);
3033   src2 = get_addr_base_and_unit_offset (src2, &offset2);
3034   if (src == NULL_TREE
3035       || src2 == NULL_TREE
3036       || offset < offset2)
3037     return;
3038 
3039   if (!operand_equal_p (src, src2, 0))
3040     return;
3041 
3042   /* [ src + offset2, src + offset2 + len2 - 1 ] is set to val.
3043      Make sure that
3044      [ src + offset, src + offset + len - 1 ] is a subset of that.  */
3045   if (wi::to_offset (len) + (offset - offset2) > wi::to_offset (len2))
3046     return;
3047 
3048   if (dump_file && (dump_flags & TDF_DETAILS))
3049     {
3050       fprintf (dump_file, "Simplified\n  ");
3051       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3052       fprintf (dump_file, "after previous\n  ");
3053       print_gimple_stmt (dump_file, defstmt, 0, dump_flags);
3054     }
3055 
3056   /* For simplicity, don't change the kind of the stmt: turn
3057      dest = src; into dest = {}; and memcpy (&dest, &src, len);
3058      into memset (&dest, val, len).
3059      In theory we could change dest = src into a memset if dest
3060      is addressable (maybe beneficial if val is not 0), or
3061      memcpy (&dest, &src, len) into dest = {} if len is the size
3062      of dest and dest isn't volatile.  */
3063   if (is_gimple_assign (stmt))
3064     {
3065       tree ctor = build_constructor (TREE_TYPE (dest), NULL);
3066       gimple_assign_set_rhs_from_tree (gsip, ctor);
3067       update_stmt (stmt);
3068     }
3069   else /* If stmt is memcpy, transform it into memset.  */
3070     {
3071       gcall *call = as_a <gcall *> (stmt);
3072       tree fndecl = builtin_decl_implicit (BUILT_IN_MEMSET);
3073       gimple_call_set_fndecl (call, fndecl);
3074       gimple_call_set_fntype (call, TREE_TYPE (fndecl));
3075       gimple_call_set_arg (call, 1, val);
3076       update_stmt (stmt);
3077     }
3078 
3079   if (dump_file && (dump_flags & TDF_DETAILS))
3080     {
3081       fprintf (dump_file, "into\n  ");
3082       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3083     }
3084 }
3085 
3086 /* A simple pass that attempts to fold all builtin functions.  This pass
3087    is run after we've propagated as many constants as we can.  */
3088 
3089 namespace {
3090 
3091 const pass_data pass_data_fold_builtins =
3092 {
3093   GIMPLE_PASS, /* type */
3094   "fab", /* name */
3095   OPTGROUP_NONE, /* optinfo_flags */
3096   TV_NONE, /* tv_id */
3097   ( PROP_cfg | PROP_ssa ), /* properties_required */
3098   0, /* properties_provided */
3099   0, /* properties_destroyed */
3100   0, /* todo_flags_start */
3101   TODO_update_ssa, /* todo_flags_finish */
3102 };
3103 
3104 class pass_fold_builtins : public gimple_opt_pass
3105 {
3106 public:
3107   pass_fold_builtins (gcc::context *ctxt)
3108     : gimple_opt_pass (pass_data_fold_builtins, ctxt)
3109   {}
3110 
3111   /* opt_pass methods: */
3112   opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
3113   virtual unsigned int execute (function *);
3114 
3115 }; // class pass_fold_builtins
3116 
3117 unsigned int
3118 pass_fold_builtins::execute (function *fun)
3119 {
3120   bool cfg_changed = false;
3121   basic_block bb;
3122   unsigned int todoflags = 0;
3123 
3124   FOR_EACH_BB_FN (bb, fun)
3125     {
3126       gimple_stmt_iterator i;
3127       for (i = gsi_start_bb (bb); !gsi_end_p (i); )
3128 	{
3129 	  gimple *stmt, *old_stmt;
3130 	  tree callee;
3131 	  enum built_in_function fcode;
3132 
3133 	  stmt = gsi_stmt (i);
3134 
3135           if (gimple_code (stmt) != GIMPLE_CALL)
3136 	    {
3137 	      /* Remove all *ssaname_N ={v} {CLOBBER}; stmts; after the
3138 		 last GIMPLE DSE they aren't needed and might unnecessarily
3139 		 keep the SSA_NAMEs live.  */
3140 	      if (gimple_clobber_p (stmt))
3141 		{
3142 		  tree lhs = gimple_assign_lhs (stmt);
3143 		  if (TREE_CODE (lhs) == MEM_REF
3144 		      && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
3145 		    {
3146 		      unlink_stmt_vdef (stmt);
3147 		      gsi_remove (&i, true);
3148 		      release_defs (stmt);
3149 		      continue;
3150 		    }
3151 		}
3152 	      else if (gimple_assign_load_p (stmt) && gimple_store_p (stmt))
3153 		optimize_memcpy (&i, gimple_assign_lhs (stmt),
3154 				 gimple_assign_rhs1 (stmt), NULL_TREE);
3155 	      gsi_next (&i);
3156 	      continue;
3157 	    }
3158 
3159 	  callee = gimple_call_fndecl (stmt);
3160 	  if (!callee || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL)
3161 	    {
3162 	      gsi_next (&i);
3163 	      continue;
3164 	    }
3165 
3166 	  fcode = DECL_FUNCTION_CODE (callee);
3167 	  if (fold_stmt (&i))
3168 	    ;
3169 	  else
3170 	    {
3171 	      tree result = NULL_TREE;
3172 	      switch (DECL_FUNCTION_CODE (callee))
3173 		{
3174 		case BUILT_IN_CONSTANT_P:
3175 		  /* Resolve __builtin_constant_p.  If it hasn't been
3176 		     folded to integer_one_node by now, it's fairly
3177 		     certain that the value simply isn't constant.  */
3178 		  result = integer_zero_node;
3179 		  break;
3180 
3181 		case BUILT_IN_ASSUME_ALIGNED:
3182 		  /* Remove __builtin_assume_aligned.  */
3183 		  result = gimple_call_arg (stmt, 0);
3184 		  break;
3185 
3186 		case BUILT_IN_STACK_RESTORE:
3187 		  result = optimize_stack_restore (i);
3188 		  if (result)
3189 		    break;
3190 		  gsi_next (&i);
3191 		  continue;
3192 
3193 		case BUILT_IN_UNREACHABLE:
3194 		  if (optimize_unreachable (i))
3195 		    cfg_changed = true;
3196 		  break;
3197 
3198 		case BUILT_IN_ATOMIC_FETCH_OR_1:
3199 		case BUILT_IN_ATOMIC_FETCH_OR_2:
3200 		case BUILT_IN_ATOMIC_FETCH_OR_4:
3201 		case BUILT_IN_ATOMIC_FETCH_OR_8:
3202 		case BUILT_IN_ATOMIC_FETCH_OR_16:
3203 		  optimize_atomic_bit_test_and (&i,
3204 						IFN_ATOMIC_BIT_TEST_AND_SET,
3205 						true, false);
3206 		  break;
3207 		case BUILT_IN_SYNC_FETCH_AND_OR_1:
3208 		case BUILT_IN_SYNC_FETCH_AND_OR_2:
3209 		case BUILT_IN_SYNC_FETCH_AND_OR_4:
3210 		case BUILT_IN_SYNC_FETCH_AND_OR_8:
3211 		case BUILT_IN_SYNC_FETCH_AND_OR_16:
3212 		  optimize_atomic_bit_test_and (&i,
3213 						IFN_ATOMIC_BIT_TEST_AND_SET,
3214 						false, false);
3215 		  break;
3216 
3217 		case BUILT_IN_ATOMIC_FETCH_XOR_1:
3218 		case BUILT_IN_ATOMIC_FETCH_XOR_2:
3219 		case BUILT_IN_ATOMIC_FETCH_XOR_4:
3220 		case BUILT_IN_ATOMIC_FETCH_XOR_8:
3221 		case BUILT_IN_ATOMIC_FETCH_XOR_16:
3222 		  optimize_atomic_bit_test_and
3223 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, false);
3224 		  break;
3225 		case BUILT_IN_SYNC_FETCH_AND_XOR_1:
3226 		case BUILT_IN_SYNC_FETCH_AND_XOR_2:
3227 		case BUILT_IN_SYNC_FETCH_AND_XOR_4:
3228 		case BUILT_IN_SYNC_FETCH_AND_XOR_8:
3229 		case BUILT_IN_SYNC_FETCH_AND_XOR_16:
3230 		  optimize_atomic_bit_test_and
3231 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, false);
3232 		  break;
3233 
3234 		case BUILT_IN_ATOMIC_XOR_FETCH_1:
3235 		case BUILT_IN_ATOMIC_XOR_FETCH_2:
3236 		case BUILT_IN_ATOMIC_XOR_FETCH_4:
3237 		case BUILT_IN_ATOMIC_XOR_FETCH_8:
3238 		case BUILT_IN_ATOMIC_XOR_FETCH_16:
3239 		  optimize_atomic_bit_test_and
3240 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, true);
3241 		  break;
3242 		case BUILT_IN_SYNC_XOR_AND_FETCH_1:
3243 		case BUILT_IN_SYNC_XOR_AND_FETCH_2:
3244 		case BUILT_IN_SYNC_XOR_AND_FETCH_4:
3245 		case BUILT_IN_SYNC_XOR_AND_FETCH_8:
3246 		case BUILT_IN_SYNC_XOR_AND_FETCH_16:
3247 		  optimize_atomic_bit_test_and
3248 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, true);
3249 		  break;
3250 
3251 		case BUILT_IN_ATOMIC_FETCH_AND_1:
3252 		case BUILT_IN_ATOMIC_FETCH_AND_2:
3253 		case BUILT_IN_ATOMIC_FETCH_AND_4:
3254 		case BUILT_IN_ATOMIC_FETCH_AND_8:
3255 		case BUILT_IN_ATOMIC_FETCH_AND_16:
3256 		  optimize_atomic_bit_test_and (&i,
3257 						IFN_ATOMIC_BIT_TEST_AND_RESET,
3258 						true, false);
3259 		  break;
3260 		case BUILT_IN_SYNC_FETCH_AND_AND_1:
3261 		case BUILT_IN_SYNC_FETCH_AND_AND_2:
3262 		case BUILT_IN_SYNC_FETCH_AND_AND_4:
3263 		case BUILT_IN_SYNC_FETCH_AND_AND_8:
3264 		case BUILT_IN_SYNC_FETCH_AND_AND_16:
3265 		  optimize_atomic_bit_test_and (&i,
3266 						IFN_ATOMIC_BIT_TEST_AND_RESET,
3267 						false, false);
3268 		  break;
3269 
3270 		case BUILT_IN_MEMCPY:
3271 		  if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)
3272 		      && TREE_CODE (gimple_call_arg (stmt, 0)) == ADDR_EXPR
3273 		      && TREE_CODE (gimple_call_arg (stmt, 1)) == ADDR_EXPR
3274 		      && TREE_CODE (gimple_call_arg (stmt, 2)) == INTEGER_CST)
3275 		    {
3276 		      tree dest = TREE_OPERAND (gimple_call_arg (stmt, 0), 0);
3277 		      tree src = TREE_OPERAND (gimple_call_arg (stmt, 1), 0);
3278 		      tree len = gimple_call_arg (stmt, 2);
3279 		      optimize_memcpy (&i, dest, src, len);
3280 		    }
3281 		  break;
3282 
3283 		case BUILT_IN_VA_START:
3284 		case BUILT_IN_VA_END:
3285 		case BUILT_IN_VA_COPY:
3286 		  /* These shouldn't be folded before pass_stdarg.  */
3287 		  result = optimize_stdarg_builtin (stmt);
3288 		  break;
3289 
3290 		default:;
3291 		}
3292 
3293 	      if (!result)
3294 		{
3295 		  gsi_next (&i);
3296 		  continue;
3297 		}
3298 
3299 	      if (!update_call_from_tree (&i, result))
3300 		gimplify_and_update_call_from_tree (&i, result);
3301 	    }
3302 
3303 	  todoflags |= TODO_update_address_taken;
3304 
3305 	  if (dump_file && (dump_flags & TDF_DETAILS))
3306 	    {
3307 	      fprintf (dump_file, "Simplified\n  ");
3308 	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3309 	    }
3310 
3311           old_stmt = stmt;
3312 	  stmt = gsi_stmt (i);
3313 	  update_stmt (stmt);
3314 
3315 	  if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
3316 	      && gimple_purge_dead_eh_edges (bb))
3317 	    cfg_changed = true;
3318 
3319 	  if (dump_file && (dump_flags & TDF_DETAILS))
3320 	    {
3321 	      fprintf (dump_file, "to\n  ");
3322 	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3323 	      fprintf (dump_file, "\n");
3324 	    }
3325 
3326 	  /* Retry the same statement if it changed into another
3327 	     builtin; there might be new opportunities now.  */
3328           if (gimple_code (stmt) != GIMPLE_CALL)
3329 	    {
3330 	      gsi_next (&i);
3331 	      continue;
3332 	    }
3333 	  callee = gimple_call_fndecl (stmt);
3334 	  if (!callee
3335               || DECL_BUILT_IN_CLASS (callee) != BUILT_IN_NORMAL
3336 	      || DECL_FUNCTION_CODE (callee) == fcode)
3337 	    gsi_next (&i);
3338 	}
3339     }
3340 
3341   /* Delete unreachable blocks.  */
3342   if (cfg_changed)
3343     todoflags |= TODO_cleanup_cfg;
3344 
3345   return todoflags;
3346 }
3347 
3348 } // anon namespace
3349 
3350 gimple_opt_pass *
3351 make_pass_fold_builtins (gcc::context *ctxt)
3352 {
3353   return new pass_fold_builtins (ctxt);
3354 }
3355 
3356 /* A simple pass that emits some warnings post IPA.  */
3357 
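/* Editorial sketch of the diagnostic this pass emits:

     extern void use (void *) __attribute__ ((nonnull));
     ...
     use (0);

   warns "argument 1 null where non-null expected" at the call site,
   with a follow-up note pointing at the declaration of use.  */
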
3358 namespace {
3359 
3360 const pass_data pass_data_post_ipa_warn =
3361 {
3362   GIMPLE_PASS, /* type */
3363   "post_ipa_warn", /* name */
3364   OPTGROUP_NONE, /* optinfo_flags */
3365   TV_NONE, /* tv_id */
3366   ( PROP_cfg | PROP_ssa ), /* properties_required */
3367   0, /* properties_provided */
3368   0, /* properties_destroyed */
3369   0, /* todo_flags_start */
3370   0, /* todo_flags_finish */
3371 };
3372 
3373 class pass_post_ipa_warn : public gimple_opt_pass
3374 {
3375 public:
3376   pass_post_ipa_warn (gcc::context *ctxt)
3377     : gimple_opt_pass (pass_data_post_ipa_warn, ctxt)
3378   {}
3379 
3380   /* opt_pass methods: */
3381   opt_pass * clone () { return new pass_post_ipa_warn (m_ctxt); }
3382   virtual bool gate (function *) { return warn_nonnull != 0; }
3383   virtual unsigned int execute (function *);
3384 
3385 }; // class pass_post_ipa_warn
3386 
3387 unsigned int
3388 pass_post_ipa_warn::execute (function *fun)
3389 {
3390   basic_block bb;
3391 
3392   FOR_EACH_BB_FN (bb, fun)
3393     {
3394       gimple_stmt_iterator gsi;
3395       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3396 	{
3397 	  gimple *stmt = gsi_stmt (gsi);
3398 	  if (!is_gimple_call (stmt) || gimple_no_warning_p (stmt))
3399 	    continue;
3400 
3401 	  if (warn_nonnull)
3402 	    {
3403 	      bitmap nonnullargs
3404 		= get_nonnull_args (gimple_call_fntype (stmt));
3405 	      if (nonnullargs)
3406 		{
3407 		  for (unsigned i = 0; i < gimple_call_num_args (stmt); i++)
3408 		    {
3409 		      tree arg = gimple_call_arg (stmt, i);
3410 		      if (TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE)
3411 			continue;
3412 		      if (!integer_zerop (arg))
3413 			continue;
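		      /* An empty bitmap means the nonnull attribute was
			 given no argument list and so applies to every
			 pointer argument; otherwise only the positions
			 recorded in the bitmap are checked.  */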
3414 		      if (!bitmap_empty_p (nonnullargs)
3415 			  && !bitmap_bit_p (nonnullargs, i))
3416 			continue;
3417 
3418 		      location_t loc = gimple_location (stmt);
3419 		      if (warning_at (loc, OPT_Wnonnull,
3420 				      "argument %u null where non-null "
3421 				      "expected", i + 1))
3422 			{
3423 			  tree fndecl = gimple_call_fndecl (stmt);
3424 			  if (fndecl && DECL_IS_BUILTIN (fndecl))
3425 			    inform (loc, "in a call to built-in function %qD",
3426 				    fndecl);
3427 			  else if (fndecl)
3428 			    inform (DECL_SOURCE_LOCATION (fndecl),
3429 				    "in a call to function %qD declared here",
3430 				    fndecl);
3431 
3432 			}
3433 		    }
3434 		  BITMAP_FREE (nonnullargs);
3435 		}
3436 	    }
3437 	}
3438     }
3439   return 0;
3440 }
3441 
3442 } // anon namespace
3443 
3444 gimple_opt_pass *
3445 make_pass_post_ipa_warn (gcc::context *ctxt)
3446 {
3447   return new pass_post_ipa_warn (ctxt);
3448 }
3449 
3450 #if defined(__NetBSD__) && defined(NETBSD_NATIVE)
3451 /*
3452  * This is a big, ugly, temporary hack:
3453  *    http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59958
3454  * To make sure we have configured all our targets correctly, mimic
3455  * the #ifdef cascade from src/lib/libc/stdlib/jemalloc.c here and
3456  * assert at compile time that the value matches GCC's MALLOC_ABI_ALIGNMENT.
3457  */
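
/* __CTASSERT comes from NetBSD's <sys/cdefs.h>.  A minimal sketch of
   the technique typically behind such macros (name hypothetical):

     #define MY_CTASSERT(x) typedef char my_ctassert_t[(x) ? 1 : -1]

   A false condition yields a negative array size and therefore a
   compile-time error, at no run-time cost.  */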
3458 
3459 #if defined(__hppa__)
3460 #define	JEMALLOC_TINY_MIN_2POW	4
3461 #elif defined(__alpha__) || defined(__amd64__) || defined(__sparc64__)	\
3462      ||	(defined(__arm__) && defined(__ARM_EABI__)) \
3463      || defined(__ia64__) || defined(__powerpc__) \
3464      || defined(__aarch64__) \
3465      || ((defined(__mips__) || defined(__riscv__)) && defined(_LP64))
3466 #define	JEMALLOC_TINY_MIN_2POW	3
3467 #endif
3468 
3469 #ifndef JEMALLOC_TINY_MIN_2POW
3470 #define	JEMALLOC_TINY_MIN_2POW	2
3471 #endif
3472 
3473 /* Make sure we test the (native) 64-bit variant for targets supporting -m32.  */
3474 #undef	TARGET_64BIT
3475 #ifdef _LP64
3476 #define	TARGET_64BIT	1
3477 #else
3478 #ifdef __sh__
3479 #undef UNITS_PER_WORD
3480 #define	UNITS_PER_WORD	4	/* The original definition varies with the CPU.  */
3481 #endif
3482 #define	TARGET_64BIT	0
3483 #endif
3484 
3485 /* ARM has a non-constant MALLOC_ABI_ALIGNMENT since GCC 5.  */
3486 #if !defined(__arm__)
3487 #ifdef __CTASSERT
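/* MALLOC_ABI_ALIGNMENT is measured in bits, hence the factor of 8: a
   tiny allocation of 1 << JEMALLOC_TINY_MIN_2POW bytes corresponds to
   an alignment of 8 << JEMALLOC_TINY_MIN_2POW bits.  */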
3488 __CTASSERT((8<<JEMALLOC_TINY_MIN_2POW) == MALLOC_ABI_ALIGNMENT);
3489 #else
3490 #error __CTASSERT is unavailable; compiling against an older NetBSD version?
3491 #endif
3492 #endif
3493 
3494 #endif
3495