xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/tree-ssa-ccp.c (revision 23f5f46327e37e7811da3520f4bb933f9489322f)
1 /* Conditional constant propagation pass for the GNU compiler.
2    Copyright (C) 2000-2020 Free Software Foundation, Inc.
3    Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
4    Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>
5 
6 This file is part of GCC.
7 
8 GCC is free software; you can redistribute it and/or modify it
9 under the terms of the GNU General Public License as published by the
10 Free Software Foundation; either version 3, or (at your option) any
11 later version.
12 
13 GCC is distributed in the hope that it will be useful, but WITHOUT
14 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16 for more details.
17 
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3.  If not see
20 <http://www.gnu.org/licenses/>.  */
21 
22 /* Conditional constant propagation (CCP) is based on the SSA
23    propagation engine (tree-ssa-propagate.c).  Constant assignments of
24    the form VAR = CST are propagated from the assignments into uses of
25    VAR, which in turn may generate new constants.  The simulation uses
26    a four level lattice to keep track of constant values associated
27    with SSA names.  Given an SSA name V_i, it may take one of the
28    following values:
29 
30 	UNINITIALIZED   ->  the initial state of the value.  This value
31 			    is replaced with a correct initial value
32 			    the first time the value is used, so the
33 			    rest of the pass does not need to care about
34 			    it.  Using this value simplifies initialization
35 			    of the pass, and prevents us from needlessly
36 			    scanning statements that are never reached.
37 
38 	UNDEFINED	->  V_i is a local variable whose definition
39 			    has not been processed yet.  Therefore we
40 			    don't yet know if its value is a constant
41 			    or not.
42 
43 	CONSTANT	->  V_i has been found to hold a constant
44 			    value C.
45 
46 	VARYING		->  V_i cannot take a constant value, or if it
47 			    does, it is not possible to determine it
48 			    at compile time.
49 
50    The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:
51 
52    1- In ccp_visit_stmt, we are interested in assignments whose RHS
53       evaluates to a constant and conditional jumps whose predicate
54       evaluates to a boolean true or false.  When an assignment of
55       the form V_i = CONST is found, V_i's lattice value is set to
56       CONSTANT and CONST is associated with it.  This causes the
57       propagation engine to add all the SSA edges coming out of the
58       assignment into the worklists, so that statements that use V_i
59       can be visited.
60 
61       If the statement is a conditional with a constant predicate, we
62       mark the outgoing edges as executable or not executable
63       depending on the predicate's value.  This is then used when
64       visiting PHI nodes to know when a PHI argument can be ignored.
65 
66 
67    2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
68       same constant C, then the LHS of the PHI is set to C.  This
69       evaluation is known as the "meet operation".  Since one of the
70       goals of this evaluation is to optimistically return constant
71       values as often as possible, it uses two main short cuts:
72 
73       - If an argument is flowing in through a non-executable edge, it
74 	is ignored.  This is useful in cases like this:
75 
76 			if (PRED)
77 			  a_9 = 3;
78 			else
79 			  a_10 = 100;
80 			a_11 = PHI (a_9, a_10)
81 
82 	If PRED is known to always evaluate to false, then we can
83 	assume that a_11 will always take its value from a_10, meaning
84 	that instead of considering it VARYING (a_9 and a_10 have
85 	different values), we can consider it CONSTANT 100.
86 
87       - If an argument has an UNDEFINED value, then it does not affect
88 	the outcome of the meet operation.  If a variable V_i has an
89 	UNDEFINED value, it means that either its defining statement
90 	hasn't been visited yet or V_i has no defining statement, in
91 	which case the original symbol 'V' is being used
92 	uninitialized.  Since 'V' is a local variable, the compiler
93 	may assume any initial value for it.
94 
95 
96    After propagation, every variable V_i that ends up with a lattice
97    value of CONSTANT will have the associated constant value in the
98    array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
99    final substitution and folding.
100 
101    This algorithm uses wide-ints at the max precision of the target.
102    This means that, with one uninteresting exception, variables with
103    UNSIGNED types never go to VARYING because the bits above the
104    precision of the type of the variable are always zero.  The
105    uninteresting case is a variable of UNSIGNED type that has the
106    maximum precision of the target.  Such variables can go to VARYING,
107    but this causes no loss of information since these variables will
108    never be extended.
109 
110    References:
111 
112      Constant propagation with conditional branches,
113      Wegman and Zadeck, ACM TOPLAS 13(2):181-210.
114 
115      Building an Optimizing Compiler,
116      Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.
117 
118      Advanced Compiler Design and Implementation,
119      Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */
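/* As a rough illustration of the transformation described above (the
   function below is hypothetical and not part of this file), consider

		int foo (void)
		{
		  int x = 4;
		  int y;
		  if (x > 3)
		    y = x + 1;
		  else
		    y = 0;
		  return y;
		}

   The assignment makes x_1 CONSTANT 4, the predicate x_1 > 3 evaluates
   to true so only the THEN edge is marked executable, the PHI node for
   y then meets a single executable argument and becomes CONSTANT 5, and
   the final substitution replaces the use of y with the constant 5.  */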
120 
121 #include "config.h"
122 #include "system.h"
123 #include "coretypes.h"
124 #include "backend.h"
125 #include "target.h"
126 #include "tree.h"
127 #include "gimple.h"
128 #include "tree-pass.h"
129 #include "ssa.h"
130 #include "gimple-pretty-print.h"
131 #include "fold-const.h"
132 #include "gimple-fold.h"
133 #include "tree-eh.h"
134 #include "gimplify.h"
135 #include "gimple-iterator.h"
136 #include "tree-cfg.h"
137 #include "tree-ssa-propagate.h"
138 #include "dbgcnt.h"
139 #include "builtins.h"
140 #include "cfgloop.h"
141 #include "stor-layout.h"
142 #include "optabs-query.h"
143 #include "tree-ssa-ccp.h"
144 #include "tree-dfa.h"
145 #include "diagnostic-core.h"
146 #include "stringpool.h"
147 #include "attribs.h"
148 #include "tree-vector-builder.h"
149 #include "cgraph.h"
150 #include "alloc-pool.h"
151 #include "symbol-summary.h"
152 #include "ipa-utils.h"
153 #include "ipa-prop.h"
154 
155 /* Possible lattice values.  */
156 typedef enum
157 {
158   UNINITIALIZED,
159   UNDEFINED,
160   CONSTANT,
161   VARYING
162 } ccp_lattice_t;
163 
164 class ccp_prop_value_t {
165 public:
166     /* Lattice value.  */
167     ccp_lattice_t lattice_val;
168 
169     /* Propagated value.  */
170     tree value;
171 
172     /* Mask that applies to the propagated value during CCP.  For X
173        with a CONSTANT lattice value X & ~mask == value & ~mask.  The
174        zero bits in the mask cover constant values.  The ones mean no
175        information.  */
176     widest_int mask;
177 };
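/* For illustration of the encoding above: VALUE == 0x8 together with
   MASK == 0x3 describes any of the values 0x8, 0x9, 0xa and 0xb; bits
   0 and 1 carry no information, while bit 3 is known to be set and
   all higher bits are known to be zero.  */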
178 
179 class ccp_propagate : public ssa_propagation_engine
180 {
181  public:
182   enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
183   enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
184 };
185 
186 /* Array of propagated constant values.  After propagation,
187    CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  If
188    the constant is held in an SSA name representing a memory store
189    (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
190    memory reference used to store (i.e., the LHS of the assignment
191    doing the store).  */
192 static ccp_prop_value_t *const_val;
193 static unsigned n_const_val;
194 
195 static void canonicalize_value (ccp_prop_value_t *);
196 static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *);
197 
198 /* Dump constant propagation value VAL to file OUTF prefixed by PREFIX.  */
199 
200 static void
201 dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
202 {
203   switch (val.lattice_val)
204     {
205     case UNINITIALIZED:
206       fprintf (outf, "%sUNINITIALIZED", prefix);
207       break;
208     case UNDEFINED:
209       fprintf (outf, "%sUNDEFINED", prefix);
210       break;
211     case VARYING:
212       fprintf (outf, "%sVARYING", prefix);
213       break;
214     case CONSTANT:
215       if (TREE_CODE (val.value) != INTEGER_CST
216 	  || val.mask == 0)
217 	{
218 	  fprintf (outf, "%sCONSTANT ", prefix);
219 	  print_generic_expr (outf, val.value, dump_flags);
220 	}
221       else
222 	{
223 	  widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
224 					     val.mask);
225 	  fprintf (outf, "%sCONSTANT ", prefix);
226 	  print_hex (cval, outf);
227 	  fprintf (outf, " (");
228 	  print_hex (val.mask, outf);
229 	  fprintf (outf, ")");
230 	}
231       break;
232     default:
233       gcc_unreachable ();
234     }
235 }
236 
237 
238 /* Print lattice value VAL to stderr.  */
239 
240 void debug_lattice_value (ccp_prop_value_t val);
241 
242 DEBUG_FUNCTION void
243 debug_lattice_value (ccp_prop_value_t val)
244 {
245   dump_lattice_value (stderr, "", val);
246   fprintf (stderr, "\n");
247 }
248 
249 /* Extend NONZERO_BITS to a full mask, using SGN for the extension.  */
250 
251 static widest_int
252 extend_mask (const wide_int &nonzero_bits, signop sgn)
253 {
254   return widest_int::from (nonzero_bits, sgn);
255 }
256 
257 /* Compute a default value for variable VAR and store it in the
258    CONST_VAL array.  The following rules are used to get default
259    values:
260 
261    1- Global and static variables that are declared constant are
262       considered CONSTANT.
263 
264    2- Any other value is considered UNDEFINED.  This is useful when
265       considering PHI nodes.  PHI arguments that are undefined do not
266       change the constant value of the PHI node, which allows for more
267       constants to be propagated.
268 
269    3- Variables defined by statements other than assignments and PHI
270       nodes are considered VARYING.
271 
272    4- Initial values of variables that are not GIMPLE registers are
273       considered VARYING.  */
274 
275 static ccp_prop_value_t
276 get_default_value (tree var)
277 {
278   ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
279   gimple *stmt;
280 
281   stmt = SSA_NAME_DEF_STMT (var);
282 
283   if (gimple_nop_p (stmt))
284     {
285       /* Variables defined by an empty statement are those used
286 	 before being initialized.  If VAR is a local variable, we
287 	 can assume initially that it is UNDEFINED, otherwise we must
288 	 consider it VARYING.  */
289       if (!virtual_operand_p (var)
290 	  && SSA_NAME_VAR (var)
291 	  && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
292 	val.lattice_val = UNDEFINED;
293       else
294 	{
295 	  val.lattice_val = VARYING;
296 	  val.mask = -1;
297 	  if (flag_tree_bit_ccp)
298 	    {
299 	      wide_int nonzero_bits = get_nonzero_bits (var);
300 	      tree value;
301 	      widest_int mask;
302 
303 	      if (SSA_NAME_VAR (var)
304 		  && TREE_CODE (SSA_NAME_VAR (var)) == PARM_DECL
305 		  && ipcp_get_parm_bits (SSA_NAME_VAR (var), &value, &mask))
306 		{
307 		  val.lattice_val = CONSTANT;
308 		  val.value = value;
309 		  widest_int ipa_value = wi::to_widest (value);
310 		  /* Unknown bits from IPA CP must be equal to zero.  */
311 		  gcc_assert (wi::bit_and (ipa_value, mask) == 0);
312 		  val.mask = mask;
313 		  if (nonzero_bits != -1)
314 		    val.mask &= extend_mask (nonzero_bits,
315 					     TYPE_SIGN (TREE_TYPE (var)));
316 		}
317 	      else if (nonzero_bits != -1)
318 		{
319 		  val.lattice_val = CONSTANT;
320 		  val.value = build_zero_cst (TREE_TYPE (var));
321 		  val.mask = extend_mask (nonzero_bits,
322 					  TYPE_SIGN (TREE_TYPE (var)));
323 		}
324 	    }
325 	}
326     }
327   else if (is_gimple_assign (stmt))
328     {
329       tree cst;
330       if (gimple_assign_single_p (stmt)
331 	  && DECL_P (gimple_assign_rhs1 (stmt))
332 	  && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt))))
333 	{
334 	  val.lattice_val = CONSTANT;
335 	  val.value = cst;
336 	}
337       else
338 	{
339 	  /* Any other variable defined by an assignment is considered
340 	     UNDEFINED.  */
341 	  val.lattice_val = UNDEFINED;
342 	}
343     }
344   else if ((is_gimple_call (stmt)
345 	    && gimple_call_lhs (stmt) != NULL_TREE)
346 	   || gimple_code (stmt) == GIMPLE_PHI)
347     {
348       /* A variable defined by a call or a PHI node is considered
349 	 UNDEFINED.  */
350       val.lattice_val = UNDEFINED;
351     }
352   else
353     {
354       /* Otherwise, VAR will never take on a constant value.  */
355       val.lattice_val = VARYING;
356       val.mask = -1;
357     }
358 
359   return val;
360 }
361 
362 
363 /* Get the constant value associated with variable VAR.  */
364 
365 static inline ccp_prop_value_t *
366 get_value (tree var)
367 {
368   ccp_prop_value_t *val;
369 
370   if (const_val == NULL
371       || SSA_NAME_VERSION (var) >= n_const_val)
372     return NULL;
373 
374   val = &const_val[SSA_NAME_VERSION (var)];
375   if (val->lattice_val == UNINITIALIZED)
376     *val = get_default_value (var);
377 
378   canonicalize_value (val);
379 
380   return val;
381 }
382 
383 /* Return the constant tree value associated with VAR.  */
384 
385 static inline tree
386 get_constant_value (tree var)
387 {
388   ccp_prop_value_t *val;
389   if (TREE_CODE (var) != SSA_NAME)
390     {
391       if (is_gimple_min_invariant (var))
392         return var;
393       return NULL_TREE;
394     }
395   val = get_value (var);
396   if (val
397       && val->lattice_val == CONSTANT
398       && (TREE_CODE (val->value) != INTEGER_CST
399 	  || val->mask == 0))
400     return val->value;
401   return NULL_TREE;
402 }
403 
404 /* Sets the value associated with VAR to VARYING.  */
405 
406 static inline void
407 set_value_varying (tree var)
408 {
409   ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];
410 
411   val->lattice_val = VARYING;
412   val->value = NULL_TREE;
413   val->mask = -1;
414 }
415 
416 /* For integer constants, make sure to drop TREE_OVERFLOW.  */
417 
418 static void
419 canonicalize_value (ccp_prop_value_t *val)
420 {
421   if (val->lattice_val != CONSTANT)
422     return;
423 
424   if (TREE_OVERFLOW_P (val->value))
425     val->value = drop_tree_overflow (val->value);
426 }
427 
428 /* Return whether the lattice transition is valid.  */
429 
430 static bool
431 valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val)
432 {
433   /* Lattice transitions must always be monotonically increasing in
434      value.  */
435   if (old_val.lattice_val < new_val.lattice_val)
436     return true;
437 
438   if (old_val.lattice_val != new_val.lattice_val)
439     return false;
440 
441   if (!old_val.value && !new_val.value)
442     return true;
443 
444   /* Now both lattice values are CONSTANT.  */
445 
446   /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
447      when only a single copy edge is executable.  */
448   if (TREE_CODE (old_val.value) == SSA_NAME
449       && TREE_CODE (new_val.value) == SSA_NAME)
450     return true;
451 
452   /* Allow transitioning from a constant to a copy.  */
453   if (is_gimple_min_invariant (old_val.value)
454       && TREE_CODE (new_val.value) == SSA_NAME)
455     return true;
456 
457   /* Allow transitioning from PHI <&x, not executable> == &x
458      to PHI <&x, &y> == common alignment.  */
459   if (TREE_CODE (old_val.value) != INTEGER_CST
460       && TREE_CODE (new_val.value) == INTEGER_CST)
461     return true;
462 
463   /* Bit-lattices have to agree in the still valid bits.  */
464   if (TREE_CODE (old_val.value) == INTEGER_CST
465       && TREE_CODE (new_val.value) == INTEGER_CST)
466     return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
467 	    == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));
468 
469   /* Otherwise constant values have to agree.  */
470   if (operand_equal_p (old_val.value, new_val.value, 0))
471     return true;
472 
473   /* At least the kinds and types should agree now.  */
474   if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value)
475       || !types_compatible_p (TREE_TYPE (old_val.value),
476 			      TREE_TYPE (new_val.value)))
477     return false;
478 
479   /* For floats and !HONOR_NANS allow transitions from (partial) NaN
480      to non-NaN.  */
481   tree type = TREE_TYPE (new_val.value);
482   if (SCALAR_FLOAT_TYPE_P (type)
483       && !HONOR_NANS (type))
484     {
485       if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value)))
486 	return true;
487     }
488   else if (VECTOR_FLOAT_TYPE_P (type)
489 	   && !HONOR_NANS (type))
490     {
491       unsigned int count
492 	= tree_vector_builder::binary_encoded_nelts (old_val.value,
493 						     new_val.value);
494       for (unsigned int i = 0; i < count; ++i)
495 	if (!REAL_VALUE_ISNAN
496 	       (TREE_REAL_CST (VECTOR_CST_ENCODED_ELT (old_val.value, i)))
497 	    && !operand_equal_p (VECTOR_CST_ENCODED_ELT (old_val.value, i),
498 				 VECTOR_CST_ENCODED_ELT (new_val.value, i), 0))
499 	  return false;
500       return true;
501     }
502   else if (COMPLEX_FLOAT_TYPE_P (type)
503 	   && !HONOR_NANS (type))
504     {
505       if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value)))
506 	  && !operand_equal_p (TREE_REALPART (old_val.value),
507 			       TREE_REALPART (new_val.value), 0))
508 	return false;
509       if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value)))
510 	  && !operand_equal_p (TREE_IMAGPART (old_val.value),
511 			       TREE_IMAGPART (new_val.value), 0))
512 	return false;
513       return true;
514     }
515   return false;
516 }
517 
518 /* Set the value for variable VAR to NEW_VAL.  Return true if the new
519    value is different from VAR's previous value.  */
520 
521 static bool
522 set_lattice_value (tree var, ccp_prop_value_t *new_val)
523 {
524   /* We can deal with old UNINITIALIZED values just fine here.  */
525   ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];
526 
527   canonicalize_value (new_val);
528 
529   /* We have to be careful to not go up the bitwise lattice
530      represented by the mask.  Instead of dropping to VARYING
531      use the meet operator to retain a conservative value.
532      Missed optimizations like PR65851 makes this necessary.
533      It also ensures we converge to a stable lattice solution.  */
534   if (old_val->lattice_val != UNINITIALIZED)
535     ccp_lattice_meet (new_val, old_val);
536 
537   gcc_checking_assert (valid_lattice_transition (*old_val, *new_val));
538 
539   /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
540      caller that this was a non-transition.  */
541   if (old_val->lattice_val != new_val->lattice_val
542       || (new_val->lattice_val == CONSTANT
543 	  && (TREE_CODE (new_val->value) != TREE_CODE (old_val->value)
544 	      || (TREE_CODE (new_val->value) == INTEGER_CST
545 		  && (new_val->mask != old_val->mask
546 		      || (wi::bit_and_not (wi::to_widest (old_val->value),
547 					   new_val->mask)
548 			  != wi::bit_and_not (wi::to_widest (new_val->value),
549 					      new_val->mask))))
550 	      || (TREE_CODE (new_val->value) != INTEGER_CST
551 		  && !operand_equal_p (new_val->value, old_val->value, 0)))))
552     {
553       /* ???  We would like to delay creation of INTEGER_CSTs from
554 	 partially constants here.  */
555 
556       if (dump_file && (dump_flags & TDF_DETAILS))
557 	{
558 	  dump_lattice_value (dump_file, "Lattice value changed to ", *new_val);
559 	  fprintf (dump_file, ".  Adding SSA edges to worklist.\n");
560 	}
561 
562       *old_val = *new_val;
563 
564       gcc_assert (new_val->lattice_val != UNINITIALIZED);
565       return true;
566     }
567 
568   return false;
569 }
570 
571 static ccp_prop_value_t get_value_for_expr (tree, bool);
572 static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
573 void bit_value_binop (enum tree_code, signop, int, widest_int *, widest_int *,
574 		      signop, int, const widest_int &, const widest_int &,
575 		      signop, int, const widest_int &, const widest_int &);
576 
577 /* Return a widest_int that can be used for bitwise simplifications
578    from VAL.  */
579 
580 static widest_int
581 value_to_wide_int (ccp_prop_value_t val)
582 {
583   if (val.value
584       && TREE_CODE (val.value) == INTEGER_CST)
585     return wi::to_widest (val.value);
586 
587   return 0;
588 }
589 
590 /* Return the value for the address expression EXPR based on alignment
591    information.  */
592 
593 static ccp_prop_value_t
594 get_value_from_alignment (tree expr)
595 {
596   tree type = TREE_TYPE (expr);
597   ccp_prop_value_t val;
598   unsigned HOST_WIDE_INT bitpos;
599   unsigned int align;
600 
601   gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
602 
603   get_pointer_alignment_1 (expr, &align, &bitpos);
604   val.mask = wi::bit_and_not
605     (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
606      ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
607      : -1,
608      align / BITS_PER_UNIT - 1);
609   val.lattice_val
610     = wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT;
611   if (val.lattice_val == CONSTANT)
612     val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
613   else
614     val.value = NULL_TREE;
615 
616   return val;
617 }
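/* A worked example of the above, assuming a pointer known to be
   16-byte aligned with a byte offset of 4: get_pointer_alignment_1
   yields ALIGN == 128 and BITPOS == 32, so the low four bits of the
   mask are cleared (those bits are known) and the value becomes 4,
   giving a CONSTANT lattice value in which only the alignment and
   misalignment bits are determined.  */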
618 
619 /* Return the value for the tree operand EXPR.  If FOR_BITS_P is true
620    return constant bits extracted from alignment information for
621    invariant addresses.  */
622 
623 static ccp_prop_value_t
624 get_value_for_expr (tree expr, bool for_bits_p)
625 {
626   ccp_prop_value_t val;
627 
628   if (TREE_CODE (expr) == SSA_NAME)
629     {
630       ccp_prop_value_t *val_ = get_value (expr);
631       if (val_)
632 	val = *val_;
633       else
634 	{
635 	  val.lattice_val = VARYING;
636 	  val.value = NULL_TREE;
637 	  val.mask = -1;
638 	}
639       if (for_bits_p
640 	  && val.lattice_val == CONSTANT)
641 	{
642 	  if (TREE_CODE (val.value) == ADDR_EXPR)
643 	    val = get_value_from_alignment (val.value);
644 	  else if (TREE_CODE (val.value) != INTEGER_CST)
645 	    {
646 	      val.lattice_val = VARYING;
647 	      val.value = NULL_TREE;
648 	      val.mask = -1;
649 	    }
650 	}
651       /* Fall back to a copy value.  */
652       if (!for_bits_p
653 	  && val.lattice_val == VARYING
654 	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr))
655 	{
656 	  val.lattice_val = CONSTANT;
657 	  val.value = expr;
658 	  val.mask = -1;
659 	}
660     }
661   else if (is_gimple_min_invariant (expr)
662 	   && (!for_bits_p || TREE_CODE (expr) == INTEGER_CST))
663     {
664       val.lattice_val = CONSTANT;
665       val.value = expr;
666       val.mask = 0;
667       canonicalize_value (&val);
668     }
669   else if (TREE_CODE (expr) == ADDR_EXPR)
670     val = get_value_from_alignment (expr);
671   else
672     {
673       val.lattice_val = VARYING;
674       val.mask = -1;
675       val.value = NULL_TREE;
676     }
677 
678   if (val.lattice_val == VARYING
679       && TYPE_UNSIGNED (TREE_TYPE (expr)))
680     val.mask = wi::zext (val.mask, TYPE_PRECISION (TREE_TYPE (expr)));
681 
682   return val;
683 }
684 
685 /* Return the likely CCP lattice value for STMT.
686 
687    If STMT has no operands, then return CONSTANT.
688 
689    Else if undefinedness of operands of STMT cause its value to be
690    undefined, then return UNDEFINED.
691 
692    Else if any operands of STMT are constants, then return CONSTANT.
693 
694    Else return VARYING.  */
695 
696 static ccp_lattice_t
697 likely_value (gimple *stmt)
698 {
699   bool has_constant_operand, has_undefined_operand, all_undefined_operands;
700   bool has_nsa_operand;
701   tree use;
702   ssa_op_iter iter;
703   unsigned i;
704 
705   enum gimple_code code = gimple_code (stmt);
706 
707   /* This function appears to be called only for assignments, calls,
708      conditionals, and switches, due to the logic in visit_stmt.  */
709   gcc_assert (code == GIMPLE_ASSIGN
710               || code == GIMPLE_CALL
711               || code == GIMPLE_COND
712               || code == GIMPLE_SWITCH);
713 
714   /* If the statement has volatile operands, it won't fold to a
715      constant value.  */
716   if (gimple_has_volatile_ops (stmt))
717     return VARYING;
718 
719   /* Arrive here for more complex cases.  */
720   has_constant_operand = false;
721   has_undefined_operand = false;
722   all_undefined_operands = true;
723   has_nsa_operand = false;
724   FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
725     {
726       ccp_prop_value_t *val = get_value (use);
727 
728       if (val && val->lattice_val == UNDEFINED)
729 	has_undefined_operand = true;
730       else
731 	all_undefined_operands = false;
732 
733       if (val && val->lattice_val == CONSTANT)
734 	has_constant_operand = true;
735 
736       if (SSA_NAME_IS_DEFAULT_DEF (use)
737 	  || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use)))
738 	has_nsa_operand = true;
739     }
740 
741   /* There may be constants in regular rhs operands.  For calls we
742      have to ignore the lhs, fndecl and static chain; for other
743      statements, only the lhs.  */
744   for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt);
745        i < gimple_num_ops (stmt); ++i)
746     {
747       tree op = gimple_op (stmt, i);
748       if (!op || TREE_CODE (op) == SSA_NAME)
749 	continue;
750       if (is_gimple_min_invariant (op))
751 	has_constant_operand = true;
752     }
753 
754   if (has_constant_operand)
755     all_undefined_operands = false;
756 
757   if (has_undefined_operand
758       && code == GIMPLE_CALL
759       && gimple_call_internal_p (stmt))
760     switch (gimple_call_internal_fn (stmt))
761       {
762 	/* These three internal functions use the first argument only
763 	   as a magic way to find out a decl uid.  */
764       case IFN_GOMP_SIMD_LANE:
765       case IFN_GOMP_SIMD_VF:
766       case IFN_GOMP_SIMD_LAST_LANE:
767 	has_undefined_operand = false;
768 	break;
769       default:
770 	break;
771       }
772 
773   /* If the operation combines operands like COMPLEX_EXPR make sure to
774      not mark the result UNDEFINED if only one part of the result is
775      undefined.  */
776   if (has_undefined_operand && all_undefined_operands)
777     return UNDEFINED;
778   else if (code == GIMPLE_ASSIGN && has_undefined_operand)
779     {
780       switch (gimple_assign_rhs_code (stmt))
781 	{
782 	/* Unary operators are handled with all_undefined_operands.  */
783 	case PLUS_EXPR:
784 	case MINUS_EXPR:
785 	case POINTER_PLUS_EXPR:
786 	case BIT_XOR_EXPR:
787 	  /* Not MIN_EXPR, MAX_EXPR.  One VARYING operand may be selected.
788 	     Not bitwise operators, one VARYING operand may specify the
789 	     result completely.
790 	     Not logical operators for the same reason, apart from XOR.
791 	     Not COMPLEX_EXPR as one VARYING operand makes the result partly
792 	     not UNDEFINED.  Not *DIV_EXPR, comparisons and shifts because
793 	     the undefined operand may be promoted.  */
794 	  return UNDEFINED;
795 
796 	case ADDR_EXPR:
797 	  /* If any part of an address is UNDEFINED, like the index
798 	     of an ARRAY_EXPR, then treat the result as UNDEFINED.  */
799 	  return UNDEFINED;
800 
801 	default:
802 	  ;
803 	}
804     }
805   /* If there was an UNDEFINED operand but the result may not be UNDEFINED,
806      fall back to CONSTANT.  During iteration UNDEFINED may still drop
807      to CONSTANT.  */
808   if (has_undefined_operand)
809     return CONSTANT;
810 
811   /* We do not consider virtual operands here -- load from read-only
812      memory may have only VARYING virtual operands, but still be
813      constant.  Also we can combine the stmt with definitions from
814      operands whose definitions are not simulated again.  */
815   if (has_constant_operand
816       || has_nsa_operand
817       || gimple_references_memory_p (stmt))
818     return CONSTANT;
819 
820   return VARYING;
821 }
822 
823 /* Returns true if STMT cannot be constant.  */
824 
825 static bool
826 surely_varying_stmt_p (gimple *stmt)
827 {
828   /* If the statement has operands that we cannot handle, it cannot be
829      constant.  */
830   if (gimple_has_volatile_ops (stmt))
831     return true;
832 
833   /* If it is a call that does not return a value, or a direct call
834      to a function that is neither a built-in nor declared with the
835      assume_aligned/alloc_align attributes, it is varying.  */
836   if (is_gimple_call (stmt))
837     {
838       tree fndecl, fntype = gimple_call_fntype (stmt);
839       if (!gimple_call_lhs (stmt)
840 	  || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE
841 	      && !fndecl_built_in_p (fndecl)
842 	      && !lookup_attribute ("assume_aligned",
843 				    TYPE_ATTRIBUTES (fntype))
844 	      && !lookup_attribute ("alloc_align",
845 				    TYPE_ATTRIBUTES (fntype))))
846 	return true;
847     }
848 
849   /* Any other store operation is not interesting.  */
850   else if (gimple_vdef (stmt))
851     return true;
852 
853   /* Anything other than assignments and conditional jumps are not
854      interesting for CCP.  */
855   if (gimple_code (stmt) != GIMPLE_ASSIGN
856       && gimple_code (stmt) != GIMPLE_COND
857       && gimple_code (stmt) != GIMPLE_SWITCH
858       && gimple_code (stmt) != GIMPLE_CALL)
859     return true;
860 
861   return false;
862 }
863 
864 /* Initialize local data structures for CCP.  */
865 
866 static void
867 ccp_initialize (void)
868 {
869   basic_block bb;
870 
871   n_const_val = num_ssa_names;
872   const_val = XCNEWVEC (ccp_prop_value_t, n_const_val);
873 
874   /* Initialize simulation flags for PHI nodes and statements.  */
875   FOR_EACH_BB_FN (bb, cfun)
876     {
877       gimple_stmt_iterator i;
878 
879       for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
880         {
881 	  gimple *stmt = gsi_stmt (i);
882 	  bool is_varying;
883 
884 	  /* If the statement is a control statement, we always want to
885 	     simulate it at least once.  Failing to do so means that its
886 	     outgoing edges will never get added.  */
887 	  if (stmt_ends_bb_p (stmt))
888 	    is_varying = false;
889 	  else
890 	    is_varying = surely_varying_stmt_p (stmt);
891 
892 	  if (is_varying)
893 	    {
894 	      tree def;
895 	      ssa_op_iter iter;
896 
897 	      /* If the statement will not produce a constant, mark
898 		 all its outputs VARYING.  */
899 	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
900 		set_value_varying (def);
901 	    }
902           prop_set_simulate_again (stmt, !is_varying);
903 	}
904     }
905 
906   /* Now process PHI nodes.  We never clear the simulate_again flag on
907      phi nodes, since we do not know which edges are executable yet,
908      except for phi nodes for virtual operands when we do not do store ccp.  */
909   FOR_EACH_BB_FN (bb, cfun)
910     {
911       gphi_iterator i;
912 
913       for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
914         {
915           gphi *phi = i.phi ();
916 
917 	  if (virtual_operand_p (gimple_phi_result (phi)))
918             prop_set_simulate_again (phi, false);
919 	  else
920             prop_set_simulate_again (phi, true);
921 	}
922     }
923 }
924 
925 /* Debug counter support.  Reset the lattice values of SSA names to
926    VARYING when the total number of SSA names analyzed exceeds the
927    specified debug count.  */
928 
929 static void
930 do_dbg_cnt (void)
931 {
932   unsigned i;
933   for (i = 0; i < num_ssa_names; i++)
934     {
935       if (!dbg_cnt (ccp))
936         {
937           const_val[i].lattice_val = VARYING;
938 	  const_val[i].mask = -1;
939           const_val[i].value = NULL_TREE;
940         }
941     }
942 }
943 
944 
945 /* We want to provide our own GET_VALUE and FOLD_STMT virtual methods.  */
946 class ccp_folder : public substitute_and_fold_engine
947 {
948  public:
949   tree get_value (tree) FINAL OVERRIDE;
950   bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
951 };
952 
953 /* This method just wraps GET_CONSTANT_VALUE for now.  Over time
954    naked calls to GET_CONSTANT_VALUE should be eliminated in favor
955    of calling member functions.  */
956 
957 tree
958 ccp_folder::get_value (tree op)
959 {
960   return get_constant_value (op);
961 }
962 
963 /* Do final substitution of propagated values, cleanup the flowgraph and
964    free allocated storage.  If NONZERO_P, record nonzero bits.
965 
966    Return TRUE when something was optimized.  */
967 
968 static bool
969 ccp_finalize (bool nonzero_p)
970 {
971   bool something_changed;
972   unsigned i;
973   tree name;
974 
975   do_dbg_cnt ();
976 
977   /* Derive alignment and misalignment information from partially
978      constant pointers in the lattice or nonzero bits from partially
979      constant integers.  */
980   FOR_EACH_SSA_NAME (i, name, cfun)
981     {
982       ccp_prop_value_t *val;
983       unsigned int tem, align;
984 
985       if (!POINTER_TYPE_P (TREE_TYPE (name))
986 	  && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
987 	      /* Don't record nonzero bits before IPA to avoid
988 		 using too much memory.  */
989 	      || !nonzero_p))
990 	continue;
991 
992       val = get_value (name);
993       if (val->lattice_val != CONSTANT
994 	  || TREE_CODE (val->value) != INTEGER_CST
995 	  || val->mask == 0)
996 	continue;
997 
998       if (POINTER_TYPE_P (TREE_TYPE (name)))
999 	{
1000 	  /* Trailing mask bits specify the alignment, trailing value
1001 	     bits the misalignment.  */
1002 	  tem = val->mask.to_uhwi ();
1003 	  align = least_bit_hwi (tem);
1004 	  if (align > 1)
1005 	    set_ptr_info_alignment (get_ptr_info (name), align,
1006 				    (TREE_INT_CST_LOW (val->value)
1007 				     & (align - 1)));
1008 	}
1009       else
1010 	{
1011 	  unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
1012 	  wide_int nonzero_bits
1013 	    = (wide_int::from (val->mask, precision, UNSIGNED)
1014 	       | wi::to_wide (val->value));
1015 	  nonzero_bits &= get_nonzero_bits (name);
1016 	  set_nonzero_bits (name, nonzero_bits);
1017 	}
1018     }
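  /* A worked example of the derivation above: a pointer whose lattice
     value is CONSTANT 4 with mask ~0xf has its low four bits known, so
     least_bit_hwi on the mask yields an alignment of 16 and the
     recorded misalignment is 4 & (16 - 1) == 4.  For an integer with
     value 0x10 and mask 0x3 the derived nonzero bits are 0x13, further
     intersected with whatever is already known for the name.  */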
1019 
1020   /* Perform substitutions based on the known constant values.  */
1021   class ccp_folder ccp_folder;
1022   something_changed = ccp_folder.substitute_and_fold ();
1023 
1024   free (const_val);
1025   const_val = NULL;
1026   return something_changed;
1027 }
1028 
1029 
1030 /* Compute the meet operator between *VAL1 and *VAL2.  Store the result
1031    in VAL1.
1032 
1033    		any  M UNDEFINED   = any
1034 		any  M VARYING     = VARYING
1035 		Ci   M Cj	   = Ci		if (i == j)
1036 		Ci   M Cj	   = VARYING	if (i != j)
1037    */
1038 
1039 static void
1040 ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
1041 {
1042   if (val1->lattice_val == UNDEFINED
1043       /* For UNDEFINED M SSA we can't always use the SSA name because
1044          its definition may not dominate the PHI node.  Doing optimistic
1045 	 copy propagation also causes a lot of gcc.dg/uninit-pred*.c FAILs.  */
1046       && (val2->lattice_val != CONSTANT
1047 	  || TREE_CODE (val2->value) != SSA_NAME))
1048     {
1049       /* UNDEFINED M any = any   */
1050       *val1 = *val2;
1051     }
1052   else if (val2->lattice_val == UNDEFINED
1053 	   /* See above.  */
1054 	   && (val1->lattice_val != CONSTANT
1055 	       || TREE_CODE (val1->value) != SSA_NAME))
1056     {
1057       /* any M UNDEFINED = any
1058          Nothing to do.  VAL1 already contains the value we want.  */
1059       ;
1060     }
1061   else if (val1->lattice_val == VARYING
1062            || val2->lattice_val == VARYING)
1063     {
1064       /* any M VARYING = VARYING.  */
1065       val1->lattice_val = VARYING;
1066       val1->mask = -1;
1067       val1->value = NULL_TREE;
1068     }
1069   else if (val1->lattice_val == CONSTANT
1070 	   && val2->lattice_val == CONSTANT
1071 	   && TREE_CODE (val1->value) == INTEGER_CST
1072 	   && TREE_CODE (val2->value) == INTEGER_CST)
1073     {
1074       /* Ci M Cj = Ci		if (i == j)
1075 	 Ci M Cj = VARYING	if (i != j)
1076 
1077          For INTEGER_CSTs mask unequal bits.  If no equal bits remain,
1078 	 drop to varying.  */
1079       val1->mask = (val1->mask | val2->mask
1080 		    | (wi::to_widest (val1->value)
1081 		       ^ wi::to_widest (val2->value)));
1082       if (wi::sext (val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1)
1083 	{
1084 	  val1->lattice_val = VARYING;
1085 	  val1->value = NULL_TREE;
1086 	}
1087     }
1088   else if (val1->lattice_val == CONSTANT
1089 	   && val2->lattice_val == CONSTANT
1090 	   && operand_equal_p (val1->value, val2->value, 0))
1091     {
1092       /* Ci M Cj = Ci		if (i == j)
1093 	 Ci M Cj = VARYING	if (i != j)
1094 
1095          VAL1 already contains the value we want for equivalent values.  */
1096     }
1097   else if (val1->lattice_val == CONSTANT
1098 	   && val2->lattice_val == CONSTANT
1099 	   && (TREE_CODE (val1->value) == ADDR_EXPR
1100 	       || TREE_CODE (val2->value) == ADDR_EXPR))
1101     {
1102       /* When unequal addresses are involved, try meeting for
1103 	 alignment.  */
1104       ccp_prop_value_t tem = *val2;
1105       if (TREE_CODE (val1->value) == ADDR_EXPR)
1106 	*val1 = get_value_for_expr (val1->value, true);
1107       if (TREE_CODE (val2->value) == ADDR_EXPR)
1108 	tem = get_value_for_expr (val2->value, true);
1109       ccp_lattice_meet (val1, &tem);
1110     }
1111   else
1112     {
1113       /* Any other combination is VARYING.  */
1114       val1->lattice_val = VARYING;
1115       val1->mask = -1;
1116       val1->value = NULL_TREE;
1117     }
1118 }
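/* A small worked example of the INTEGER_CST meet above: meeting
   CONSTANT 0x12 (mask 0) with CONSTANT 0x16 (mask 0) yields
   mask == 0 | 0 | (0x12 ^ 0x16) == 0x4, so the result stays CONSTANT
   with value 0x12 but bit 2 is no longer known; only when all bits in
   the precision become unknown does the result drop to VARYING.  */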
1119 
1120 
1121 /* Loop through the PHI_NODE's parameters for BLOCK and compare their
1122    lattice values to determine PHI_NODE's lattice value.  The value of a
1123    PHI node is determined by calling ccp_lattice_meet with all the arguments
1124    of the PHI node that are incoming via executable edges.  */
1125 
1126 enum ssa_prop_result
1127 ccp_propagate::visit_phi (gphi *phi)
1128 {
1129   unsigned i;
1130   ccp_prop_value_t new_val;
1131 
1132   if (dump_file && (dump_flags & TDF_DETAILS))
1133     {
1134       fprintf (dump_file, "\nVisiting PHI node: ");
1135       print_gimple_stmt (dump_file, phi, 0, dump_flags);
1136     }
1137 
1138   new_val.lattice_val = UNDEFINED;
1139   new_val.value = NULL_TREE;
1140   new_val.mask = 0;
1141 
1142   bool first = true;
1143   bool non_exec_edge = false;
1144   for (i = 0; i < gimple_phi_num_args (phi); i++)
1145     {
1146       /* Compute the meet operator over all the PHI arguments flowing
1147 	 through executable edges.  */
1148       edge e = gimple_phi_arg_edge (phi, i);
1149 
1150       if (dump_file && (dump_flags & TDF_DETAILS))
1151 	{
1152 	  fprintf (dump_file,
1153 	      "\tArgument #%d (%d -> %d %sexecutable)\n",
1154 	      i, e->src->index, e->dest->index,
1155 	      (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
1156 	}
1157 
1158       /* If the incoming edge is executable, compute the meet operator for
1159 	 the existing value of the PHI node and the current PHI argument.  */
1160       if (e->flags & EDGE_EXECUTABLE)
1161 	{
1162 	  tree arg = gimple_phi_arg (phi, i)->def;
1163 	  ccp_prop_value_t arg_val = get_value_for_expr (arg, false);
1164 
1165 	  if (first)
1166 	    {
1167 	      new_val = arg_val;
1168 	      first = false;
1169 	    }
1170 	  else
1171 	    ccp_lattice_meet (&new_val, &arg_val);
1172 
1173 	  if (dump_file && (dump_flags & TDF_DETAILS))
1174 	    {
1175 	      fprintf (dump_file, "\t");
1176 	      print_generic_expr (dump_file, arg, dump_flags);
1177 	      dump_lattice_value (dump_file, "\tValue: ", arg_val);
1178 	      fprintf (dump_file, "\n");
1179 	    }
1180 
1181 	  if (new_val.lattice_val == VARYING)
1182 	    break;
1183 	}
1184       else
1185 	non_exec_edge = true;
1186     }
1187 
1188   /* In case there were non-executable edges and the value is a copy
1189      make sure its definition dominates the PHI node.  */
1190   if (non_exec_edge
1191       && new_val.lattice_val == CONSTANT
1192       && TREE_CODE (new_val.value) == SSA_NAME
1193       && ! SSA_NAME_IS_DEFAULT_DEF (new_val.value)
1194       && ! dominated_by_p (CDI_DOMINATORS, gimple_bb (phi),
1195 			   gimple_bb (SSA_NAME_DEF_STMT (new_val.value))))
1196     {
1197       new_val.lattice_val = VARYING;
1198       new_val.value = NULL_TREE;
1199       new_val.mask = -1;
1200     }
1201 
1202   if (dump_file && (dump_flags & TDF_DETAILS))
1203     {
1204       dump_lattice_value (dump_file, "\n    PHI node value: ", new_val);
1205       fprintf (dump_file, "\n\n");
1206     }
1207 
1208   /* Make the transition to the new value.  */
1209   if (set_lattice_value (gimple_phi_result (phi), &new_val))
1210     {
1211       if (new_val.lattice_val == VARYING)
1212 	return SSA_PROP_VARYING;
1213       else
1214 	return SSA_PROP_INTERESTING;
1215     }
1216   else
1217     return SSA_PROP_NOT_INTERESTING;
1218 }
1219 
1220 /* Return the constant value for OP, or OP itself otherwise.  */
1221 
1222 static tree
1223 valueize_op (tree op)
1224 {
1225   if (TREE_CODE (op) == SSA_NAME)
1226     {
1227       tree tem = get_constant_value (op);
1228       if (tem)
1229 	return tem;
1230     }
1231   return op;
1232 }
1233 
1234 /* Return the constant value for OP, but signal to not follow SSA
1235    edges if the definition may be simulated again.  */
1236 
1237 static tree
1238 valueize_op_1 (tree op)
1239 {
1240   if (TREE_CODE (op) == SSA_NAME)
1241     {
1242       /* If the definition may be simulated again we cannot follow
1243          this SSA edge as the SSA propagator does not necessarily
1244 	 re-visit the use.  */
1245       gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1246       if (!gimple_nop_p (def_stmt)
1247 	  && prop_simulate_again_p (def_stmt))
1248 	return NULL_TREE;
1249       tree tem = get_constant_value (op);
1250       if (tem)
1251 	return tem;
1252     }
1253   return op;
1254 }
1255 
1256 /* CCP specific front-end to the non-destructive constant folding
1257    routines.
1258 
1259    Attempt to simplify the RHS of STMT knowing that one or more
1260    operands are constants.
1261 
1262    If simplification is possible, return the simplified RHS,
1263    otherwise return the original RHS or NULL_TREE.  */
1264 
1265 static tree
1266 ccp_fold (gimple *stmt)
1267 {
1268   location_t loc = gimple_location (stmt);
1269   switch (gimple_code (stmt))
1270     {
1271     case GIMPLE_COND:
1272       {
1273         /* Handle comparison operators that can appear in GIMPLE form.  */
1274         tree op0 = valueize_op (gimple_cond_lhs (stmt));
1275         tree op1 = valueize_op (gimple_cond_rhs (stmt));
1276         enum tree_code code = gimple_cond_code (stmt);
1277         return fold_binary_loc (loc, code, boolean_type_node, op0, op1);
1278       }
1279 
1280     case GIMPLE_SWITCH:
1281       {
1282 	/* Return the constant switch index.  */
1283         return valueize_op (gimple_switch_index (as_a <gswitch *> (stmt)));
1284       }
1285 
1286     case GIMPLE_ASSIGN:
1287     case GIMPLE_CALL:
1288       return gimple_fold_stmt_to_constant_1 (stmt,
1289 					     valueize_op, valueize_op_1);
1290 
1291     default:
1292       gcc_unreachable ();
1293     }
1294 }
1295 
1296 /* Apply the operation CODE in type TYPE to the value, mask pair
1297    RVAL and RMASK representing a value of type RTYPE and set
1298    the value, mask pair *VAL and *MASK to the result.  */
1299 
1300 void
1301 bit_value_unop (enum tree_code code, signop type_sgn, int type_precision,
1302 		widest_int *val, widest_int *mask,
1303 		signop rtype_sgn, int rtype_precision,
1304 		const widest_int &rval, const widest_int &rmask)
1305 {
1306   switch (code)
1307     {
1308     case BIT_NOT_EXPR:
1309       *mask = rmask;
1310       *val = ~rval;
1311       break;
1312 
1313     case NEGATE_EXPR:
1314       {
1315 	widest_int temv, temm;
1316 	/* Return ~rval + 1.  */
1317 	bit_value_unop (BIT_NOT_EXPR, type_sgn, type_precision, &temv, &temm,
1318 			type_sgn, type_precision, rval, rmask);
1319 	bit_value_binop (PLUS_EXPR, type_sgn, type_precision, val, mask,
1320 			 type_sgn, type_precision, temv, temm,
1321 			 type_sgn, type_precision, 1, 0);
1322 	break;
1323       }
1324 
1325     CASE_CONVERT:
1326       {
1327 	/* First extend mask and value according to the original type.  */
1328 	*mask = wi::ext (rmask, rtype_precision, rtype_sgn);
1329 	*val = wi::ext (rval, rtype_precision, rtype_sgn);
1330 
1331 	/* Then extend mask and value according to the target type.  */
1332 	*mask = wi::ext (*mask, type_precision, type_sgn);
1333 	*val = wi::ext (*val, type_precision, type_sgn);
1334 	break;
1335       }
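      /* A worked example of the conversion handling above: widening
	 (val 0x80, mask 0x0f) from an 8-bit unsigned type to a 32-bit
	 signed type first zero-extends both at 8 bits and then extends
	 them to 32 bits, leaving val 0x80 with mask 0x0f -- the low four
	 bits stay unknown, bit 7 is known to be set and all higher bits
	 are known to be zero.  */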
1336 
1337     default:
1338       *mask = -1;
1339       break;
1340     }
1341 }
1342 
1343 /* Apply the operation CODE in type TYPE to the value, mask pairs
1344    R1VAL, R1MASK and R2VAL, R2MASK representing values of types R1TYPE
1345    and R2TYPE and set the value, mask pair *VAL and *MASK to the result.  */
1346 
1347 void
1348 bit_value_binop (enum tree_code code, signop sgn, int width,
1349 		 widest_int *val, widest_int *mask,
1350 		 signop r1type_sgn, int r1type_precision,
1351 		 const widest_int &r1val, const widest_int &r1mask,
1352 		 signop r2type_sgn, int r2type_precision,
1353 		 const widest_int &r2val, const widest_int &r2mask)
1354 {
1355   bool swap_p = false;
1356 
1357   /* Assume we'll get a constant result.  Use an initial non varying
1358      value, we fall back to varying in the end if necessary.  */
1359   *mask = -1;
1360 
1361   switch (code)
1362     {
1363     case BIT_AND_EXPR:
1364       /* The mask is constant where there is a known not
1365 	 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
1366       *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
1367       *val = r1val & r2val;
1368       break;
1369 
1370     case BIT_IOR_EXPR:
1371       /* The mask is constant where there is a known
1372 	 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)).  */
1373       *mask = wi::bit_and_not (r1mask | r2mask,
1374 			       wi::bit_and_not (r1val, r1mask)
1375 			       | wi::bit_and_not (r2val, r2mask));
1376       *val = r1val | r2val;
1377       break;
1378 
1379     case BIT_XOR_EXPR:
1380       /* m1 | m2  */
1381       *mask = r1mask | r2mask;
1382       *val = r1val ^ r2val;
1383       break;
1384 
1385     case LROTATE_EXPR:
1386     case RROTATE_EXPR:
1387       if (r2mask == 0)
1388 	{
1389 	  widest_int shift = r2val;
1390 	  if (shift == 0)
1391 	    {
1392 	      *mask = r1mask;
1393 	      *val = r1val;
1394 	    }
1395 	  else
1396 	    {
1397 	      if (wi::neg_p (shift))
1398 		{
1399 		  shift = -shift;
1400 		  if (code == RROTATE_EXPR)
1401 		    code = LROTATE_EXPR;
1402 		  else
1403 		    code = RROTATE_EXPR;
1404 		}
1405 	      if (code == RROTATE_EXPR)
1406 		{
1407 		  *mask = wi::rrotate (r1mask, shift, width);
1408 		  *val = wi::rrotate (r1val, shift, width);
1409 		}
1410 	      else
1411 		{
1412 		  *mask = wi::lrotate (r1mask, shift, width);
1413 		  *val = wi::lrotate (r1val, shift, width);
1414 		}
1415 	      *mask = wi::ext (*mask, width, sgn);
1416 	      *val = wi::ext (*val, width, sgn);
1417 	    }
1418 	}
1419       break;
1420 
1421     case LSHIFT_EXPR:
1422     case RSHIFT_EXPR:
1423       /* ???  We can handle partially known shift counts if we know
1424 	 their sign.  That way we can tell that (x << (y | 8)) & 255
1425 	 is zero.  */
1426       if (r2mask == 0)
1427 	{
1428 	  widest_int shift = r2val;
1429 	  if (shift == 0)
1430 	    {
1431 	      *mask = r1mask;
1432 	      *val = r1val;
1433 	    }
1434 	  else
1435 	    {
1436 	      if (wi::neg_p (shift))
1437 		{
1438 		  shift = -shift;
1439 		  if (code == RSHIFT_EXPR)
1440 		    code = LSHIFT_EXPR;
1441 		  else
1442 		    code = RSHIFT_EXPR;
1443 		}
1444 	      if (code == RSHIFT_EXPR)
1445 		{
1446 		  *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
1447 		  *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
1448 		}
1449 	      else
1450 		{
1451 		  *mask = wi::ext (r1mask << shift, width, sgn);
1452 		  *val = wi::ext (r1val << shift, width, sgn);
1453 		}
1454 	    }
1455 	}
1456       break;
1457 
1458     case PLUS_EXPR:
1459     case POINTER_PLUS_EXPR:
1460       {
1461 	/* Do the addition with unknown bits set to zero, to give carry-ins of
1462 	   zero wherever possible.  */
1463 	widest_int lo = (wi::bit_and_not (r1val, r1mask)
1464 			 + wi::bit_and_not (r2val, r2mask));
1465 	lo = wi::ext (lo, width, sgn);
1466 	/* Do the addition with unknown bits set to one, to give carry-ins of
1467 	   one wherever possible.  */
1468 	widest_int hi = (r1val | r1mask) + (r2val | r2mask);
1469 	hi = wi::ext (hi, width, sgn);
1470 	/* Each bit in the result is known if (a) the corresponding bits in
1471 	   both inputs are known, and (b) the carry-in to that bit position
1472 	   is known.  We can check condition (b) by seeing if we got the same
1473 	   result with minimised carries as with maximised carries.  */
1474 	*mask = r1mask | r2mask | (lo ^ hi);
1475 	*mask = wi::ext (*mask, width, sgn);
1476 	/* It shouldn't matter whether we choose lo or hi here.  */
1477 	*val = lo;
1478 	break;
1479       }
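      /* For instance, adding (val 0x4, mask 0x3), i.e. any of 4..7, to
	 the constant 1 gives lo == 5 and hi == 8, hence
	 mask == 0x3 | (5 ^ 8) == 0xf: none of the low four bits is
	 known, but all higher bits are known to be zero.  */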
1480 
1481     case MINUS_EXPR:
1482       {
1483 	widest_int temv, temm;
1484 	bit_value_unop (NEGATE_EXPR, r2type_sgn, r2type_precision, &temv, &temm,
1485 			  r2type_sgn, r2type_precision, r2val, r2mask);
1486 	bit_value_binop (PLUS_EXPR, sgn, width, val, mask,
1487 			 r1type_sgn, r1type_precision, r1val, r1mask,
1488 			 r2type_sgn, r2type_precision, temv, temm);
1489 	break;
1490       }
1491 
1492     case MULT_EXPR:
1493       {
1494 	/* Just track trailing zeros in both operands and transfer
1495 	   them to the other.  */
1496 	int r1tz = wi::ctz (r1val | r1mask);
1497 	int r2tz = wi::ctz (r2val | r2mask);
1498 	if (r1tz + r2tz >= width)
1499 	  {
1500 	    *mask = 0;
1501 	    *val = 0;
1502 	  }
1503 	else if (r1tz + r2tz > 0)
1504 	  {
1505 	    *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
1506 			     width, sgn);
1507 	    *val = 0;
1508 	  }
1509 	break;
1510       }
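      /* For instance, multiplying a value known to be a multiple of 8
	 (three trailing zero bits) by one known to be a multiple of 4
	 (two trailing zero bits) gives r1tz + r2tz == 5, so the result
	 is known to be a multiple of 32: the low five bits are known to
	 be zero and all higher bits are unknown.  */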
1511 
1512     case EQ_EXPR:
1513     case NE_EXPR:
1514       {
1515 	widest_int m = r1mask | r2mask;
1516 	if (wi::bit_and_not (r1val, m) != wi::bit_and_not (r2val, m))
1517 	  {
1518 	    *mask = 0;
1519 	    *val = ((code == EQ_EXPR) ? 0 : 1);
1520 	  }
1521 	else
1522 	  {
1523 	    /* We know the result of a comparison is always one or zero.  */
1524 	    *mask = 1;
1525 	    *val = 0;
1526 	  }
1527 	break;
1528       }
1529 
1530     case GE_EXPR:
1531     case GT_EXPR:
1532       swap_p = true;
1533       code = swap_tree_comparison (code);
1534       /* Fall through.  */
1535     case LT_EXPR:
1536     case LE_EXPR:
1537       {
1538 	int minmax, maxmin;
1539 
1540 	const widest_int &o1val = swap_p ? r2val : r1val;
1541 	const widest_int &o1mask = swap_p ? r2mask : r1mask;
1542 	const widest_int &o2val = swap_p ? r1val : r2val;
1543 	const widest_int &o2mask = swap_p ? r1mask : r2mask;
1544 
1545 	/* If the most significant bits are not known we know nothing.  */
1546 	if (wi::neg_p (o1mask) || wi::neg_p (o2mask))
1547 	  break;
1548 
1549 	/* For comparisons the signedness is in the comparison operands.  */
1550 	sgn = r1type_sgn;
1551 
1552 	/* If we know the most significant bits we know the value
1553 	   ranges by means of treating the varying bits as zero
1554 	   or one.  Do a cross comparison of the max/min pairs.  */
1555 	maxmin = wi::cmp (o1val | o1mask,
1556 			  wi::bit_and_not (o2val, o2mask), sgn);
1557 	minmax = wi::cmp (wi::bit_and_not (o1val, o1mask),
1558 			  o2val | o2mask, sgn);
1559 	if (maxmin < 0)  /* o1 is less than o2.  */
1560 	  {
1561 	    *mask = 0;
1562 	    *val = 1;
1563 	  }
1564 	else if (minmax > 0)  /* o1 is not less or equal to o2.  */
1565 	  {
1566 	    *mask = 0;
1567 	    *val = 0;
1568 	  }
1569 	else if (maxmin == minmax)  /* o1 and o2 are equal.  */
1570 	  {
1571 	    /* This probably should never happen as we'd have
1572 	       folded the thing during fully constant value folding.  */
1573 	    *mask = 0;
1574 	    *val = (code == LE_EXPR ? 1 : 0);
1575 	  }
1576 	else
1577 	  {
1578 	    /* We know the result of a comparison is always one or zero.  */
1579 	    *mask = 1;
1580 	    *val = 0;
1581 	  }
1582 	break;
1583       }
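      /* For instance, comparing o1 == (val 0, mask 0x3), i.e. any of
	 0..3, against the constant 8 with LT_EXPR gives
	 maxmin == cmp (3, 8) < 0, so the result is fully known:
	 *mask == 0 and *val == 1.  */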
1584 
1585     default:;
1586     }
1587 }
1588 
1589 /* Return the propagation value when applying the operation CODE to
1590    the value RHS yielding type TYPE.  */
1591 
1592 static ccp_prop_value_t
1593 bit_value_unop (enum tree_code code, tree type, tree rhs)
1594 {
1595   ccp_prop_value_t rval = get_value_for_expr (rhs, true);
1596   widest_int value, mask;
1597   ccp_prop_value_t val;
1598 
1599   if (rval.lattice_val == UNDEFINED)
1600     return rval;
1601 
1602   gcc_assert ((rval.lattice_val == CONSTANT
1603 	       && TREE_CODE (rval.value) == INTEGER_CST)
1604 	      || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1);
1605   bit_value_unop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1606 		  TYPE_SIGN (TREE_TYPE (rhs)), TYPE_PRECISION (TREE_TYPE (rhs)),
1607 		  value_to_wide_int (rval), rval.mask);
1608   if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1609     {
1610       val.lattice_val = CONSTANT;
1611       val.mask = mask;
1612       /* ???  Delay building trees here.  */
1613       val.value = wide_int_to_tree (type, value);
1614     }
1615   else
1616     {
1617       val.lattice_val = VARYING;
1618       val.value = NULL_TREE;
1619       val.mask = -1;
1620     }
1621   return val;
1622 }
1623 
1624 /* Return the propagation value when applying the operation CODE to
1625    the values RHS1 and RHS2 yielding type TYPE.  */
1626 
1627 static ccp_prop_value_t
1628 bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
1629 {
1630   ccp_prop_value_t r1val = get_value_for_expr (rhs1, true);
1631   ccp_prop_value_t r2val = get_value_for_expr (rhs2, true);
1632   widest_int value, mask;
1633   ccp_prop_value_t val;
1634 
1635   if (r1val.lattice_val == UNDEFINED
1636       || r2val.lattice_val == UNDEFINED)
1637     {
1638       val.lattice_val = VARYING;
1639       val.value = NULL_TREE;
1640       val.mask = -1;
1641       return val;
1642     }
1643 
1644   gcc_assert ((r1val.lattice_val == CONSTANT
1645 	       && TREE_CODE (r1val.value) == INTEGER_CST)
1646 	      || wi::sext (r1val.mask,
1647 			   TYPE_PRECISION (TREE_TYPE (rhs1))) == -1);
1648   gcc_assert ((r2val.lattice_val == CONSTANT
1649 	       && TREE_CODE (r2val.value) == INTEGER_CST)
1650 	      || wi::sext (r2val.mask,
1651 			   TYPE_PRECISION (TREE_TYPE (rhs2))) == -1);
1652   bit_value_binop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1653 		   TYPE_SIGN (TREE_TYPE (rhs1)), TYPE_PRECISION (TREE_TYPE (rhs1)),
1654 		   value_to_wide_int (r1val), r1val.mask,
1655 		   TYPE_SIGN (TREE_TYPE (rhs2)), TYPE_PRECISION (TREE_TYPE (rhs2)),
1656 		   value_to_wide_int (r2val), r2val.mask);
1657 
1658   /* (x * x) & 2 == 0.  */
1659   if (code == MULT_EXPR && rhs1 == rhs2 && TYPE_PRECISION (type) > 1)
1660     {
1661       widest_int m = 2;
1662       if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1663 	value = wi::bit_and_not (value, m);
1664       else
1665 	value = 0;
1666       mask = wi::bit_and_not (mask, m);
1667     }
1668 
1669   if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1670     {
1671       val.lattice_val = CONSTANT;
1672       val.mask = mask;
1673       /* ???  Delay building trees here.  */
1674       val.value = wide_int_to_tree (type, value);
1675     }
1676   else
1677     {
1678       val.lattice_val = VARYING;
1679       val.value = NULL_TREE;
1680       val.mask = -1;
1681     }
1682   return val;
1683 }
1684 
1685 /* Return the propagation value for __builtin_assume_aligned
1686    and functions with assume_aligned or alloc_aligned attribute.
1687    For __builtin_assume_aligned, ATTR is NULL_TREE,
1688    for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
1689    is false, for alloc_aligned attribute ATTR is non-NULL and
1690    ALLOC_ALIGNED is true.  */
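/* Illustrative sketch (names and numbers invented): for
     p2_2 = __builtin_assume_aligned (p1_1, 16, 4);
   ALIGNI is 16 and MISALIGNI is 4, so the four low bits of the result
   become known: the returned lattice value has those mask bits cleared
   and value bits 0b0100, encoding that p2_2 % 16 == 4.  Alignment
   already known for p1_1 is combined in as well.  */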
1691 
1692 static ccp_prop_value_t
1693 bit_value_assume_aligned (gimple *stmt, tree attr, ccp_prop_value_t ptrval,
1694 			  bool alloc_aligned)
1695 {
1696   tree align, misalign = NULL_TREE, type;
1697   unsigned HOST_WIDE_INT aligni, misaligni = 0;
1698   ccp_prop_value_t alignval;
1699   widest_int value, mask;
1700   ccp_prop_value_t val;
1701 
1702   if (attr == NULL_TREE)
1703     {
1704       tree ptr = gimple_call_arg (stmt, 0);
1705       type = TREE_TYPE (ptr);
1706       ptrval = get_value_for_expr (ptr, true);
1707     }
1708   else
1709     {
1710       tree lhs = gimple_call_lhs (stmt);
1711       type = TREE_TYPE (lhs);
1712     }
1713 
1714   if (ptrval.lattice_val == UNDEFINED)
1715     return ptrval;
1716   gcc_assert ((ptrval.lattice_val == CONSTANT
1717 	       && TREE_CODE (ptrval.value) == INTEGER_CST)
1718 	      || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1);
1719   if (attr == NULL_TREE)
1720     {
1721       /* Get aligni and misaligni from __builtin_assume_aligned.  */
1722       align = gimple_call_arg (stmt, 1);
1723       if (!tree_fits_uhwi_p (align))
1724 	return ptrval;
1725       aligni = tree_to_uhwi (align);
1726       if (gimple_call_num_args (stmt) > 2)
1727 	{
1728 	  misalign = gimple_call_arg (stmt, 2);
1729 	  if (!tree_fits_uhwi_p (misalign))
1730 	    return ptrval;
1731 	  misaligni = tree_to_uhwi (misalign);
1732 	}
1733     }
1734   else
1735     {
1736       /* Get aligni and misaligni from assume_aligned or
1737 	 alloc_align attributes.  */
1738       if (TREE_VALUE (attr) == NULL_TREE)
1739 	return ptrval;
1740       attr = TREE_VALUE (attr);
1741       align = TREE_VALUE (attr);
1742       if (!tree_fits_uhwi_p (align))
1743 	return ptrval;
1744       aligni = tree_to_uhwi (align);
1745       if (alloc_aligned)
1746 	{
1747 	  if (aligni == 0 || aligni > gimple_call_num_args (stmt))
1748 	    return ptrval;
1749 	  align = gimple_call_arg (stmt, aligni - 1);
1750 	  if (!tree_fits_uhwi_p (align))
1751 	    return ptrval;
1752 	  aligni = tree_to_uhwi (align);
1753 	}
1754       else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr)))
1755 	{
1756 	  misalign = TREE_VALUE (TREE_CHAIN (attr));
1757 	  if (!tree_fits_uhwi_p (misalign))
1758 	    return ptrval;
1759 	  misaligni = tree_to_uhwi (misalign);
1760 	}
1761     }
1762   if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni)
1763     return ptrval;
1764 
1765   align = build_int_cst_type (type, -aligni);
1766   alignval = get_value_for_expr (align, true);
1767   bit_value_binop (BIT_AND_EXPR, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask,
1768 		   TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (ptrval), ptrval.mask,
1769 		   TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (alignval), alignval.mask);
1770 
1771   if (wi::sext (mask, TYPE_PRECISION (type)) != -1)
1772     {
1773       val.lattice_val = CONSTANT;
1774       val.mask = mask;
1775       gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
1776       gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
1777       value |= misaligni;
1778       /* ???  Delay building trees here.  */
1779       val.value = wide_int_to_tree (type, value);
1780     }
1781   else
1782     {
1783       val.lattice_val = VARYING;
1784       val.value = NULL_TREE;
1785       val.mask = -1;
1786     }
1787   return val;
1788 }
1789 
1790 /* Evaluate statement STMT.
1791    Valid only for assignments, calls, conditionals, and switches. */
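/* Rough sketch of the flow (statement invented): for
     x_2 = y_1 + 1;
   with y_1 already CONSTANT 3 in the lattice, likely_value returns
   CONSTANT, ccp_fold simplifies the RHS to 4, and a CONSTANT lattice
   value with value 4 and mask 0 is returned.  When folding fails but
   bitwise tracking applies, the bit_value_* helpers above are
   consulted instead.  */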
1792 
1793 static ccp_prop_value_t
1794 evaluate_stmt (gimple *stmt)
1795 {
1796   ccp_prop_value_t val;
1797   tree simplified = NULL_TREE;
1798   ccp_lattice_t likelyvalue = likely_value (stmt);
1799   bool is_constant = false;
1800   unsigned int align;
1801 
1802   if (dump_file && (dump_flags & TDF_DETAILS))
1803     {
1804       fprintf (dump_file, "which is likely ");
1805       switch (likelyvalue)
1806 	{
1807 	case CONSTANT:
1808 	  fprintf (dump_file, "CONSTANT");
1809 	  break;
1810 	case UNDEFINED:
1811 	  fprintf (dump_file, "UNDEFINED");
1812 	  break;
1813 	case VARYING:
1814 	  fprintf (dump_file, "VARYING");
1815 	  break;
1816 	default:;
1817 	}
1818       fprintf (dump_file, "\n");
1819     }
1820 
1821   /* If the statement is likely to have a CONSTANT result, then try
1822      to fold the statement to determine the constant value.  */
1823   /* FIXME.  This is the only place that we call ccp_fold.
1824      Since likely_value never returns CONSTANT for calls, we will
1825      not attempt to fold them, including builtins that may profit.  */
1826   if (likelyvalue == CONSTANT)
1827     {
1828       fold_defer_overflow_warnings ();
1829       simplified = ccp_fold (stmt);
1830       if (simplified
1831 	  && TREE_CODE (simplified) == SSA_NAME)
1832 	{
1833 	  /* We may not use values of something that may be simulated again,
1834 	     see valueize_op_1.  */
1835 	  if (SSA_NAME_IS_DEFAULT_DEF (simplified)
1836 	      || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (simplified)))
1837 	    {
1838 	      ccp_prop_value_t *val = get_value (simplified);
1839 	      if (val && val->lattice_val != VARYING)
1840 		{
1841 		  fold_undefer_overflow_warnings (true, stmt, 0);
1842 		  return *val;
1843 		}
1844 	    }
1845 	  else
1846 	    /* We may also not place a non-valueized copy in the lattice
1847 	       as that might become stale if we never re-visit this stmt.  */
1848 	    simplified = NULL_TREE;
1849 	}
1850       is_constant = simplified && is_gimple_min_invariant (simplified);
1851       fold_undefer_overflow_warnings (is_constant, stmt, 0);
1852       if (is_constant)
1853 	{
1854 	  /* The statement produced a constant value.  */
1855 	  val.lattice_val = CONSTANT;
1856 	  val.value = simplified;
1857 	  val.mask = 0;
1858 	  return val;
1859 	}
1860     }
1861   /* If the statement is likely to have a VARYING result, then do not
1862      bother folding the statement.  */
1863   else if (likelyvalue == VARYING)
1864     {
1865       enum gimple_code code = gimple_code (stmt);
1866       if (code == GIMPLE_ASSIGN)
1867         {
1868           enum tree_code subcode = gimple_assign_rhs_code (stmt);
1869 
1870           /* Other cases cannot satisfy is_gimple_min_invariant
1871              without folding.  */
1872           if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS)
1873             simplified = gimple_assign_rhs1 (stmt);
1874         }
1875       else if (code == GIMPLE_SWITCH)
1876         simplified = gimple_switch_index (as_a <gswitch *> (stmt));
1877       else
1878 	/* These cannot satisfy is_gimple_min_invariant without folding.  */
1879 	gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
1880       is_constant = simplified && is_gimple_min_invariant (simplified);
1881       if (is_constant)
1882 	{
1883 	  /* The statement produced a constant value.  */
1884 	  val.lattice_val = CONSTANT;
1885 	  val.value = simplified;
1886 	  val.mask = 0;
1887 	}
1888     }
1889   /* If the statement result is likely UNDEFINED, make it so.  */
1890   else if (likelyvalue == UNDEFINED)
1891     {
1892       val.lattice_val = UNDEFINED;
1893       val.value = NULL_TREE;
1894       val.mask = 0;
1895       return val;
1896     }
1897 
1898   /* Resort to simplification for bitwise tracking.  */
1899   if (flag_tree_bit_ccp
1900       && (likelyvalue == CONSTANT || is_gimple_call (stmt)
1901 	  || (gimple_assign_single_p (stmt)
1902 	      && gimple_assign_rhs_code (stmt) == ADDR_EXPR))
1903       && !is_constant)
1904     {
1905       enum gimple_code code = gimple_code (stmt);
1906       val.lattice_val = VARYING;
1907       val.value = NULL_TREE;
1908       val.mask = -1;
1909       if (code == GIMPLE_ASSIGN)
1910 	{
1911 	  enum tree_code subcode = gimple_assign_rhs_code (stmt);
1912 	  tree rhs1 = gimple_assign_rhs1 (stmt);
1913 	  tree lhs = gimple_assign_lhs (stmt);
1914 	  if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
1915 	       || POINTER_TYPE_P (TREE_TYPE (lhs)))
1916 	      && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1917 		  || POINTER_TYPE_P (TREE_TYPE (rhs1))))
1918 	    switch (get_gimple_rhs_class (subcode))
1919 	      {
1920 	      case GIMPLE_SINGLE_RHS:
1921 	        val = get_value_for_expr (rhs1, true);
1922 		break;
1923 
1924 	      case GIMPLE_UNARY_RHS:
1925 		val = bit_value_unop (subcode, TREE_TYPE (lhs), rhs1);
1926 		break;
1927 
1928 	      case GIMPLE_BINARY_RHS:
1929 		val = bit_value_binop (subcode, TREE_TYPE (lhs), rhs1,
1930 				       gimple_assign_rhs2 (stmt));
1931 		break;
1932 
1933 	      default:;
1934 	      }
1935 	}
1936       else if (code == GIMPLE_COND)
1937 	{
1938 	  enum tree_code code = gimple_cond_code (stmt);
1939 	  tree rhs1 = gimple_cond_lhs (stmt);
1940 	  tree rhs2 = gimple_cond_rhs (stmt);
1941 	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1942 	      || POINTER_TYPE_P (TREE_TYPE (rhs1)))
1943 	    val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
1944 	}
1945       else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
1946 	{
1947 	  tree fndecl = gimple_call_fndecl (stmt);
1948 	  switch (DECL_FUNCTION_CODE (fndecl))
1949 	    {
1950 	    case BUILT_IN_MALLOC:
1951 	    case BUILT_IN_REALLOC:
1952 	    case BUILT_IN_CALLOC:
1953 	    case BUILT_IN_STRDUP:
1954 	    case BUILT_IN_STRNDUP:
1955 	      val.lattice_val = CONSTANT;
1956 	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1957 	      val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
1958 			   / BITS_PER_UNIT - 1);
1959 	      break;
1960 
1961 	    CASE_BUILT_IN_ALLOCA:
1962 	      align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA
1963 		       ? BIGGEST_ALIGNMENT
1964 		       : TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
1965 	      val.lattice_val = CONSTANT;
1966 	      val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
1967 	      val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
1968 	      break;
1969 
1970 	    /* These builtins return their first argument, unmodified.  */
1971 	    case BUILT_IN_MEMCPY:
1972 	    case BUILT_IN_MEMMOVE:
1973 	    case BUILT_IN_MEMSET:
1974 	    case BUILT_IN_STRCPY:
1975 	    case BUILT_IN_STRNCPY:
1976 	    case BUILT_IN_MEMCPY_CHK:
1977 	    case BUILT_IN_MEMMOVE_CHK:
1978 	    case BUILT_IN_MEMSET_CHK:
1979 	    case BUILT_IN_STRCPY_CHK:
1980 	    case BUILT_IN_STRNCPY_CHK:
1981 	      val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
1982 	      break;
1983 
1984 	    case BUILT_IN_ASSUME_ALIGNED:
1985 	      val = bit_value_assume_aligned (stmt, NULL_TREE, val, false);
1986 	      break;
1987 
1988 	    case BUILT_IN_ALIGNED_ALLOC:
1989 	      {
1990 		tree align = get_constant_value (gimple_call_arg (stmt, 0));
1991 		if (align
1992 		    && tree_fits_uhwi_p (align))
1993 		  {
1994 		    unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
1995 		    if (aligni > 1
1996 			/* align must be power-of-two */
1997 			&& (aligni & (aligni - 1)) == 0)
1998 		      {
1999 			val.lattice_val = CONSTANT;
2000 			val.value = build_int_cst (ptr_type_node, 0);
2001 			val.mask = -aligni;
2002 		      }
2003 		  }
2004 		break;
2005 	      }
2006 
2007 	    case BUILT_IN_BSWAP16:
2008 	    case BUILT_IN_BSWAP32:
2009 	    case BUILT_IN_BSWAP64:
2010 	      val = get_value_for_expr (gimple_call_arg (stmt, 0), true);
2011 	      if (val.lattice_val == UNDEFINED)
2012 		break;
2013 	      else if (val.lattice_val == CONSTANT
2014 		       && val.value
2015 		       && TREE_CODE (val.value) == INTEGER_CST)
2016 		{
2017 		  tree type = TREE_TYPE (gimple_call_lhs (stmt));
2018 		  int prec = TYPE_PRECISION (type);
2019 		  wide_int wval = wi::to_wide (val.value);
2020 		  val.value
2021 		    = wide_int_to_tree (type,
2022 					wide_int::from (wval, prec,
2023 							UNSIGNED).bswap ());
2024 		  val.mask
2025 		    = widest_int::from (wide_int::from (val.mask, prec,
2026 							UNSIGNED).bswap (),
2027 					UNSIGNED);
2028 		  if (wi::sext (val.mask, prec) != -1)
2029 		    break;
2030 		}
2031 	      val.lattice_val = VARYING;
2032 	      val.value = NULL_TREE;
2033 	      val.mask = -1;
2034 	      break;
2035 
2036 	    default:;
2037 	    }
2038 	}
2039       if (is_gimple_call (stmt) && gimple_call_lhs (stmt))
2040 	{
2041 	  tree fntype = gimple_call_fntype (stmt);
2042 	  if (fntype)
2043 	    {
2044 	      tree attrs = lookup_attribute ("assume_aligned",
2045 					     TYPE_ATTRIBUTES (fntype));
2046 	      if (attrs)
2047 		val = bit_value_assume_aligned (stmt, attrs, val, false);
2048 	      attrs = lookup_attribute ("alloc_align",
2049 					TYPE_ATTRIBUTES (fntype));
2050 	      if (attrs)
2051 		val = bit_value_assume_aligned (stmt, attrs, val, true);
2052 	    }
2053 	}
2054       is_constant = (val.lattice_val == CONSTANT);
2055     }
2056 
2057   if (flag_tree_bit_ccp
2058       && ((is_constant && TREE_CODE (val.value) == INTEGER_CST)
2059 	  || !is_constant)
2060       && gimple_get_lhs (stmt)
2061       && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME)
2062     {
2063       tree lhs = gimple_get_lhs (stmt);
2064       wide_int nonzero_bits = get_nonzero_bits (lhs);
2065       if (nonzero_bits != -1)
2066 	{
2067 	  if (!is_constant)
2068 	    {
2069 	      val.lattice_val = CONSTANT;
2070 	      val.value = build_zero_cst (TREE_TYPE (lhs));
2071 	      val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (lhs)));
2072 	      is_constant = true;
2073 	    }
2074 	  else
2075 	    {
2076 	      if (wi::bit_and_not (wi::to_wide (val.value), nonzero_bits) != 0)
2077 		val.value = wide_int_to_tree (TREE_TYPE (lhs),
2078 					      nonzero_bits
2079 					      & wi::to_wide (val.value));
2080 	      if (nonzero_bits == 0)
2081 		val.mask = 0;
2082 	      else
2083 		val.mask = val.mask & extend_mask (nonzero_bits,
2084 						   TYPE_SIGN (TREE_TYPE (lhs)));
2085 	    }
2086 	}
2087     }
2088 
2089   /* The statement produced a nonconstant value.  */
2090   if (!is_constant)
2091     {
2092       /* The statement produced a copy.  */
2093       if (simplified && TREE_CODE (simplified) == SSA_NAME
2094 	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified))
2095 	{
2096 	  val.lattice_val = CONSTANT;
2097 	  val.value = simplified;
2098 	  val.mask = -1;
2099 	}
2100       /* The statement is VARYING.  */
2101       else
2102 	{
2103 	  val.lattice_val = VARYING;
2104 	  val.value = NULL_TREE;
2105 	  val.mask = -1;
2106 	}
2107     }
2108 
2109   return val;
2110 }
2111 
2112 typedef hash_table<nofree_ptr_hash<gimple> > gimple_htab;
2113 
2114 /* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
2115    each matching BUILT_IN_STACK_RESTORE.  Mark visited phis in VISITED.  */
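/* Sketch of why the recursion below is needed (SSA names invented):
   SAVED_VAL may not feed a restore directly but flow through copies
   or PHIs first, e.g.
     saved_2 = __builtin_stack_save ();
     saved_3 = saved_2;
     # saved_4 = PHI <saved_3(3), saved_2(4)>
     __builtin_stack_restore (saved_4);
   so the walk follows SSA name copies and PHI results, using VISITED
   to avoid processing the same PHI twice.  */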
2116 
2117 static void
2118 insert_clobber_before_stack_restore (tree saved_val, tree var,
2119 				     gimple_htab **visited)
2120 {
2121   gimple *stmt;
2122   gassign *clobber_stmt;
2123   tree clobber;
2124   imm_use_iterator iter;
2125   gimple_stmt_iterator i;
2126   gimple **slot;
2127 
2128   FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
2129     if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
2130       {
2131 	clobber = build_clobber (TREE_TYPE (var));
2132 	clobber_stmt = gimple_build_assign (var, clobber);
2133 
2134 	i = gsi_for_stmt (stmt);
2135 	gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
2136       }
2137     else if (gimple_code (stmt) == GIMPLE_PHI)
2138       {
2139 	if (!*visited)
2140 	  *visited = new gimple_htab (10);
2141 
2142 	slot = (*visited)->find_slot (stmt, INSERT);
2143 	if (*slot != NULL)
2144 	  continue;
2145 
2146 	*slot = stmt;
2147 	insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
2148 					     visited);
2149       }
2150     else if (gimple_assign_ssa_name_copy_p (stmt))
2151       insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
2152 					   visited);
2153 }
2154 
2155 /* Advance the iterator to the previous non-debug gimple statement in the same
2156    or dominating basic block.  */
2157 
2158 static inline void
2159 gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
2160 {
2161   basic_block dom;
2162 
2163   gsi_prev_nondebug (i);
2164   while (gsi_end_p (*i))
2165     {
2166       dom = get_immediate_dominator (CDI_DOMINATORS, i->bb);
2167       if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2168 	return;
2169 
2170       *i = gsi_last_bb (dom);
2171     }
2172 }
2173 
2174 /* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
2175    a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
2176 
2177    It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when
2178    a previous pass (such as DOM) duplicated it along multiple paths to a BB.
2179    In that case the function gives up without inserting the clobbers.  */
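/* Illustrative GIMPLE sketch (names invented) of the situation handled
   here once fold_builtin_alloca_with_align has replaced an alloca with
   the address of a fixed-size array VAR:
     saved_1 = __builtin_stack_save ();
     ...
     p_2 = &VAR;                        <- was __builtin_alloca_with_align
     ...
     __builtin_stack_restore (saved_1);
   A clobber
     VAR ={v} {CLOBBER};
   is inserted before the restore so that VAR's lifetime still ends
   where the alloca'd memory would have been released.  */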
2180 
2181 static void
2182 insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
2183 {
2184   gimple *stmt;
2185   tree saved_val;
2186   gimple_htab *visited = NULL;
2187 
2188   for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
2189     {
2190       stmt = gsi_stmt (i);
2191 
2192       if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
2193 	continue;
2194 
2195       saved_val = gimple_call_lhs (stmt);
2196       if (saved_val == NULL_TREE)
2197 	continue;
2198 
2199       insert_clobber_before_stack_restore (saved_val, var, &visited);
2200       break;
2201     }
2202 
2203   delete visited;
2204 }
2205 
2206 /* Detect a __builtin_alloca_with_align with a constant size argument.  If
2207    found, declare a fixed-size array to replace it and return the array's
2208    address; otherwise return NULL_TREE.  */
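/* As an illustrative sketch (names and numbers invented), a call such as
     p_1 = __builtin_alloca_with_align (16, 128);
   with a small enough constant size is folded into the address of a new
   16-byte temporary array whose alignment is taken from the second
   argument (given in bits), roughly
     p_1 = &D.1234;
   leaving insert_clobbers_for_var to bound the array's lifetime.  */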
2209 
2210 static tree
2211 fold_builtin_alloca_with_align (gimple *stmt)
2212 {
2213   unsigned HOST_WIDE_INT size, threshold, n_elem;
2214   tree lhs, arg, block, var, elem_type, array_type;
2215 
2216   /* Get lhs.  */
2217   lhs = gimple_call_lhs (stmt);
2218   if (lhs == NULL_TREE)
2219     return NULL_TREE;
2220 
2221   /* Detect constant argument.  */
2222   arg = get_constant_value (gimple_call_arg (stmt, 0));
2223   if (arg == NULL_TREE
2224       || TREE_CODE (arg) != INTEGER_CST
2225       || !tree_fits_uhwi_p (arg))
2226     return NULL_TREE;
2227 
2228   size = tree_to_uhwi (arg);
2229 
2230   /* Heuristic: don't fold large allocas.  */
2231   threshold = (unsigned HOST_WIDE_INT)param_large_stack_frame;
2232   /* In case the alloca is located at function entry, it has the same lifetime
2233      as a declared array, so we allow a larger size.  */
2234   block = gimple_block (stmt);
2235   if (!(cfun->after_inlining
2236 	&& block
2237         && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
2238     threshold /= 10;
2239   if (size > threshold)
2240     return NULL_TREE;
2241 
2242   /* We have to be able to move points-to info.  We used to assert
2243      that we can but IPA PTA might end up with two UIDs here
2244      as it might need to handle more than one instance being
2245      live at the same time.  Instead of trying to detect this case
2246      (using the first UID would be OK) just give up for now.  */
2247   struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
2248   unsigned uid = 0;
2249   if (pi != NULL
2250       && !pi->pt.anything
2251       && !pt_solution_singleton_or_null_p (&pi->pt, &uid))
2252     return NULL_TREE;
2253 
2254   /* Declare array.  */
2255   elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
2256   n_elem = size * 8 / BITS_PER_UNIT;
2257   array_type = build_array_type_nelts (elem_type, n_elem);
2258 
2259   if (tree ssa_name = SSA_NAME_IDENTIFIER (lhs))
2260     {
2261       /* Give the temporary a name derived from the name of the VLA
2262 	 declaration so it can be referenced in diagnostics.  */
2263       const char *name = IDENTIFIER_POINTER (ssa_name);
2264       var = create_tmp_var (array_type, name);
2265     }
2266   else
2267     var = create_tmp_var (array_type);
2268 
2269   if (gimple *lhsdef = SSA_NAME_DEF_STMT (lhs))
2270     {
2271       /* Set the temporary's location to that of the VLA declaration
2272 	 so it can be pointed to in diagnostics.  */
2273       location_t loc = gimple_location (lhsdef);
2274       DECL_SOURCE_LOCATION (var) = loc;
2275     }
2276 
2277   SET_DECL_ALIGN (var, TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
2278   if (uid != 0)
2279     SET_DECL_PT_UID (var, uid);
2280 
2281   /* Fold alloca to the address of the array.  */
2282   return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
2283 }
2284 
2285 /* Fold the stmt at *GSI with CCP specific information that propagating
2286    and regular folding does not catch.  */
2287 
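/* For instance (sketch, names invented), a conditional such as
     if (p_1 != 0B)
   can be folded here when the bit-CCP lattice proves that some bit of
   p_1 is set (say, a known misalignment from assume_aligned), even
   though p_1 itself never becomes a propagatable constant.  */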
2288 bool
2289 ccp_folder::fold_stmt (gimple_stmt_iterator *gsi)
2290 {
2291   gimple *stmt = gsi_stmt (*gsi);
2292 
2293   switch (gimple_code (stmt))
2294     {
2295     case GIMPLE_COND:
2296       {
2297 	gcond *cond_stmt = as_a <gcond *> (stmt);
2298 	ccp_prop_value_t val;
2299 	/* Statement evaluation will handle type mismatches in constants
2300 	   more gracefully than the final propagation.  This allows us to
2301 	   fold more conditionals here.  */
2302 	val = evaluate_stmt (stmt);
2303 	if (val.lattice_val != CONSTANT
2304 	    || val.mask != 0)
2305 	  return false;
2306 
2307 	if (dump_file)
2308 	  {
2309 	    fprintf (dump_file, "Folding predicate ");
2310 	    print_gimple_expr (dump_file, stmt, 0);
2311 	    fprintf (dump_file, " to ");
2312 	    print_generic_expr (dump_file, val.value);
2313 	    fprintf (dump_file, "\n");
2314 	  }
2315 
2316 	if (integer_zerop (val.value))
2317 	  gimple_cond_make_false (cond_stmt);
2318 	else
2319 	  gimple_cond_make_true (cond_stmt);
2320 
2321 	return true;
2322       }
2323 
2324     case GIMPLE_CALL:
2325       {
2326 	tree lhs = gimple_call_lhs (stmt);
2327 	int flags = gimple_call_flags (stmt);
2328 	tree val;
2329 	tree argt;
2330 	bool changed = false;
2331 	unsigned i;
2332 
2333 	/* If the call was folded into a constant make sure it goes
2334 	   away even if we cannot propagate into all uses because of
2335 	   type issues.  */
2336 	if (lhs
2337 	    && TREE_CODE (lhs) == SSA_NAME
2338 	    && (val = get_constant_value (lhs))
2339 	    /* Don't optimize away calls that have side-effects.  */
2340 	    && (flags & (ECF_CONST|ECF_PURE)) != 0
2341 	    && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
2342 	  {
2343 	    tree new_rhs = unshare_expr (val);
2344 	    bool res;
2345 	    if (!useless_type_conversion_p (TREE_TYPE (lhs),
2346 					    TREE_TYPE (new_rhs)))
2347 	      new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
2348 	    res = update_call_from_tree (gsi, new_rhs);
2349 	    gcc_assert (res);
2350 	    return true;
2351 	  }
2352 
2353 	/* Internal calls provide no argument types, so the extra laxity
2354 	   for normal calls does not apply.  */
2355 	if (gimple_call_internal_p (stmt))
2356 	  return false;
2357 
2358         /* The heuristic of fold_builtin_alloca_with_align differs before and
2359 	   after inlining, so we don't require the arg to be changed into a
2360 	   constant for folding, but just to be constant.  */
2361         if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN)
2362 	    || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX))
2363           {
2364             tree new_rhs = fold_builtin_alloca_with_align (stmt);
2365             if (new_rhs)
2366 	      {
2367 		bool res = update_call_from_tree (gsi, new_rhs);
2368 		tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0), 0);
2369 		gcc_assert (res);
2370 		insert_clobbers_for_var (*gsi, var);
2371 		return true;
2372 	      }
2373           }
2374 
2375 	/* If there's no extra info from an assume_aligned call,
2376 	   drop it so it doesn't act as an otherwise useless
2377 	   dataflow barrier.  */
2378 	if (gimple_call_builtin_p (stmt, BUILT_IN_ASSUME_ALIGNED))
2379 	  {
2380 	    tree ptr = gimple_call_arg (stmt, 0);
2381 	    ccp_prop_value_t ptrval = get_value_for_expr (ptr, true);
2382 	    if (ptrval.lattice_val == CONSTANT
2383 		&& TREE_CODE (ptrval.value) == INTEGER_CST
2384 		&& ptrval.mask != 0)
2385 	      {
2386 		ccp_prop_value_t val
2387 		  = bit_value_assume_aligned (stmt, NULL_TREE, ptrval, false);
2388 		unsigned int ptralign = least_bit_hwi (ptrval.mask.to_uhwi ());
2389 		unsigned int align = least_bit_hwi (val.mask.to_uhwi ());
2390 		if (ptralign == align
2391 		    && ((TREE_INT_CST_LOW (ptrval.value) & (align - 1))
2392 			== (TREE_INT_CST_LOW (val.value) & (align - 1))))
2393 		  {
2394 		    bool res = update_call_from_tree (gsi, ptr);
2395 		    gcc_assert (res);
2396 		    return true;
2397 		  }
2398 	      }
2399 	  }
2400 
2401 	/* Propagate into the call arguments.  Compared to replace_uses_in
2402 	   this can use the argument slot types for type verification
2403 	   instead of the current argument type.  We also can safely
2404 	   drop qualifiers here as we are dealing with constants anyway.  */
2405 	argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
2406 	for (i = 0; i < gimple_call_num_args (stmt) && argt;
2407 	     ++i, argt = TREE_CHAIN (argt))
2408 	  {
2409 	    tree arg = gimple_call_arg (stmt, i);
2410 	    if (TREE_CODE (arg) == SSA_NAME
2411 		&& (val = get_constant_value (arg))
2412 		&& useless_type_conversion_p
2413 		     (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
2414 		      TYPE_MAIN_VARIANT (TREE_TYPE (val))))
2415 	      {
2416 		gimple_call_set_arg (stmt, i, unshare_expr (val));
2417 		changed = true;
2418 	      }
2419 	  }
2420 
2421 	return changed;
2422       }
2423 
2424     case GIMPLE_ASSIGN:
2425       {
2426 	tree lhs = gimple_assign_lhs (stmt);
2427 	tree val;
2428 
2429 	/* If we have a load that turned out to be constant replace it
2430 	   as we cannot propagate into all uses in all cases.  */
2431 	if (gimple_assign_single_p (stmt)
2432 	    && TREE_CODE (lhs) == SSA_NAME
2433 	    && (val = get_constant_value (lhs)))
2434 	  {
2435 	    tree rhs = unshare_expr (val);
2436 	    if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
2437 	      rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
2438 	    gimple_assign_set_rhs_from_tree (gsi, rhs);
2439 	    return true;
2440 	  }
2441 
2442 	return false;
2443       }
2444 
2445     default:
2446       return false;
2447     }
2448 }
2449 
2450 /* Visit the assignment statement STMT.  Set the value of its LHS to the
2451    value computed by the RHS and store LHS in *OUTPUT_P.  If STMT
2452    creates virtual definitions, set the value of each new name to that
2453    of the RHS (if we can derive a constant out of the RHS).
2454    Value-returning call statements also perform an assignment, and
2455    are handled here.  */
2456 
2457 static enum ssa_prop_result
2458 visit_assignment (gimple *stmt, tree *output_p)
2459 {
2460   ccp_prop_value_t val;
2461   enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING;
2462 
2463   tree lhs = gimple_get_lhs (stmt);
2464   if (TREE_CODE (lhs) == SSA_NAME)
2465     {
2466       /* Evaluate the statement, which could be
2467 	 either a GIMPLE_ASSIGN or a GIMPLE_CALL.  */
2468       val = evaluate_stmt (stmt);
2469 
2470       /* If STMT is an assignment to an SSA_NAME, we only have one
2471 	 value to set.  */
2472       if (set_lattice_value (lhs, &val))
2473 	{
2474 	  *output_p = lhs;
2475 	  if (val.lattice_val == VARYING)
2476 	    retval = SSA_PROP_VARYING;
2477 	  else
2478 	    retval = SSA_PROP_INTERESTING;
2479 	}
2480     }
2481 
2482   return retval;
2483 }
2484 
2485 
2486 /* Visit the conditional statement STMT.  Return SSA_PROP_INTERESTING
2487    if it can determine which edge will be taken.  Otherwise, return
2488    SSA_PROP_VARYING.  */
2489 
2490 static enum ssa_prop_result
2491 visit_cond_stmt (gimple *stmt, edge *taken_edge_p)
2492 {
2493   ccp_prop_value_t val;
2494   basic_block block;
2495 
2496   block = gimple_bb (stmt);
2497   val = evaluate_stmt (stmt);
2498   if (val.lattice_val != CONSTANT
2499       || val.mask != 0)
2500     return SSA_PROP_VARYING;
2501 
2502   /* Find which edge out of the conditional block will be taken and add it
2503      to the worklist.  If no single edge can be determined statically,
2504      return SSA_PROP_VARYING to feed all the outgoing edges to the
2505      propagation engine.  */
2506   *taken_edge_p = find_taken_edge (block, val.value);
2507   if (*taken_edge_p)
2508     return SSA_PROP_INTERESTING;
2509   else
2510     return SSA_PROP_VARYING;
2511 }
2512 
2513 
2514 /* Evaluate statement STMT.  If the statement produces an output value and
2515    its evaluation changes the lattice value of its output, return
2516    SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2517    output value.
2518 
2519    If STMT is a conditional branch and we can determine its truth
2520    value, set *TAKEN_EDGE_P accordingly.  If STMT produces a varying
2521    value, return SSA_PROP_VARYING.  */
2522 
2523 enum ssa_prop_result
2524 ccp_propagate::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
2525 {
2526   tree def;
2527   ssa_op_iter iter;
2528 
2529   if (dump_file && (dump_flags & TDF_DETAILS))
2530     {
2531       fprintf (dump_file, "\nVisiting statement:\n");
2532       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2533     }
2534 
2535   switch (gimple_code (stmt))
2536     {
2537       case GIMPLE_ASSIGN:
2538         /* If the statement is an assignment that produces a single
2539            output value, evaluate its RHS to see if the lattice value of
2540            its output has changed.  */
2541         return visit_assignment (stmt, output_p);
2542 
2543       case GIMPLE_CALL:
2544         /* A value-returning call also performs an assignment.  */
2545         if (gimple_call_lhs (stmt) != NULL_TREE)
2546           return visit_assignment (stmt, output_p);
2547         break;
2548 
2549       case GIMPLE_COND:
2550       case GIMPLE_SWITCH:
2551         /* If STMT is a conditional branch, see if we can determine
2552            which branch will be taken.   */
2553         /* FIXME.  It appears that we should be able to optimize
2554            computed GOTOs here as well.  */
2555         return visit_cond_stmt (stmt, taken_edge_p);
2556 
2557       default:
2558         break;
2559     }
2560 
2561   /* Any other kind of statement is not interesting for constant
2562      propagation and, therefore, not worth simulating.  */
2563   if (dump_file && (dump_flags & TDF_DETAILS))
2564     fprintf (dump_file, "No interesting values produced.  Marked VARYING.\n");
2565 
2566   /* Definitions made by statements other than assignments to
2567      SSA_NAMEs represent unknown modifications to their outputs.
2568      Mark them VARYING.  */
2569   FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
2570     set_value_varying (def);
2571 
2572   return SSA_PROP_VARYING;
2573 }
2574 
2575 
2576 /* Main entry point for SSA Conditional Constant Propagation.  If NONZERO_P,
2577    record nonzero bits.  */
2578 
2579 static unsigned int
2580 do_ssa_ccp (bool nonzero_p)
2581 {
2582   unsigned int todo = 0;
2583   calculate_dominance_info (CDI_DOMINATORS);
2584 
2585   ccp_initialize ();
2586   class ccp_propagate ccp_propagate;
2587   ccp_propagate.ssa_propagate ();
2588   if (ccp_finalize (nonzero_p || flag_ipa_bit_cp))
2589     {
2590       todo = (TODO_cleanup_cfg | TODO_update_ssa);
2591 
2592       /* ccp_finalize does not preserve loop-closed ssa.  */
2593       loops_state_clear (LOOP_CLOSED_SSA);
2594     }
2595 
2596   free_dominance_info (CDI_DOMINATORS);
2597   return todo;
2598 }
2599 
2600 
2601 namespace {
2602 
2603 const pass_data pass_data_ccp =
2604 {
2605   GIMPLE_PASS, /* type */
2606   "ccp", /* name */
2607   OPTGROUP_NONE, /* optinfo_flags */
2608   TV_TREE_CCP, /* tv_id */
2609   ( PROP_cfg | PROP_ssa ), /* properties_required */
2610   0, /* properties_provided */
2611   0, /* properties_destroyed */
2612   0, /* todo_flags_start */
2613   TODO_update_address_taken, /* todo_flags_finish */
2614 };
2615 
2616 class pass_ccp : public gimple_opt_pass
2617 {
2618 public:
2619   pass_ccp (gcc::context *ctxt)
2620     : gimple_opt_pass (pass_data_ccp, ctxt), nonzero_p (false)
2621   {}
2622 
2623   /* opt_pass methods: */
2624   opt_pass * clone () { return new pass_ccp (m_ctxt); }
2625   void set_pass_param (unsigned int n, bool param)
2626     {
2627       gcc_assert (n == 0);
2628       nonzero_p = param;
2629     }
2630   virtual bool gate (function *) { return flag_tree_ccp != 0; }
2631   virtual unsigned int execute (function *) { return do_ssa_ccp (nonzero_p); }
2632 
2633  private:
2634   /* Determines whether the pass instance records nonzero bits.  */
2635   bool nonzero_p;
2636 }; // class pass_ccp
2637 
2638 } // anon namespace
2639 
2640 gimple_opt_pass *
2641 make_pass_ccp (gcc::context *ctxt)
2642 {
2643   return new pass_ccp (ctxt);
2644 }
2645 
2646 
2647 
2648 /* Try to optimize out __builtin_stack_restore.  Optimize it out
2649    if there is another __builtin_stack_restore in the same basic
2650    block and no calls or ASM_EXPRs are in between, or if this block's
2651    only outgoing edge is to EXIT_BLOCK and there are no calls or
2652    ASM_EXPRs after this __builtin_stack_restore.  */
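/* Illustrative sketch (SSA names invented) of the first case:
     t_1 = __builtin_stack_save ();
     ...
     __builtin_stack_restore (t_1);   <- redundant, removed here
     _2 = a_3 + b_4;                  <- no call or asm in between
     __builtin_stack_restore (t_5);
   The earlier restore can go away because the later one restores the
   stack pointer anyway; if the removed restore was the only use of t_1,
   the matching __builtin_stack_save call is replaced by a zero constant
   as well.  */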
2653 
2654 static tree
2655 optimize_stack_restore (gimple_stmt_iterator i)
2656 {
2657   tree callee;
2658   gimple *stmt;
2659 
2660   basic_block bb = gsi_bb (i);
2661   gimple *call = gsi_stmt (i);
2662 
2663   if (gimple_code (call) != GIMPLE_CALL
2664       || gimple_call_num_args (call) != 1
2665       || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
2666       || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
2667     return NULL_TREE;
2668 
2669   for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i))
2670     {
2671       stmt = gsi_stmt (i);
2672       if (gimple_code (stmt) == GIMPLE_ASM)
2673 	return NULL_TREE;
2674       if (gimple_code (stmt) != GIMPLE_CALL)
2675 	continue;
2676 
2677       callee = gimple_call_fndecl (stmt);
2678       if (!callee
2679 	  || !fndecl_built_in_p (callee, BUILT_IN_NORMAL)
2680 	  /* All regular builtins are ok, just obviously not alloca.  */
2681 	  || ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (callee)))
2682 	return NULL_TREE;
2683 
2684       if (fndecl_built_in_p (callee, BUILT_IN_STACK_RESTORE))
2685 	goto second_stack_restore;
2686     }
2687 
2688   if (!gsi_end_p (i))
2689     return NULL_TREE;
2690 
2691   /* Allow a single successor edge to the exit block, or no successors.  */
2692   switch (EDGE_COUNT (bb->succs))
2693     {
2694     case 0:
2695       break;
2696     case 1:
2697       if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2698 	return NULL_TREE;
2699       break;
2700     default:
2701       return NULL_TREE;
2702     }
2703  second_stack_restore:
2704 
2705   /* If there's exactly one use, then zap the call to __builtin_stack_save.
2706      If there are multiple uses, then the last one should remove the call.
2707      In any case, whether the call to __builtin_stack_save can be removed
2708      or not is irrelevant to removing the call to __builtin_stack_restore.  */
2709   if (has_single_use (gimple_call_arg (call, 0)))
2710     {
2711       gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
2712       if (is_gimple_call (stack_save))
2713 	{
2714 	  callee = gimple_call_fndecl (stack_save);
2715 	  if (callee && fndecl_built_in_p (callee, BUILT_IN_STACK_SAVE))
2716 	    {
2717 	      gimple_stmt_iterator stack_save_gsi;
2718 	      tree rhs;
2719 
2720 	      stack_save_gsi = gsi_for_stmt (stack_save);
2721 	      rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
2722 	      update_call_from_tree (&stack_save_gsi, rhs);
2723 	    }
2724 	}
2725     }
2726 
2727   /* No effect, so the statement will be deleted.  */
2728   return integer_zero_node;
2729 }
2730 
2731 /* If va_list type is a simple pointer and nothing special is needed,
2732    optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
2733    __builtin_va_end (&ap) out as NOP and __builtin_va_copy into a simple
2734    pointer assignment.  */
2735 
2736 static tree
2737 optimize_stdarg_builtin (gimple *call)
2738 {
2739   tree callee, lhs, rhs, cfun_va_list;
2740   bool va_list_simple_ptr;
2741   location_t loc = gimple_location (call);
2742 
2743   callee = gimple_call_fndecl (call);
2744 
2745   cfun_va_list = targetm.fn_abi_va_list (callee);
2746   va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
2747 		       && (TREE_TYPE (cfun_va_list) == void_type_node
2748 			   || TREE_TYPE (cfun_va_list) == char_type_node);
2749 
2750   switch (DECL_FUNCTION_CODE (callee))
2751     {
2752     case BUILT_IN_VA_START:
2753       if (!va_list_simple_ptr
2754 	  || targetm.expand_builtin_va_start != NULL
2755 	  || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG))
2756 	return NULL_TREE;
2757 
2758       if (gimple_call_num_args (call) != 2)
2759 	return NULL_TREE;
2760 
2761       lhs = gimple_call_arg (call, 0);
2762       if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2763 	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2764 	     != TYPE_MAIN_VARIANT (cfun_va_list))
2765 	return NULL_TREE;
2766 
2767       lhs = build_fold_indirect_ref_loc (loc, lhs);
2768       rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG),
2769                              1, integer_zero_node);
2770       rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2771       return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2772 
2773     case BUILT_IN_VA_COPY:
2774       if (!va_list_simple_ptr)
2775 	return NULL_TREE;
2776 
2777       if (gimple_call_num_args (call) != 2)
2778 	return NULL_TREE;
2779 
2780       lhs = gimple_call_arg (call, 0);
2781       if (!POINTER_TYPE_P (TREE_TYPE (lhs))
2782 	  || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
2783 	     != TYPE_MAIN_VARIANT (cfun_va_list))
2784 	return NULL_TREE;
2785 
2786       lhs = build_fold_indirect_ref_loc (loc, lhs);
2787       rhs = gimple_call_arg (call, 1);
2788       if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
2789 	  != TYPE_MAIN_VARIANT (cfun_va_list))
2790 	return NULL_TREE;
2791 
2792       rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
2793       return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
2794 
2795     case BUILT_IN_VA_END:
2796       /* No effect, so the statement will be deleted.  */
2797       return integer_zero_node;
2798 
2799     default:
2800       gcc_unreachable ();
2801     }
2802 }
2803 
2804 /* Attempt to make the block of __builtin_unreachable I unreachable by changing
2805    the incoming jumps.  Return true if at least one jump was changed.  */
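/* Illustrative sketch (names invented): if a predecessor block ends in
     if (x_1 > 10) goto <bb 5>; else goto <bb 4>;
   and <bb 5> contains nothing but the __builtin_unreachable call, the
   edge into <bb 5> is dead, so the condition is rewritten into an
   always-false (or always-true) form and the block is left for a later
   CFG cleanup to remove.  */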
2806 
2807 static bool
2808 optimize_unreachable (gimple_stmt_iterator i)
2809 {
2810   basic_block bb = gsi_bb (i);
2811   gimple_stmt_iterator gsi;
2812   gimple *stmt;
2813   edge_iterator ei;
2814   edge e;
2815   bool ret;
2816 
2817   if (flag_sanitize & SANITIZE_UNREACHABLE)
2818     return false;
2819 
2820   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2821     {
2822       stmt = gsi_stmt (gsi);
2823 
2824       if (is_gimple_debug (stmt))
2825        continue;
2826 
2827       if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
2828 	{
2829 	  /* Verify we do not need to preserve the label.  */
2830 	  if (FORCED_LABEL (gimple_label_label (label_stmt)))
2831 	    return false;
2832 
2833 	  continue;
2834 	}
2835 
2836       /* Only handle the case that __builtin_unreachable is the first statement
2837 	 in the block.  We rely on DCE to remove stmts without side-effects
2838 	 before __builtin_unreachable.  */
2839       if (gsi_stmt (gsi) != gsi_stmt (i))
2840         return false;
2841     }
2842 
2843   ret = false;
2844   FOR_EACH_EDGE (e, ei, bb->preds)
2845     {
2846       gsi = gsi_last_bb (e->src);
2847       if (gsi_end_p (gsi))
2848 	continue;
2849 
2850       stmt = gsi_stmt (gsi);
2851       if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
2852 	{
2853 	  if (e->flags & EDGE_TRUE_VALUE)
2854 	    gimple_cond_make_false (cond_stmt);
2855 	  else if (e->flags & EDGE_FALSE_VALUE)
2856 	    gimple_cond_make_true (cond_stmt);
2857 	  else
2858 	    gcc_unreachable ();
2859 	  update_stmt (cond_stmt);
2860 	}
2861       else
2862 	{
2863 	  /* Todo: handle other cases.  Note that unreachable switch case
2864 	     statements have already been removed.  */
2865 	  continue;
2866 	}
2867 
2868       ret = true;
2869     }
2870 
2871   return ret;
2872 }
2873 
2874 /* Optimize
2875      mask_2 = 1 << cnt_1;
2876      _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
2877      _5 = _4 & mask_2;
2878    to
2879      _4 = ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);
2880      _5 = _4;
2881    If _5 is only used in _5 != 0 or _5 == 0 comparisons, 1
2882    is passed instead of 0, and the builtin just returns a zero
2883    or 1 value instead of the actual bit.
2884    Similarly for __sync_fetch_and_or_* (without the ", _3" part
2885    in there), and/or if mask_2 is a power of 2 constant.
2886    Similarly for xor instead of or, use ATOMIC_BIT_TEST_AND_COMPLEMENT
2887    in that case.  And similarly for and instead of or, except that
2888    the second argument to the builtin needs to be one's complement
2889    of the mask instead of mask.  */
2890 
2891 static void
2892 optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
2893 			      enum internal_fn fn, bool has_model_arg,
2894 			      bool after)
2895 {
2896   gimple *call = gsi_stmt (*gsip);
2897   tree lhs = gimple_call_lhs (call);
2898   use_operand_p use_p;
2899   gimple *use_stmt;
2900   tree mask, bit;
2901   optab optab;
2902 
2903   if (!flag_inline_atomics
2904       || optimize_debug
2905       || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)
2906       || !lhs
2907       || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2908       || !single_imm_use (lhs, &use_p, &use_stmt)
2909       || !is_gimple_assign (use_stmt)
2910       || gimple_assign_rhs_code (use_stmt) != BIT_AND_EXPR
2911       || !gimple_vdef (call))
2912     return;
2913 
2914   switch (fn)
2915     {
2916     case IFN_ATOMIC_BIT_TEST_AND_SET:
2917       optab = atomic_bit_test_and_set_optab;
2918       break;
2919     case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT:
2920       optab = atomic_bit_test_and_complement_optab;
2921       break;
2922     case IFN_ATOMIC_BIT_TEST_AND_RESET:
2923       optab = atomic_bit_test_and_reset_optab;
2924       break;
2925     default:
2926       return;
2927     }
2928 
2929   if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs))) == CODE_FOR_nothing)
2930     return;
2931 
2932   mask = gimple_call_arg (call, 1);
2933   tree use_lhs = gimple_assign_lhs (use_stmt);
2934   if (!use_lhs)
2935     return;
2936 
2937   if (TREE_CODE (mask) == INTEGER_CST)
2938     {
2939       if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
2940 	mask = const_unop (BIT_NOT_EXPR, TREE_TYPE (mask), mask);
2941       mask = fold_convert (TREE_TYPE (lhs), mask);
2942       int ibit = tree_log2 (mask);
2943       if (ibit < 0)
2944 	return;
2945       bit = build_int_cst (TREE_TYPE (lhs), ibit);
2946     }
2947   else if (TREE_CODE (mask) == SSA_NAME)
2948     {
2949       gimple *g = SSA_NAME_DEF_STMT (mask);
2950       if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
2951 	{
2952 	  if (!is_gimple_assign (g)
2953 	      || gimple_assign_rhs_code (g) != BIT_NOT_EXPR)
2954 	    return;
2955 	  mask = gimple_assign_rhs1 (g);
2956 	  if (TREE_CODE (mask) != SSA_NAME)
2957 	    return;
2958 	  g = SSA_NAME_DEF_STMT (mask);
2959 	}
2960       if (!is_gimple_assign (g)
2961 	  || gimple_assign_rhs_code (g) != LSHIFT_EXPR
2962 	  || !integer_onep (gimple_assign_rhs1 (g)))
2963 	return;
2964       bit = gimple_assign_rhs2 (g);
2965     }
2966   else
2967     return;
2968 
2969   if (gimple_assign_rhs1 (use_stmt) == lhs)
2970     {
2971       if (!operand_equal_p (gimple_assign_rhs2 (use_stmt), mask, 0))
2972 	return;
2973     }
2974   else if (gimple_assign_rhs2 (use_stmt) != lhs
2975 	   || !operand_equal_p (gimple_assign_rhs1 (use_stmt), mask, 0))
2976     return;
2977 
2978   bool use_bool = true;
2979   bool has_debug_uses = false;
2980   imm_use_iterator iter;
2981   gimple *g;
2982 
2983   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))
2984     use_bool = false;
2985   FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
2986     {
2987       enum tree_code code = ERROR_MARK;
2988       tree op0 = NULL_TREE, op1 = NULL_TREE;
2989       if (is_gimple_debug (g))
2990 	{
2991 	  has_debug_uses = true;
2992 	  continue;
2993 	}
2994       else if (is_gimple_assign (g))
2995 	switch (gimple_assign_rhs_code (g))
2996 	  {
2997 	  case COND_EXPR:
2998 	    op1 = gimple_assign_rhs1 (g);
2999 	    code = TREE_CODE (op1);
3000 	    op0 = TREE_OPERAND (op1, 0);
3001 	    op1 = TREE_OPERAND (op1, 1);
3002 	    break;
3003 	  case EQ_EXPR:
3004 	  case NE_EXPR:
3005 	    code = gimple_assign_rhs_code (g);
3006 	    op0 = gimple_assign_rhs1 (g);
3007 	    op1 = gimple_assign_rhs2 (g);
3008 	    break;
3009 	  default:
3010 	    break;
3011 	  }
3012       else if (gimple_code (g) == GIMPLE_COND)
3013 	{
3014 	  code = gimple_cond_code (g);
3015 	  op0 = gimple_cond_lhs (g);
3016 	  op1 = gimple_cond_rhs (g);
3017 	}
3018 
3019       if ((code == EQ_EXPR || code == NE_EXPR)
3020 	  && op0 == use_lhs
3021 	  && integer_zerop (op1))
3022 	{
3023 	  use_operand_p use_p;
3024 	  int n = 0;
3025 	  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3026 	    n++;
3027 	  if (n == 1)
3028 	    continue;
3029 	}
3030 
3031       use_bool = false;
3032       BREAK_FROM_IMM_USE_STMT (iter);
3033     }
3034 
3035   tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
3036   tree flag = build_int_cst (TREE_TYPE (lhs), use_bool);
3037   if (has_model_arg)
3038     g = gimple_build_call_internal (fn, 4, gimple_call_arg (call, 0),
3039 				    bit, flag, gimple_call_arg (call, 2));
3040   else
3041     g = gimple_build_call_internal (fn, 3, gimple_call_arg (call, 0),
3042 				    bit, flag);
3043   gimple_call_set_lhs (g, new_lhs);
3044   gimple_set_location (g, gimple_location (call));
3045   gimple_move_vops (g, call);
3046   bool throws = stmt_can_throw_internal (cfun, call);
3047   gimple_call_set_nothrow (as_a <gcall *> (g),
3048 			   gimple_call_nothrow_p (as_a <gcall *> (call)));
3049   gimple_stmt_iterator gsi = *gsip;
3050   gsi_insert_after (&gsi, g, GSI_NEW_STMT);
3051   edge e = NULL;
3052   if (throws)
3053     {
3054       maybe_clean_or_replace_eh_stmt (call, g);
3055       if (after || (use_bool && has_debug_uses))
3056 	e = find_fallthru_edge (gsi_bb (gsi)->succs);
3057     }
3058   if (after)
3059     {
3060       /* The internal function returns the value of the specified bit
3061 	 before the atomic operation.  If we are interested in the value
3062 	 of the specified bit after the atomic operation (this only makes sense
3063 	 for xor, otherwise the bit content is compile time known),
3064 	 we need to invert the bit.  */
3065       g = gimple_build_assign (make_ssa_name (TREE_TYPE (lhs)),
3066 			       BIT_XOR_EXPR, new_lhs,
3067 			       use_bool ? build_int_cst (TREE_TYPE (lhs), 1)
3068 					: mask);
3069       new_lhs = gimple_assign_lhs (g);
3070       if (throws)
3071 	{
3072 	  gsi_insert_on_edge_immediate (e, g);
3073 	  gsi = gsi_for_stmt (g);
3074 	}
3075       else
3076 	gsi_insert_after (&gsi, g, GSI_NEW_STMT);
3077     }
3078   if (use_bool && has_debug_uses)
3079     {
3080       tree temp = NULL_TREE;
3081       if (!throws || after || single_pred_p (e->dest))
3082 	{
3083 	  temp = make_node (DEBUG_EXPR_DECL);
3084 	  DECL_ARTIFICIAL (temp) = 1;
3085 	  TREE_TYPE (temp) = TREE_TYPE (lhs);
3086 	  SET_DECL_MODE (temp, TYPE_MODE (TREE_TYPE (lhs)));
3087 	  tree t = build2 (LSHIFT_EXPR, TREE_TYPE (lhs), new_lhs, bit);
3088 	  g = gimple_build_debug_bind (temp, t, g);
3089 	  if (throws && !after)
3090 	    {
3091 	      gsi = gsi_after_labels (e->dest);
3092 	      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
3093 	    }
3094 	  else
3095 	    gsi_insert_after (&gsi, g, GSI_NEW_STMT);
3096 	}
3097       FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
3098 	if (is_gimple_debug (g))
3099 	  {
3100 	    use_operand_p use_p;
3101 	    if (temp == NULL_TREE)
3102 	      gimple_debug_bind_reset_value (g);
3103 	    else
3104 	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3105 		SET_USE (use_p, temp);
3106 	    update_stmt (g);
3107 	  }
3108     }
3109   SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_lhs)
3110     = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs);
3111   replace_uses_by (use_lhs, new_lhs);
3112   gsi = gsi_for_stmt (use_stmt);
3113   gsi_remove (&gsi, true);
3114   release_defs (use_stmt);
3115   gsi_remove (gsip, true);
3116   release_ssa_name (lhs);
3117 }
3118 
3119 /* Optimize
3120    a = {};
3121    b = a;
3122    into
3123    a = {};
3124    b = {};
3125    Similarly for memset (&a, ..., sizeof (a)); instead of a = {};
3126    and/or memcpy (&b, &a, sizeof (a)); instead of b = a;  */
3127 
3128 static void
3129 optimize_memcpy (gimple_stmt_iterator *gsip, tree dest, tree src, tree len)
3130 {
3131   gimple *stmt = gsi_stmt (*gsip);
3132   if (gimple_has_volatile_ops (stmt))
3133     return;
3134 
3135   tree vuse = gimple_vuse (stmt);
3136   if (vuse == NULL)
3137     return;
3138 
3139   gimple *defstmt = SSA_NAME_DEF_STMT (vuse);
3140   tree src2 = NULL_TREE, len2 = NULL_TREE;
3141   poly_int64 offset, offset2;
3142   tree val = integer_zero_node;
3143   if (gimple_store_p (defstmt)
3144       && gimple_assign_single_p (defstmt)
3145       && TREE_CODE (gimple_assign_rhs1 (defstmt)) == CONSTRUCTOR
3146       && !gimple_clobber_p (defstmt))
3147     src2 = gimple_assign_lhs (defstmt);
3148   else if (gimple_call_builtin_p (defstmt, BUILT_IN_MEMSET)
3149 	   && TREE_CODE (gimple_call_arg (defstmt, 0)) == ADDR_EXPR
3150 	   && TREE_CODE (gimple_call_arg (defstmt, 1)) == INTEGER_CST)
3151     {
3152       src2 = TREE_OPERAND (gimple_call_arg (defstmt, 0), 0);
3153       len2 = gimple_call_arg (defstmt, 2);
3154       val = gimple_call_arg (defstmt, 1);
3155       /* For non-0 val, we'd have to transform stmt from assignment
3156 	 into memset (only if dest is addressable).  */
3157       if (!integer_zerop (val) && is_gimple_assign (stmt))
3158 	src2 = NULL_TREE;
3159     }
3160 
3161   if (src2 == NULL_TREE)
3162     return;
3163 
3164   if (len == NULL_TREE)
3165     len = (TREE_CODE (src) == COMPONENT_REF
3166 	   ? DECL_SIZE_UNIT (TREE_OPERAND (src, 1))
3167 	   : TYPE_SIZE_UNIT (TREE_TYPE (src)));
3168   if (len2 == NULL_TREE)
3169     len2 = (TREE_CODE (src2) == COMPONENT_REF
3170 	    ? DECL_SIZE_UNIT (TREE_OPERAND (src2, 1))
3171 	    : TYPE_SIZE_UNIT (TREE_TYPE (src2)));
3172   if (len == NULL_TREE
3173       || !poly_int_tree_p (len)
3174       || len2 == NULL_TREE
3175       || !poly_int_tree_p (len2))
3176     return;
3177 
3178   src = get_addr_base_and_unit_offset (src, &offset);
3179   src2 = get_addr_base_and_unit_offset (src2, &offset2);
3180   if (src == NULL_TREE
3181       || src2 == NULL_TREE
3182       || maybe_lt (offset, offset2))
3183     return;
3184 
3185   if (!operand_equal_p (src, src2, 0))
3186     return;
3187 
3188   /* [ src + offset2, src + offset2 + len2 - 1 ] is set to val.
3189      Make sure that
3190      [ src + offset, src + offset + len - 1 ] is a subset of that.  */
3191   if (maybe_gt (wi::to_poly_offset (len) + (offset - offset2),
3192 		wi::to_poly_offset (len2)))
3193     return;
3194 
3195   if (dump_file && (dump_flags & TDF_DETAILS))
3196     {
3197       fprintf (dump_file, "Simplified\n  ");
3198       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3199       fprintf (dump_file, "after previous\n  ");
3200       print_gimple_stmt (dump_file, defstmt, 0, dump_flags);
3201     }
3202 
3203   /* For simplicity, don't change the kind of the stmt,
3204      turn dest = src; into dest = {}; and memcpy (&dest, &src, len);
3205      into memset (&dest, val, len);
3206      In theory we could change dest = src into memset if dest
3207      is addressable (maybe beneficial if val is not 0), or
3208      memcpy (&dest, &src, len) into dest = {} if len is the size
3209      of dest, dest isn't volatile.  */
3210   if (is_gimple_assign (stmt))
3211     {
3212       tree ctor = build_constructor (TREE_TYPE (dest), NULL);
3213       gimple_assign_set_rhs_from_tree (gsip, ctor);
3214       update_stmt (stmt);
3215     }
3216   else /* If stmt is memcpy, transform it into memset.  */
3217     {
3218       gcall *call = as_a <gcall *> (stmt);
3219       tree fndecl = builtin_decl_implicit (BUILT_IN_MEMSET);
3220       gimple_call_set_fndecl (call, fndecl);
3221       gimple_call_set_fntype (call, TREE_TYPE (fndecl));
3222       gimple_call_set_arg (call, 1, val);
3223       update_stmt (stmt);
3224     }
3225 
3226   if (dump_file && (dump_flags & TDF_DETAILS))
3227     {
3228       fprintf (dump_file, "into\n  ");
3229       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3230     }
3231 }
3232 
3233 /* A simple pass that attempts to fold all builtin functions.  This pass
3234    is run after we've propagated as many constants as we can.  */
3235 
3236 namespace {
3237 
3238 const pass_data pass_data_fold_builtins =
3239 {
3240   GIMPLE_PASS, /* type */
3241   "fab", /* name */
3242   OPTGROUP_NONE, /* optinfo_flags */
3243   TV_NONE, /* tv_id */
3244   ( PROP_cfg | PROP_ssa ), /* properties_required */
3245   0, /* properties_provided */
3246   0, /* properties_destroyed */
3247   0, /* todo_flags_start */
3248   TODO_update_ssa, /* todo_flags_finish */
3249 };
3250 
3251 class pass_fold_builtins : public gimple_opt_pass
3252 {
3253 public:
3254   pass_fold_builtins (gcc::context *ctxt)
3255     : gimple_opt_pass (pass_data_fold_builtins, ctxt)
3256   {}
3257 
3258   /* opt_pass methods: */
3259   opt_pass * clone () { return new pass_fold_builtins (m_ctxt); }
3260   virtual unsigned int execute (function *);
3261 
3262 }; // class pass_fold_builtins
3263 
3264 unsigned int
3265 pass_fold_builtins::execute (function *fun)
3266 {
3267   bool cfg_changed = false;
3268   basic_block bb;
3269   unsigned int todoflags = 0;
3270 
3271   FOR_EACH_BB_FN (bb, fun)
3272     {
3273       gimple_stmt_iterator i;
3274       for (i = gsi_start_bb (bb); !gsi_end_p (i); )
3275 	{
3276 	  gimple *stmt, *old_stmt;
3277 	  tree callee;
3278 	  enum built_in_function fcode;
3279 
3280 	  stmt = gsi_stmt (i);
3281 
3282           if (gimple_code (stmt) != GIMPLE_CALL)
3283 	    {
3284 	      /* Remove all *ssaname_N ={v} {CLOBBER}; stmts:
3285 		 after the last GIMPLE DSE they aren't needed and might
3286 		 unnecessarily keep the SSA_NAMEs live.  */
3287 	      if (gimple_clobber_p (stmt))
3288 		{
3289 		  tree lhs = gimple_assign_lhs (stmt);
3290 		  if (TREE_CODE (lhs) == MEM_REF
3291 		      && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME)
3292 		    {
3293 		      unlink_stmt_vdef (stmt);
3294 		      gsi_remove (&i, true);
3295 		      release_defs (stmt);
3296 		      continue;
3297 		    }
3298 		}
3299 	      else if (gimple_assign_load_p (stmt) && gimple_store_p (stmt))
3300 		optimize_memcpy (&i, gimple_assign_lhs (stmt),
3301 				 gimple_assign_rhs1 (stmt), NULL_TREE);
3302 	      gsi_next (&i);
3303 	      continue;
3304 	    }
3305 
3306 	  callee = gimple_call_fndecl (stmt);
3307 	  if (!callee || !fndecl_built_in_p (callee, BUILT_IN_NORMAL))
3308 	    {
3309 	      gsi_next (&i);
3310 	      continue;
3311 	    }
3312 
3313 	  fcode = DECL_FUNCTION_CODE (callee);
3314 	  if (fold_stmt (&i))
3315 	    ;
3316 	  else
3317 	    {
3318 	      tree result = NULL_TREE;
3319 	      switch (DECL_FUNCTION_CODE (callee))
3320 		{
3321 		case BUILT_IN_CONSTANT_P:
3322 		  /* Resolve __builtin_constant_p.  If it hasn't been
3323 		     folded to integer_one_node by now, it's fairly
3324 		     certain that the value simply isn't constant.  */
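		  /* E.g. a leftover
		       if (__builtin_constant_p (n)) ...
		     is resolved as if n were not constant, i.e. the call
		     becomes the literal 0.  */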
3325 		  result = integer_zero_node;
3326 		  break;
3327 
3328 		case BUILT_IN_ASSUME_ALIGNED:
3329 		  /* Remove __builtin_assume_aligned.  */
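		  /* The call is only an alignment hint, so e.g.
		       p2 = __builtin_assume_aligned (p, 16);
		     simply becomes the plain copy p2 = p.  */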
3330 		  result = gimple_call_arg (stmt, 0);
3331 		  break;
3332 
3333 		case BUILT_IN_STACK_RESTORE:
3334 		  result = optimize_stack_restore (i);
3335 		  if (result)
3336 		    break;
3337 		  gsi_next (&i);
3338 		  continue;
3339 
3340 		case BUILT_IN_UNREACHABLE:
3341 		  if (optimize_unreachable (i))
3342 		    cfg_changed = true;
3343 		  break;
3344 
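		/* The case groups below look for idioms such as
		     old = __atomic_fetch_or (&v, mask, order);
		     bit = old & mask;
		   with a single-bit mask and, where possible, map them to
		   the internal .ATOMIC_BIT_TEST_AND_* functions.  */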
3345 		case BUILT_IN_ATOMIC_FETCH_OR_1:
3346 		case BUILT_IN_ATOMIC_FETCH_OR_2:
3347 		case BUILT_IN_ATOMIC_FETCH_OR_4:
3348 		case BUILT_IN_ATOMIC_FETCH_OR_8:
3349 		case BUILT_IN_ATOMIC_FETCH_OR_16:
3350 		  optimize_atomic_bit_test_and (&i,
3351 						IFN_ATOMIC_BIT_TEST_AND_SET,
3352 						true, false);
3353 		  break;
3354 		case BUILT_IN_SYNC_FETCH_AND_OR_1:
3355 		case BUILT_IN_SYNC_FETCH_AND_OR_2:
3356 		case BUILT_IN_SYNC_FETCH_AND_OR_4:
3357 		case BUILT_IN_SYNC_FETCH_AND_OR_8:
3358 		case BUILT_IN_SYNC_FETCH_AND_OR_16:
3359 		  optimize_atomic_bit_test_and (&i,
3360 						IFN_ATOMIC_BIT_TEST_AND_SET,
3361 						false, false);
3362 		  break;
3363 
3364 		case BUILT_IN_ATOMIC_FETCH_XOR_1:
3365 		case BUILT_IN_ATOMIC_FETCH_XOR_2:
3366 		case BUILT_IN_ATOMIC_FETCH_XOR_4:
3367 		case BUILT_IN_ATOMIC_FETCH_XOR_8:
3368 		case BUILT_IN_ATOMIC_FETCH_XOR_16:
3369 		  optimize_atomic_bit_test_and
3370 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, false);
3371 		  break;
3372 		case BUILT_IN_SYNC_FETCH_AND_XOR_1:
3373 		case BUILT_IN_SYNC_FETCH_AND_XOR_2:
3374 		case BUILT_IN_SYNC_FETCH_AND_XOR_4:
3375 		case BUILT_IN_SYNC_FETCH_AND_XOR_8:
3376 		case BUILT_IN_SYNC_FETCH_AND_XOR_16:
3377 		  optimize_atomic_bit_test_and
3378 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, false);
3379 		  break;
3380 
3381 		case BUILT_IN_ATOMIC_XOR_FETCH_1:
3382 		case BUILT_IN_ATOMIC_XOR_FETCH_2:
3383 		case BUILT_IN_ATOMIC_XOR_FETCH_4:
3384 		case BUILT_IN_ATOMIC_XOR_FETCH_8:
3385 		case BUILT_IN_ATOMIC_XOR_FETCH_16:
3386 		  optimize_atomic_bit_test_and
3387 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, true);
3388 		  break;
3389 		case BUILT_IN_SYNC_XOR_AND_FETCH_1:
3390 		case BUILT_IN_SYNC_XOR_AND_FETCH_2:
3391 		case BUILT_IN_SYNC_XOR_AND_FETCH_4:
3392 		case BUILT_IN_SYNC_XOR_AND_FETCH_8:
3393 		case BUILT_IN_SYNC_XOR_AND_FETCH_16:
3394 		  optimize_atomic_bit_test_and
3395 			(&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, true);
3396 		  break;
3397 
3398 		case BUILT_IN_ATOMIC_FETCH_AND_1:
3399 		case BUILT_IN_ATOMIC_FETCH_AND_2:
3400 		case BUILT_IN_ATOMIC_FETCH_AND_4:
3401 		case BUILT_IN_ATOMIC_FETCH_AND_8:
3402 		case BUILT_IN_ATOMIC_FETCH_AND_16:
3403 		  optimize_atomic_bit_test_and (&i,
3404 						IFN_ATOMIC_BIT_TEST_AND_RESET,
3405 						true, false);
3406 		  break;
3407 		case BUILT_IN_SYNC_FETCH_AND_AND_1:
3408 		case BUILT_IN_SYNC_FETCH_AND_AND_2:
3409 		case BUILT_IN_SYNC_FETCH_AND_AND_4:
3410 		case BUILT_IN_SYNC_FETCH_AND_AND_8:
3411 		case BUILT_IN_SYNC_FETCH_AND_AND_16:
3412 		  optimize_atomic_bit_test_and (&i,
3413 						IFN_ATOMIC_BIT_TEST_AND_RESET,
3414 						false, false);
3415 		  break;
3416 
3417 		case BUILT_IN_MEMCPY:
3418 		  if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)
3419 		      && TREE_CODE (gimple_call_arg (stmt, 0)) == ADDR_EXPR
3420 		      && TREE_CODE (gimple_call_arg (stmt, 1)) == ADDR_EXPR
3421 		      && TREE_CODE (gimple_call_arg (stmt, 2)) == INTEGER_CST)
3422 		    {
3423 		      tree dest = TREE_OPERAND (gimple_call_arg (stmt, 0), 0);
3424 		      tree src = TREE_OPERAND (gimple_call_arg (stmt, 1), 0);
3425 		      tree len = gimple_call_arg (stmt, 2);
3426 		      optimize_memcpy (&i, dest, src, len);
3427 		    }
3428 		  break;
3429 
3430 		case BUILT_IN_VA_START:
3431 		case BUILT_IN_VA_END:
3432 		case BUILT_IN_VA_COPY:
3433 		  /* These shouldn't be folded before pass_stdarg.  */
3434 		  result = optimize_stdarg_builtin (stmt);
3435 		  break;
3436 
3437 		default:;
3438 		}
3439 
3440 	      if (!result)
3441 		{
3442 		  gsi_next (&i);
3443 		  continue;
3444 		}
3445 
3446 	      if (!update_call_from_tree (&i, result))
3447 		gimplify_and_update_call_from_tree (&i, result);
3448 	    }
3449 
3450 	  todoflags |= TODO_update_address_taken;
3451 
3452 	  if (dump_file && (dump_flags & TDF_DETAILS))
3453 	    {
3454 	      fprintf (dump_file, "Simplified\n  ");
3455 	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3456 	    }
3457 
3458           old_stmt = stmt;
3459 	  stmt = gsi_stmt (i);
3460 	  update_stmt (stmt);
3461 
3462 	  if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
3463 	      && gimple_purge_dead_eh_edges (bb))
3464 	    cfg_changed = true;
3465 
3466 	  if (dump_file && (dump_flags & TDF_DETAILS))
3467 	    {
3468 	      fprintf (dump_file, "to\n  ");
3469 	      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
3470 	      fprintf (dump_file, "\n");
3471 	    }
3472 
3473 	  /* Retry the same statement if it changed into another
3474 	     builtin; there might be new opportunities now.  */
3475           if (gimple_code (stmt) != GIMPLE_CALL)
3476 	    {
3477 	      gsi_next (&i);
3478 	      continue;
3479 	    }
3480 	  callee = gimple_call_fndecl (stmt);
3481 	  if (!callee
3482 	      || !fndecl_built_in_p (callee, fcode))
3483 	    gsi_next (&i);
3484 	}
3485     }
3486 
3487   /* Delete unreachable blocks.  */
3488   if (cfg_changed)
3489     todoflags |= TODO_cleanup_cfg;
3490 
3491   return todoflags;
3492 }
3493 
3494 } // anon namespace
3495 
3496 gimple_opt_pass *
3497 make_pass_fold_builtins (gcc::context *ctxt)
3498 {
3499   return new pass_fold_builtins (ctxt);
3500 }
3501 
3502 /* A simple pass that emits some warnings post IPA.  */
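/* Currently this covers -Wnonnull; e.g. for
     extern void f (void *) __attribute__ ((nonnull));
     f (NULL);
   the call is diagnosed because a null argument is passed where a
   non-null one is expected.  */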
3503 
3504 namespace {
3505 
3506 const pass_data pass_data_post_ipa_warn =
3507 {
3508   GIMPLE_PASS, /* type */
3509   "post_ipa_warn", /* name */
3510   OPTGROUP_NONE, /* optinfo_flags */
3511   TV_NONE, /* tv_id */
3512   ( PROP_cfg | PROP_ssa ), /* properties_required */
3513   0, /* properties_provided */
3514   0, /* properties_destroyed */
3515   0, /* todo_flags_start */
3516   0, /* todo_flags_finish */
3517 };
3518 
3519 class pass_post_ipa_warn : public gimple_opt_pass
3520 {
3521 public:
3522   pass_post_ipa_warn (gcc::context *ctxt)
3523     : gimple_opt_pass (pass_data_post_ipa_warn, ctxt)
3524   {}
3525 
3526   /* opt_pass methods: */
3527   opt_pass * clone () { return new pass_post_ipa_warn (m_ctxt); }
3528   virtual bool gate (function *) { return warn_nonnull != 0; }
3529   virtual unsigned int execute (function *);
3530 
3531 }; // class pass_post_ipa_warn
3532 
3533 unsigned int
3534 pass_post_ipa_warn::execute (function *fun)
3535 {
3536   basic_block bb;
3537 
3538   FOR_EACH_BB_FN (bb, fun)
3539     {
3540       gimple_stmt_iterator gsi;
3541       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3542 	{
3543 	  gimple *stmt = gsi_stmt (gsi);
3544 	  if (!is_gimple_call (stmt) || gimple_no_warning_p (stmt))
3545 	    continue;
3546 
3547 	  if (warn_nonnull)
3548 	    {
3549 	      bitmap nonnullargs
3550 		= get_nonnull_args (gimple_call_fntype (stmt));
3551 	      if (nonnullargs)
3552 		{
3553 		  for (unsigned i = 0; i < gimple_call_num_args (stmt); i++)
3554 		    {
3555 		      tree arg = gimple_call_arg (stmt, i);
3556 		      if (TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE)
3557 			continue;
3558 		      if (!integer_zerop (arg))
3559 			continue;
3560 		      if (!bitmap_empty_p (nonnullargs)
3561 			  && !bitmap_bit_p (nonnullargs, i))
3562 			continue;
3563 
3564 		      location_t loc = gimple_location (stmt);
3565 		      auto_diagnostic_group d;
3566 		      if (warning_at (loc, OPT_Wnonnull,
3567 				      "%Gargument %u null where non-null "
3568 				      "expected", stmt, i + 1))
3569 			{
3570 			  tree fndecl = gimple_call_fndecl (stmt);
3571 			  if (fndecl && DECL_IS_BUILTIN (fndecl))
3572 			    inform (loc, "in a call to built-in function %qD",
3573 				    fndecl);
3574 			  else if (fndecl)
3575 			    inform (DECL_SOURCE_LOCATION (fndecl),
3576 				    "in a call to function %qD declared here",
3577 				    fndecl);
3578 
3579 			}
3580 		    }
3581 		  BITMAP_FREE (nonnullargs);
3582 		}
3583 	    }
3584 	}
3585     }
3586   return 0;
3587 }
3588 
3589 } // anon namespace
3590 
3591 gimple_opt_pass *
3592 make_pass_post_ipa_warn (gcc::context *ctxt)
3593 {
3594   return new pass_post_ipa_warn (ctxt);
3595 }
3596 
3597 #if defined(__NetBSD__) && defined(NETBSD_NATIVE)
3598 /*
3599  * This is a big, ugly, temporary hack:
3600  *    http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59958
3601  * To make sure we have configured all our targets correctly, mimic the
3602  * #ifdef cascade from src/lib/libc/stdlib/jemalloc.c here and assert at
3603  * compile time that the resulting value matches gcc's MALLOC_ABI_ALIGNMENT.
3604  */
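/* The assertion below compares sizes in bits: jemalloc's minimum allocation
   is (1 << JEMALLOC_TINY_MIN_2POW) bytes, i.e. (8 << JEMALLOC_TINY_MIN_2POW)
   bits, which must match gcc's MALLOC_ABI_ALIGNMENT (also given in bits).
   E.g. with JEMALLOC_TINY_MIN_2POW == 3 this is 8 << 3 == 64 bits, i.e.
   8-byte alignment.  */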
3605 
3606 #if defined(__hppa__)
3607 #define	JEMALLOC_TINY_MIN_2POW	4
3608 #elif defined(__alpha__) || defined(__amd64__) || defined(__sparc64__)	\
3609      ||	(defined(__arm__) && defined(__ARM_EABI__)) \
3610      || defined(__ia64__) || defined(__powerpc__) \
3611      || defined(__aarch64__) \
3612      || ((defined(__mips__) || defined(__riscv__)) && defined(_LP64))
3613 #define	JEMALLOC_TINY_MIN_2POW	3
3614 #endif
3615 
3616 #ifndef JEMALLOC_TINY_MIN_2POW
3617 #define	JEMALLOC_TINY_MIN_2POW	2
3618 #endif
3619 
3620 /* Make sure we test the (native) 64-bit variant for targets supporting -m32.  */
3621 #undef	TARGET_64BIT
3622 #ifdef _LP64
3623 #define	TARGET_64BIT	1
3624 #else
3625 #ifdef __sh__
3626 #undef UNITS_PER_WORD
3627 #define	UNITS_PER_WORD	4	/* The original definition varies depending on the CPU.  */
3628 #endif
3629 #define	TARGET_64BIT	0
3630 #endif
3631 
3632 /* ARM has a non-constant MALLOC_ABI_ALIGNMENT since GCC 5.  */
3633 #if !defined(__arm__)
3634 #ifdef __CTASSERT
3635 __CTASSERT((8<<JEMALLOC_TINY_MIN_2POW) == MALLOC_ABI_ALIGNMENT);
3636 #else
3637 #error compiling on an older NetBSD version?
3638 #endif
3639 #endif
3640 
3641 #endif
3642