1 /* Support routines for Value Range Propagation (VRP).
2    Copyright (C) 2005-2022 Free Software Foundation, Inc.
3    Contributed by Diego Novillo <dnovillo@redhat.com>.
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11 
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 GNU General Public License for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "basic-block.h"
25 #include "bitmap.h"
26 #include "sbitmap.h"
27 #include "options.h"
28 #include "dominance.h"
29 #include "function.h"
30 #include "cfg.h"
31 #include "tree.h"
32 #include "gimple.h"
33 #include "tree-pass.h"
34 #include "ssa.h"
35 #include "gimple-pretty-print.h"
36 #include "fold-const.h"
37 #include "cfganal.h"
38 #include "gimple-iterator.h"
39 #include "tree-cfg.h"
40 #include "tree-ssa-loop-manip.h"
41 #include "tree-ssa-loop-niter.h"
42 #include "tree-into-ssa.h"
43 #include "cfgloop.h"
44 #include "tree-scalar-evolution.h"
45 #include "tree-ssa-propagate.h"
46 #include "domwalk.h"
47 #include "vr-values.h"
48 #include "gimple-array-bounds.h"
49 #include "gimple-range.h"
50 #include "gimple-range-path.h"
51 #include "value-pointer-equiv.h"
52 #include "gimple-fold.h"
53 
54 /* Set of SSA names found live during the RPO traversal of the function
55    for still-active basic blocks.  */
56 class live_names
57 {
58 public:
59   live_names ();
60   ~live_names ();
61   void set (tree, basic_block);
62   void clear (tree, basic_block);
63   void merge (basic_block dest, basic_block src);
64   bool live_on_block_p (tree, basic_block);
65   bool live_on_edge_p (tree, edge);
66   bool block_has_live_names_p (basic_block);
67   void clear_block (basic_block);
68 
69 private:
70   sbitmap *live;
71   unsigned num_blocks;
72   void init_bitmap_if_needed (basic_block);
73 };
74 
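/* Allocate and clear the live bitmap for block BB if it has not been
   allocated yet.  */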
75 void
76 live_names::init_bitmap_if_needed (basic_block bb)
77 {
78   unsigned i = bb->index;
79   if (!live[i])
80     {
81       live[i] = sbitmap_alloc (num_ssa_names);
82       bitmap_clear (live[i]);
83     }
84 }
85 
86 bool
87 live_names::block_has_live_names_p (basic_block bb)
88 {
89   unsigned i = bb->index;
90   return live[i] && bitmap_empty_p (live[i]);
91 }
92 
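/* Free the live bitmap associated with block BB, if any.  */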
93 void
94 live_names::clear_block (basic_block bb)
95 {
96   unsigned i = bb->index;
97   if (live[i])
98     {
99       sbitmap_free (live[i]);
100       live[i] = NULL;
101     }
102 }
103 
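/* Merge the SSA names live in SRC into those live in DEST.  */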
104 void
105 live_names::merge (basic_block dest, basic_block src)
106 {
107   init_bitmap_if_needed (dest);
108   init_bitmap_if_needed (src);
109   bitmap_ior (live[dest->index], live[dest->index], live[src->index]);
110 }
111 
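/* Mark the SSA name NAME as live in block BB.  */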
112 void
113 live_names::set (tree name, basic_block bb)
114 {
115   init_bitmap_if_needed (bb);
116   bitmap_set_bit (live[bb->index], SSA_NAME_VERSION (name));
117 }
118 
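/* Mark the SSA name NAME as no longer live in block BB.  */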
119 void
120 live_names::clear (tree name, basic_block bb)
121 {
122   unsigned i = bb->index;
123   if (live[i])
124     bitmap_clear_bit (live[i], SSA_NAME_VERSION (name));
125 }
126 
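/* Allocate the per-block bitmap array.  The individual bitmaps are
   allocated lazily by init_bitmap_if_needed when a block is first
   touched.  */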
127 live_names::live_names ()
128 {
129   num_blocks = last_basic_block_for_fn (cfun);
130   live = XCNEWVEC (sbitmap, num_blocks);
131 }
132 
133 live_names::~live_names ()
134 {
135   for (unsigned i = 0; i < num_blocks; ++i)
136     if (live[i])
137       sbitmap_free (live[i]);
138   XDELETEVEC (live);
139 }
140 
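/* Return true if the SSA name NAME is recorded as live in block BB.  */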
141 bool
142 live_names::live_on_block_p (tree name, basic_block bb)
143 {
144   return (live[bb->index]
145 	  && bitmap_bit_p (live[bb->index], SSA_NAME_VERSION (name)));
146 }
147 
148 /* Return true if the SSA name NAME is live on the edge E.  */
149 
150 bool
151 live_names::live_on_edge_p (tree name, edge e)
152 {
153   return live_on_block_p (name, e->dest);
154 }
155 
156 
157 /* VR_TYPE describes a range with minimum value *MIN and maximum
158    value *MAX.  Restrict the range to the set of values that have
159    no bits set outside NONZERO_BITS.  Update *MIN and *MAX and
160    return the new range type.
161 
162    SGN gives the sign of the values described by the range.  */
163 
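/* For example, an unsigned VR_RANGE [1, 6] with NONZERO_BITS 0b0101 has
   its bounds tightened to [1, 5], the nearest values with no bits set
   outside the mask.  */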
164 enum value_range_kind
165 intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
166 				   wide_int *min, wide_int *max,
167 				   const wide_int &nonzero_bits,
168 				   signop sgn)
169 {
170   if (vr_type == VR_ANTI_RANGE)
171     {
172       /* The VR_ANTI_RANGE is equivalent to the union of the ranges
173 	 A: [-INF, *MIN) and B: (*MAX, +INF].  First use NONZERO_BITS
174 	 to create an inclusive upper bound for A and an inclusive lower
175 	 bound for B.  */
176       wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
177       wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);
178 
179       /* If the calculation of A_MAX wrapped, A is effectively empty
180 	 and A_MAX is the highest value that satisfies NONZERO_BITS.
181 	 Likewise if the calculation of B_MIN wrapped, B is effectively
182 	 empty and B_MIN is the lowest value that satisfies NONZERO_BITS.  */
183       bool a_empty = wi::ge_p (a_max, *min, sgn);
184       bool b_empty = wi::le_p (b_min, *max, sgn);
185 
186       /* If both A and B are empty, there are no valid values.  */
187       if (a_empty && b_empty)
188 	return VR_UNDEFINED;
189 
190       /* If exactly one of A or B is empty, return a VR_RANGE for the
191 	 other one.  */
192       if (a_empty || b_empty)
193 	{
194 	  *min = b_min;
195 	  *max = a_max;
196 	  gcc_checking_assert (wi::le_p (*min, *max, sgn));
197 	  return VR_RANGE;
198 	}
199 
200       /* Update the VR_ANTI_RANGE bounds.  */
201       *min = a_max + 1;
202       *max = b_min - 1;
203       gcc_checking_assert (wi::le_p (*min, *max, sgn));
204 
205       /* Now check whether the excluded range includes any values that
206 	 satisfy NONZERO_BITS.  If not, switch to a full VR_RANGE.  */
207       if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
208 	{
209 	  unsigned int precision = min->get_precision ();
210 	  *min = wi::min_value (precision, sgn);
211 	  *max = wi::max_value (precision, sgn);
212 	  vr_type = VR_RANGE;
213 	}
214     }
215   if (vr_type == VR_RANGE || vr_type == VR_VARYING)
216     {
217       *max = wi::round_down_for_mask (*max, nonzero_bits);
218 
219       /* Check that the range contains at least one valid value.  */
220       if (wi::gt_p (*min, *max, sgn))
221 	return VR_UNDEFINED;
222 
223       *min = wi::round_up_for_mask (*min, nonzero_bits);
224       gcc_checking_assert (wi::le_p (*min, *max, sgn));
225     }
226   return vr_type;
227 }
228 
229 /* Return true if max and min of VR are INTEGER_CST.  It's not necessarily
230    a singleton.  */
231 
232 bool
233 range_int_cst_p (const value_range *vr)
234 {
235   return (vr->kind () == VR_RANGE && range_has_numeric_bounds_p (vr));
236 }
237 
238 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
239    otherwise.  We only handle additive operations and set NEG to true if the
240    symbol is negated and INV to the invariant part, if any.  */
241 
242 tree
243 get_single_symbol (tree t, bool *neg, tree *inv)
244 {
245   bool neg_;
246   tree inv_;
247 
248   *inv = NULL_TREE;
249   *neg = false;
250 
251   if (TREE_CODE (t) == PLUS_EXPR
252       || TREE_CODE (t) == POINTER_PLUS_EXPR
253       || TREE_CODE (t) == MINUS_EXPR)
254     {
255       if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
256 	{
257 	  neg_ = (TREE_CODE (t) == MINUS_EXPR);
258 	  inv_ = TREE_OPERAND (t, 0);
259 	  t = TREE_OPERAND (t, 1);
260 	}
261       else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
262 	{
263 	  neg_ = false;
264 	  inv_ = TREE_OPERAND (t, 1);
265 	  t = TREE_OPERAND (t, 0);
266 	}
267       else
268         return NULL_TREE;
269     }
270   else
271     {
272       neg_ = false;
273       inv_ = NULL_TREE;
274     }
275 
276   if (TREE_CODE (t) == NEGATE_EXPR)
277     {
278       t = TREE_OPERAND (t, 0);
279       neg_ = !neg_;
280     }
281 
282   if (TREE_CODE (t) != SSA_NAME)
283     return NULL_TREE;
284 
285   if (inv_ && TREE_OVERFLOW_P (inv_))
286     inv_ = drop_tree_overflow (inv_);
287 
288   *neg = neg_;
289   *inv = inv_;
290   return t;
291 }
292 
293 /* The reverse operation: build a symbolic expression with TYPE
294    from symbol SYM, negated according to NEG, and invariant INV.  */
295 
296 static tree
297 build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
298 {
299   const bool pointer_p = POINTER_TYPE_P (type);
300   tree t = sym;
301 
302   if (neg)
303     t = build1 (NEGATE_EXPR, type, t);
304 
305   if (integer_zerop (inv))
306     return t;
307 
308   return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
309 }
310 
311 /* Return
312    1 if VAL < VAL2
313    0 if !(VAL < VAL2)
314    -2 if those are incomparable.  */
315 int
316 operand_less_p (tree val, tree val2)
317 {
318   /* LT is folded faster than GE and others.  Inline the common case.  */
319   if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
320     return tree_int_cst_lt (val, val2);
321   else if (TREE_CODE (val) == SSA_NAME && TREE_CODE (val2) == SSA_NAME)
322     return val == val2 ? 0 : -2;
323   else
324     {
325       int cmp = compare_values (val, val2);
326       if (cmp == -1)
327 	return 1;
328       else if (cmp == 0 || cmp == 1)
329 	return 0;
330       else
331 	return -2;
332     }
333 }
334 
335 /* Compare two values VAL1 and VAL2.  Return
336 
337    	-2 if VAL1 and VAL2 cannot be compared at compile-time,
338    	-1 if VAL1 < VAL2,
339    	 0 if VAL1 == VAL2,
340 	+1 if VAL1 > VAL2, and
341 	+2 if VAL1 != VAL2
342 
343    This is similar to tree_int_cst_compare but supports pointer values
344    and values that cannot be compared at compile time.
345 
346    If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
347    true if the return value is only valid if we assume that signed
348    overflow is undefined.  */
349 
350 int
351 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
352 {
353   if (val1 == val2)
354     return 0;
355 
356   /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
357      both integers.  */
358   gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
359 	      == POINTER_TYPE_P (TREE_TYPE (val2)));
360 
361   /* Convert the two values into the same type.  This is needed because
362      sizetype causes sign extension even for unsigned types.  */
363   if (!useless_type_conversion_p (TREE_TYPE (val1), TREE_TYPE (val2)))
364     val2 = fold_convert (TREE_TYPE (val1), val2);
365 
366   const bool overflow_undefined
367     = INTEGRAL_TYPE_P (TREE_TYPE (val1))
368       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
369   tree inv1, inv2;
370   bool neg1, neg2;
371   tree sym1 = get_single_symbol (val1, &neg1, &inv1);
372   tree sym2 = get_single_symbol (val2, &neg2, &inv2);
373 
374   /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
375      accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
376   if (sym1 && sym2)
377     {
378       /* Both values must use the same name with the same sign.  */
379       if (sym1 != sym2 || neg1 != neg2)
380 	return -2;
381 
382       /* [-]NAME + CST == [-]NAME + CST.  */
383       if (inv1 == inv2)
384 	return 0;
385 
386       /* If overflow is defined we cannot simplify more.  */
387       if (!overflow_undefined)
388 	return -2;
389 
390       if (strict_overflow_p != NULL
391 	  /* Symbolic range building sets the no-warning bit to declare
392 	     that overflow doesn't happen.  */
393 	  && (!inv1 || !warning_suppressed_p (val1, OPT_Woverflow))
394 	  && (!inv2 || !warning_suppressed_p (val2, OPT_Woverflow)))
395 	*strict_overflow_p = true;
396 
397       if (!inv1)
398 	inv1 = build_int_cst (TREE_TYPE (val1), 0);
399       if (!inv2)
400 	inv2 = build_int_cst (TREE_TYPE (val2), 0);
401 
402       return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
403 		      TYPE_SIGN (TREE_TYPE (val1)));
404     }
405 
406   const bool cst1 = is_gimple_min_invariant (val1);
407   const bool cst2 = is_gimple_min_invariant (val2);
408 
409   /* If one is of the form '[-]NAME + CST' and the other is constant, then
410      it might be possible to say something depending on the constants.  */
411   if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
412     {
413       if (!overflow_undefined)
414 	return -2;
415 
416       if (strict_overflow_p != NULL
417 	  /* Symbolic range building sets the no-warning bit to declare
418 	     that overflow doesn't happen.  */
419 	  && (!sym1 || !warning_suppressed_p (val1, OPT_Woverflow))
420 	  && (!sym2 || !warning_suppressed_p (val2, OPT_Woverflow)))
421 	*strict_overflow_p = true;
422 
423       const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
424       tree cst = cst1 ? val1 : val2;
425       tree inv = cst1 ? inv2 : inv1;
426 
427       /* Compute the difference between the constants.  If it overflows or
428 	 underflows, this means that we can trivially compare the NAME with
429 	 it and, consequently, the two values with each other.  */
430       wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
431       if (wi::cmp (0, wi::to_wide (inv), sgn)
432 	  != wi::cmp (diff, wi::to_wide (cst), sgn))
433 	{
434 	  const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
435 	  return cst1 ? res : -res;
436 	}
437 
438       return -2;
439     }
440 
441   /* We cannot say anything more for non-constants.  */
442   if (!cst1 || !cst2)
443     return -2;
444 
445   if (!POINTER_TYPE_P (TREE_TYPE (val1)))
446     {
447       /* We cannot compare overflowed values.  */
448       if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
449 	return -2;
450 
451       if (TREE_CODE (val1) == INTEGER_CST
452 	  && TREE_CODE (val2) == INTEGER_CST)
453 	return tree_int_cst_compare (val1, val2);
454 
455       if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
456 	{
457 	  if (known_eq (wi::to_poly_widest (val1),
458 			wi::to_poly_widest (val2)))
459 	    return 0;
460 	  if (known_lt (wi::to_poly_widest (val1),
461 			wi::to_poly_widest (val2)))
462 	    return -1;
463 	  if (known_gt (wi::to_poly_widest (val1),
464 			wi::to_poly_widest (val2)))
465 	    return 1;
466 	}
467 
468       return -2;
469     }
470   else
471     {
472       if (TREE_CODE (val1) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
473 	{
474 	  /* We cannot compare overflowed values.  */
475 	  if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
476 	    return -2;
477 
478 	  return tree_int_cst_compare (val1, val2);
479 	}
480 
481       /* First see if VAL1 and VAL2 are not the same.  */
482       if (operand_equal_p (val1, val2, 0))
483 	return 0;
484 
485       fold_defer_overflow_warnings ();
486 
487       /* If VAL1 is a lower address than VAL2, return -1.  */
488       tree t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val1, val2);
489       if (t && integer_onep (t))
490 	{
491 	  fold_undefer_and_ignore_overflow_warnings ();
492 	  return -1;
493 	}
494 
495       /* If VAL1 is a higher address than VAL2, return +1.  */
496       t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val2, val1);
497       if (t && integer_onep (t))
498 	{
499 	  fold_undefer_and_ignore_overflow_warnings ();
500 	  return 1;
501 	}
502 
503       /* If VAL1 is different than VAL2, return +2.  */
504       t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
505       fold_undefer_and_ignore_overflow_warnings ();
506       if (t && integer_onep (t))
507 	return 2;
508 
509       return -2;
510     }
511 }
512 
513 /* Compare values like compare_values_warnv.  */
514 
515 int
516 compare_values (tree val1, tree val2)
517 {
518   bool sop;
519   return compare_values_warnv (val1, val2, &sop);
520 }
521 
522 /* If BOUND will include a symbolic bound, adjust it accordingly,
523    otherwise leave it as is.
524 
525    CODE is the original operation that combined the bounds (PLUS_EXPR
526    or MINUS_EXPR).
527 
528    TYPE is the type of the original operation.
529 
530    SYM_OPn is the symbolic operand for OPn, if it has one.
531 
532    NEG_OPn is TRUE if the OPn was negated.  */
533 
534 static void
535 adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
536 		       tree sym_op0, tree sym_op1,
537 		       bool neg_op0, bool neg_op1)
538 {
539   bool minus_p = (code == MINUS_EXPR);
540   /* If the result bound is constant, we're done; otherwise, build the
541      symbolic bound.  */
542   if (sym_op0 == sym_op1)
543     ;
544   else if (sym_op0)
545     bound = build_symbolic_expr (type, sym_op0,
546 				 neg_op0, bound);
547   else if (sym_op1)
548     {
549       /* We may not negate if that might introduce
550 	 undefined overflow.  */
551       if (!minus_p
552 	  || neg_op1
553 	  || TYPE_OVERFLOW_WRAPS (type))
554 	bound = build_symbolic_expr (type, sym_op1,
555 				     neg_op1 ^ minus_p, bound);
556       else
557 	bound = NULL_TREE;
558     }
559 }
560 
561 /* Combine OP0 and OP1, which are two parts of a bound, into one wide
562    int bound according to CODE.  CODE is the operation combining the
563    bound (either a PLUS_EXPR or a MINUS_EXPR).
564 
565    TYPE is the type of the combine operation.
566 
567    WI is the wide int to store the result.
568 
569    OVF is -1 if an underflow occurred, +1 if an overflow occurred, or 0
570    if neither occurred.  */
571 
572 static void
573 combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
574 	       tree type, tree op0, tree op1)
575 {
576   bool minus_p = (code == MINUS_EXPR);
577   const signop sgn = TYPE_SIGN (type);
578   const unsigned int prec = TYPE_PRECISION (type);
579 
580   /* Combine the bounds, if any.  */
581   if (op0 && op1)
582     {
583       if (minus_p)
584 	wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
585       else
586 	wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
587     }
588   else if (op0)
589     wi = wi::to_wide (op0);
590   else if (op1)
591     {
592       if (minus_p)
593 	wi = wi::neg (wi::to_wide (op1), &ovf);
594       else
595 	wi = wi::to_wide (op1);
596     }
597   else
598     wi = wi::shwi (0, prec);
599 }
600 
601 /* Given a range in [WMIN, WMAX], adjust it for possible overflow and
602    store the result in KIND, MIN and MAX.
603 
604    TYPE is the type of the range.
605 
606    MIN_OVF and MAX_OVF indicate what type of overflow, if any,
607    occurred while originally calculating WMIN or WMAX.  -1 indicates
608    underflow.  +1 indicates overflow.  0 indicates neither.  */
609 
610 static void
611 set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max,
612 			       tree type,
613 			       const wide_int &wmin, const wide_int &wmax,
614 			       wi::overflow_type min_ovf,
615 			       wi::overflow_type max_ovf)
616 {
617   const signop sgn = TYPE_SIGN (type);
618   const unsigned int prec = TYPE_PRECISION (type);
619 
620   /* For one bit precision if max < min, then the swapped
621      range covers all values.  */
622   if (prec == 1 && wi::lt_p (wmax, wmin, sgn))
623     {
624       kind = VR_VARYING;
625       return;
626     }
627 
628   if (TYPE_OVERFLOW_WRAPS (type))
629     {
630       /* If overflow wraps, truncate the values and adjust the
631 	 range kind and bounds appropriately.  */
632       wide_int tmin = wide_int::from (wmin, prec, sgn);
633       wide_int tmax = wide_int::from (wmax, prec, sgn);
634       if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
635 	{
636 	  /* If the limits are swapped, we wrapped around and cover
637 	     the entire range.  */
638 	  if (wi::gt_p (tmin, tmax, sgn))
639 	    kind = VR_VARYING;
640 	  else
641 	    {
642 	      kind = VR_RANGE;
643 	      /* No overflow or both overflow or underflow.  The
644 		 range kind stays VR_RANGE.  */
645 	      min = wide_int_to_tree (type, tmin);
646 	      max = wide_int_to_tree (type, tmax);
647 	    }
648 	  return;
649 	}
650       else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
651 	       || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
652 	{
653 	  /* Min underflow or max overflow.  The range kind
654 	     changes to VR_ANTI_RANGE.  */
655 	  bool covers = false;
656 	  wide_int tem = tmin;
657 	  tmin = tmax + 1;
658 	  if (wi::cmp (tmin, tmax, sgn) < 0)
659 	    covers = true;
660 	  tmax = tem - 1;
661 	  if (wi::cmp (tmax, tem, sgn) > 0)
662 	    covers = true;
663 	  /* If the anti-range would cover nothing, drop to varying.
664 	     Likewise if the anti-range bounds are outside of the
665 	     type's values.  */
666 	  if (covers || wi::cmp (tmin, tmax, sgn) > 0)
667 	    {
668 	      kind = VR_VARYING;
669 	      return;
670 	    }
671 	  kind = VR_ANTI_RANGE;
672 	  min = wide_int_to_tree (type, tmin);
673 	  max = wide_int_to_tree (type, tmax);
674 	  return;
675 	}
676       else
677 	{
678 	  /* Other underflow and/or overflow, drop to VR_VARYING.  */
679 	  kind = VR_VARYING;
680 	  return;
681 	}
682     }
683   else
684     {
685       /* If overflow does not wrap, saturate to the type's min/max
686 	 value.  */
687       wide_int type_min = wi::min_value (prec, sgn);
688       wide_int type_max = wi::max_value (prec, sgn);
689       kind = VR_RANGE;
690       if (min_ovf == wi::OVF_UNDERFLOW)
691 	min = wide_int_to_tree (type, type_min);
692       else if (min_ovf == wi::OVF_OVERFLOW)
693 	min = wide_int_to_tree (type, type_max);
694       else
695 	min = wide_int_to_tree (type, wmin);
696 
697       if (max_ovf == wi::OVF_UNDERFLOW)
698 	max = wide_int_to_tree (type, type_min);
699       else if (max_ovf == wi::OVF_OVERFLOW)
700 	max = wide_int_to_tree (type, type_max);
701       else
702 	max = wide_int_to_tree (type, wmax);
703     }
704 }
705 
706 /* Fold two value ranges of a POINTER_PLUS_EXPR into VR.  */
707 
708 static void
709 extract_range_from_pointer_plus_expr (value_range *vr,
710 				      enum tree_code code,
711 				      tree expr_type,
712 				      const value_range *vr0,
713 				      const value_range *vr1)
714 {
715   gcc_checking_assert (POINTER_TYPE_P (expr_type)
716 		       && code == POINTER_PLUS_EXPR);
717   /* For pointer types, we are really only interested in asserting
718      whether the expression evaluates to non-NULL.
719      With -fno-delete-null-pointer-checks we need to be more
720      conservative.  Since some object might reside at address 0,
721      some offset could be added to it and the same offset
722      subtracted again, and the result would be NULL.
723      E.g.
724      static int a[12]; where &a[0] is NULL and
725      ptr = &a[6];
726      ptr -= 6;
727      ptr will be NULL here, even when there is POINTER_PLUS_EXPR
728      where the first range doesn't include zero and the second one
729      doesn't either.  As the second operand is sizetype (unsigned),
730      consider all ranges where the MSB could be set as possible
731      subtractions where the result might be NULL.  */
732   if ((!range_includes_zero_p (vr0)
733        || !range_includes_zero_p (vr1))
734       && !TYPE_OVERFLOW_WRAPS (expr_type)
735       && (flag_delete_null_pointer_checks
736 	  || (range_int_cst_p (vr1)
737 	      && !tree_int_cst_sign_bit (vr1->max ()))))
738     vr->set_nonzero (expr_type);
739   else if (vr0->zero_p () && vr1->zero_p ())
740     vr->set_zero (expr_type);
741   else
742     vr->set_varying (expr_type);
743 }
744 
745 /* Extract range information from a PLUS/MINUS_EXPR and store the
746    result in *VR.  */
747 
748 static void
749 extract_range_from_plus_minus_expr (value_range *vr,
750 				    enum tree_code code,
751 				    tree expr_type,
752 				    const value_range *vr0_,
753 				    const value_range *vr1_)
754 {
755   gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
756 
757   value_range vr0 = *vr0_, vr1 = *vr1_;
758   value_range vrtem0, vrtem1;
759 
760   /* Now canonicalize anti-ranges to ranges when they are not symbolic
761      and express ~[] op X as ([]' op X) U ([]'' op X).  */
762   if (vr0.kind () == VR_ANTI_RANGE
763       && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
764     {
765       extract_range_from_plus_minus_expr (vr, code, expr_type, &vrtem0, vr1_);
766       if (!vrtem1.undefined_p ())
767 	{
768 	  value_range vrres;
769 	  extract_range_from_plus_minus_expr (&vrres, code, expr_type,
770 					      &vrtem1, vr1_);
771 	  vr->union_ (&vrres);
772 	}
773       return;
774     }
775   /* Likewise for X op ~[].  */
776   if (vr1.kind () == VR_ANTI_RANGE
777       && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
778     {
779       extract_range_from_plus_minus_expr (vr, code, expr_type, vr0_, &vrtem0);
780       if (!vrtem1.undefined_p ())
781 	{
782 	  value_range vrres;
783 	  extract_range_from_plus_minus_expr (&vrres, code, expr_type,
784 					      vr0_, &vrtem1);
785 	  vr->union_ (&vrres);
786 	}
787       return;
788     }
789 
790   value_range_kind kind;
791   value_range_kind vr0_kind = vr0.kind (), vr1_kind = vr1.kind ();
792   tree vr0_min = vr0.min (), vr0_max = vr0.max ();
793   tree vr1_min = vr1.min (), vr1_max = vr1.max ();
794   tree min = NULL_TREE, max = NULL_TREE;
795 
796   /* This will normalize things such that calculating
797      [0,0] - VR_VARYING is not dropped to varying, but is
798      calculated as [MIN+1, MAX].  */
799   if (vr0.varying_p ())
800     {
801       vr0_kind = VR_RANGE;
802       vr0_min = vrp_val_min (expr_type);
803       vr0_max = vrp_val_max (expr_type);
804     }
805   if (vr1.varying_p ())
806     {
807       vr1_kind = VR_RANGE;
808       vr1_min = vrp_val_min (expr_type);
809       vr1_max = vrp_val_max (expr_type);
810     }
811 
812   const bool minus_p = (code == MINUS_EXPR);
813   tree min_op0 = vr0_min;
814   tree min_op1 = minus_p ? vr1_max : vr1_min;
815   tree max_op0 = vr0_max;
816   tree max_op1 = minus_p ? vr1_min : vr1_max;
817   tree sym_min_op0 = NULL_TREE;
818   tree sym_min_op1 = NULL_TREE;
819   tree sym_max_op0 = NULL_TREE;
820   tree sym_max_op1 = NULL_TREE;
821   bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
822 
823   neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;
824 
825   /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
826      single-symbolic ranges, try to compute the precise resulting range,
827      but only if we know that this resulting range will also be constant
828      or single-symbolic.  */
829   if (vr0_kind == VR_RANGE && vr1_kind == VR_RANGE
830       && (TREE_CODE (min_op0) == INTEGER_CST
831 	  || (sym_min_op0
832 	      = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
833       && (TREE_CODE (min_op1) == INTEGER_CST
834 	  || (sym_min_op1
835 	      = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
836       && (!(sym_min_op0 && sym_min_op1)
837 	  || (sym_min_op0 == sym_min_op1
838 	      && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
839       && (TREE_CODE (max_op0) == INTEGER_CST
840 	  || (sym_max_op0
841 	      = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
842       && (TREE_CODE (max_op1) == INTEGER_CST
843 	  || (sym_max_op1
844 	      = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
845       && (!(sym_max_op0 && sym_max_op1)
846 	  || (sym_max_op0 == sym_max_op1
847 	      && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
848     {
849       wide_int wmin, wmax;
850       wi::overflow_type min_ovf = wi::OVF_NONE;
851       wi::overflow_type max_ovf = wi::OVF_NONE;
852 
853       /* Build the bounds.  */
854       combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
855       combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);
856 
857       /* If the resulting range will be symbolic, we need to eliminate any
858 	 explicit or implicit overflow introduced in the above computation
859 	 because compare_values could make an incorrect use of it.  That's
860 	 why we require one of the ranges to be a singleton.  */
861       if ((sym_min_op0 != sym_min_op1 || sym_max_op0 != sym_max_op1)
862 	  && ((bool)min_ovf || (bool)max_ovf
863 	      || (min_op0 != max_op0 && min_op1 != max_op1)))
864 	{
865 	  vr->set_varying (expr_type);
866 	  return;
867 	}
868 
869       /* Adjust the range for possible overflow.  */
870       set_value_range_with_overflow (kind, min, max, expr_type,
871 				     wmin, wmax, min_ovf, max_ovf);
872       if (kind == VR_VARYING)
873 	{
874 	  vr->set_varying (expr_type);
875 	  return;
876 	}
877 
878       /* Build the symbolic bounds if needed.  */
879       adjust_symbolic_bound (min, code, expr_type,
880 			     sym_min_op0, sym_min_op1,
881 			     neg_min_op0, neg_min_op1);
882       adjust_symbolic_bound (max, code, expr_type,
883 			     sym_max_op0, sym_max_op1,
884 			     neg_max_op0, neg_max_op1);
885     }
886   else
887     {
888       /* For other cases, for example if we have a PLUS_EXPR with two
889 	 VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
890 	 to compute a precise range for such a case.
891 	 ???  Even general mixed range kind operations can be expressed
892 	 by, for example, transforming ~[3, 5] + [1, 2] to range-only
893 	 operations and a union primitive:
894 	 [-INF, 2] + [1, 2]  U  [5, +INF] + [1, 2]
895 	 [-INF+1, 4]     U    [6, +INF(OVF)]
896 	 though usually the union is not exactly representable with
897 	 a single range or anti-range as the above is
898 	 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
899 	 but one could use a scheme similar to equivalences for this. */
900       vr->set_varying (expr_type);
901       return;
902     }
903 
904   /* If either MIN or MAX overflowed, then set the resulting range to
905      VARYING.  */
906   if (min == NULL_TREE
907       || TREE_OVERFLOW_P (min)
908       || max == NULL_TREE
909       || TREE_OVERFLOW_P (max))
910     {
911       vr->set_varying (expr_type);
912       return;
913     }
914 
915   int cmp = compare_values (min, max);
916   if (cmp == -2 || cmp == 1)
917     {
918       /* If the new range has its limits swapped around (MIN > MAX),
919 	 then the operation caused one of them to wrap around, mark
920 	 the new range VARYING.  */
921       vr->set_varying (expr_type);
922     }
923   else
924     vr->set (min, max, kind);
925 }
926 
927 /* Return the range-ops handler for CODE and EXPR_TYPE.  If no
928    suitable operator is found, return NULL and set VR to VARYING.  */
929 
930 static const range_operator *
931 get_range_op_handler (value_range *vr,
932 		      enum tree_code code,
933 		      tree expr_type)
934 {
935   const range_operator *op = range_op_handler (code, expr_type);
936   if (!op)
937     vr->set_varying (expr_type);
938   return op;
939 }
940 
941 /* If the types passed are supported, return TRUE, otherwise set VR to
942    VARYING and return FALSE.  */
943 
944 static bool
945 supported_types_p (value_range *vr,
946 		   tree type0,
947 		   tree type1 = NULL)
948 {
949   if (!value_range::supports_type_p (type0)
950       || (type1 && !value_range::supports_type_p (type1)))
951     {
952       vr->set_varying (type0);
953       return false;
954     }
955   return true;
956 }
957 
958 /* If any of the ranges passed are defined, return TRUE, otherwise set
959    VR to UNDEFINED and return FALSE.  */
960 
961 static bool
962 defined_ranges_p (value_range *vr,
963 		  const value_range *vr0, const value_range *vr1 = NULL)
964 {
965   if (vr0->undefined_p () && (!vr1 || vr1->undefined_p ()))
966     {
967       vr->set_undefined ();
968       return false;
969     }
970   return true;
971 }
972 
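/* Return a copy of VR, or a VARYING range of type EXPR_TYPE if VR is
   undefined.  */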
973 static value_range
974 drop_undefines_to_varying (const value_range *vr, tree expr_type)
975 {
976   if (vr->undefined_p ())
977     return value_range (expr_type);
978   else
979     return *vr;
980 }
981 
982 /* If any operand is symbolic, perform a binary operation on them and
983    return TRUE, otherwise return FALSE.  */
984 
985 static bool
986 range_fold_binary_symbolics_p (value_range *vr,
987 			       tree_code code,
988 			       tree expr_type,
989 			       const value_range *vr0_,
990 			       const value_range *vr1_)
991 {
992   if (vr0_->symbolic_p () || vr1_->symbolic_p ())
993     {
994       value_range vr0 = drop_undefines_to_varying (vr0_, expr_type);
995       value_range vr1 = drop_undefines_to_varying (vr1_, expr_type);
996       if ((code == PLUS_EXPR || code == MINUS_EXPR))
997 	{
998 	  extract_range_from_plus_minus_expr (vr, code, expr_type,
999 					      &vr0, &vr1);
1000 	  return true;
1001 	}
1002       if (POINTER_TYPE_P (expr_type) && code == POINTER_PLUS_EXPR)
1003 	{
1004 	  extract_range_from_pointer_plus_expr (vr, code, expr_type,
1005 						&vr0, &vr1);
1006 	  return true;
1007 	}
1008       const range_operator *op = get_range_op_handler (vr, code, expr_type);
1009       vr0.normalize_symbolics ();
1010       vr1.normalize_symbolics ();
1011       return op->fold_range (*vr, expr_type, vr0, vr1);
1012     }
1013   return false;
1014 }
1015 
1016 /* If operand is symbolic, perform a unary operation on it and return
1017    TRUE, otherwise return FALSE.  */
1018 
1019 static bool
1020 range_fold_unary_symbolics_p (value_range *vr,
1021 			      tree_code code,
1022 			      tree expr_type,
1023 			      const value_range *vr0)
1024 {
1025   if (vr0->symbolic_p ())
1026     {
1027       if (code == NEGATE_EXPR)
1028 	{
1029 	  /* -X is simply 0 - X.  */
1030 	  value_range zero;
1031 	  zero.set_zero (vr0->type ());
1032 	  range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &zero, vr0);
1033 	  return true;
1034 	}
1035       if (code == BIT_NOT_EXPR)
1036 	{
1037 	  /* ~X is simply -1 - X.  */
1038 	  value_range minusone;
1039 	  minusone.set (build_int_cst (vr0->type (), -1));
1040 	  range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &minusone, vr0);
1041 	  return true;
1042 	}
1043       const range_operator *op = get_range_op_handler (vr, code, expr_type);
1044       value_range vr0_cst (*vr0);
1045       vr0_cst.normalize_symbolics ();
1046       return op->fold_range (*vr, expr_type, vr0_cst, value_range (expr_type));
1047     }
1048   return false;
1049 }
1050 
1051 /* Perform a binary operation on a pair of ranges.  */
1052 
1053 void
1054 range_fold_binary_expr (value_range *vr,
1055 			enum tree_code code,
1056 			tree expr_type,
1057 			const value_range *vr0_,
1058 			const value_range *vr1_)
1059 {
1060   if (!supported_types_p (vr, expr_type)
1061       || !defined_ranges_p (vr, vr0_, vr1_))
1062     return;
1063   const range_operator *op = get_range_op_handler (vr, code, expr_type);
1064   if (!op)
1065     return;
1066 
1067   if (range_fold_binary_symbolics_p (vr, code, expr_type, vr0_, vr1_))
1068     return;
1069 
1070   value_range vr0 (*vr0_);
1071   value_range vr1 (*vr1_);
1072   if (vr0.undefined_p ())
1073     vr0.set_varying (expr_type);
1074   if (vr1.undefined_p ())
1075     vr1.set_varying (expr_type);
1076   vr0.normalize_addresses ();
1077   vr1.normalize_addresses ();
1078   op->fold_range (*vr, expr_type, vr0, vr1);
1079 }
1080 
1081 /* Perform a unary operation on a range.  */
1082 
1083 void
1084 range_fold_unary_expr (value_range *vr,
1085 		       enum tree_code code, tree expr_type,
1086 		       const value_range *vr0,
1087 		       tree vr0_type)
1088 {
1089   if (!supported_types_p (vr, expr_type, vr0_type)
1090       || !defined_ranges_p (vr, vr0))
1091     return;
1092   const range_operator *op = get_range_op_handler (vr, code, expr_type);
1093   if (!op)
1094     return;
1095 
1096   if (range_fold_unary_symbolics_p (vr, code, expr_type, vr0))
1097     return;
1098 
1099   value_range vr0_cst (*vr0);
1100   vr0_cst.normalize_addresses ();
1101   op->fold_range (*vr, expr_type, vr0_cst, value_range (expr_type));
1102 }
1103 
1104 /* If the range of values taken by OP can be inferred after STMT executes,
1105    return the comparison code (COMP_CODE_P) and value (VAL_P) that
1106    describes the inferred range.  Return true if a range could be
1107    inferred.  */
1108 
1109 bool
1110 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
1111 {
1112   *val_p = NULL_TREE;
1113   *comp_code_p = ERROR_MARK;
1114 
1115   /* Do not attempt to infer anything in names that flow through
1116      abnormal edges.  */
1117   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
1118     return false;
1119 
1120   /* If STMT is the last statement of a basic block with no normal
1121      successors, there is no point inferring anything about any of its
1122      operands.  We would not be able to find a proper insertion point
1123      for the assertion, anyway.  */
1124   if (stmt_ends_bb_p (stmt))
1125     {
1126       edge_iterator ei;
1127       edge e;
1128 
1129       FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
1130 	if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
1131 	  break;
1132       if (e == NULL)
1133 	return false;
1134     }
1135 
1136   if (infer_nonnull_range (stmt, op))
1137     {
1138       *val_p = build_int_cst (TREE_TYPE (op), 0);
1139       *comp_code_p = NE_EXPR;
1140       return true;
1141     }
1142 
1143   return false;
1144 }
1145 
1146 /* Dump assert_info structure.  */
1147 
1148 void
1149 dump_assert_info (FILE *file, const assert_info &assert)
1150 {
1151   fprintf (file, "Assert for: ");
1152   print_generic_expr (file, assert.name);
1153   fprintf (file, "\n\tPREDICATE: expr=[");
1154   print_generic_expr (file, assert.expr);
1155   fprintf (file, "] %s ", get_tree_code_name (assert.comp_code));
1156   fprintf (file, "val=[");
1157   print_generic_expr (file, assert.val);
1158   fprintf (file, "]\n\n");
1159 }
1160 
1161 DEBUG_FUNCTION void
1162 debug (const assert_info &assert)
1163 {
1164   dump_assert_info (stderr, assert);
1165 }
1166 
1167 /* Dump a vector of assert_info's.  */
1168 
1169 void
1170 dump_asserts_info (FILE *file, const vec<assert_info> &asserts)
1171 {
1172   for (unsigned i = 0; i < asserts.length (); ++i)
1173     {
1174       dump_assert_info (file, asserts[i]);
1175       fprintf (file, "\n");
1176     }
1177 }
1178 
1179 DEBUG_FUNCTION void
1180 debug (const vec<assert_info> &asserts)
1181 {
1182   dump_asserts_info (stderr, asserts);
1183 }
1184 
1185 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS.  */
1186 
1187 static void
1188 add_assert_info (vec<assert_info> &asserts,
1189 		 tree name, tree expr, enum tree_code comp_code, tree val)
1190 {
1191   assert_info info;
1192   info.comp_code = comp_code;
1193   info.name = name;
1194   if (TREE_OVERFLOW_P (val))
1195     val = drop_tree_overflow (val);
1196   info.val = val;
1197   info.expr = expr;
1198   asserts.safe_push (info);
1199   if (dump_enabled_p ())
1200     dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
1201 		 "Adding assert for %T from %T %s %T\n",
1202 		 name, expr, op_symbol_code (comp_code), val);
1203 }
1204 
1205 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
1206    Extract a suitable test code and value and store them into *CODE_P and
1207    *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
1208 
1209    If no extraction was possible, return FALSE, otherwise return TRUE.
1210 
1211    If INVERT is true, then we invert the result stored into *CODE_P.  */
1212 
1213 static bool
1214 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
1215 					 tree cond_op0, tree cond_op1,
1216 					 bool invert, enum tree_code *code_p,
1217 					 tree *val_p)
1218 {
1219   enum tree_code comp_code;
1220   tree val;
1221 
1222   /* We have a comparison of the form NAME COMP VAL
1223      or VAL COMP NAME.  */
1224   if (name == cond_op1)
1225     {
1226       /* If the predicate is of the form VAL COMP NAME, flip
1227 	 COMP around because we need to register NAME as the
1228 	 first operand in the predicate.  */
1229       comp_code = swap_tree_comparison (cond_code);
1230       val = cond_op0;
1231     }
1232   else if (name == cond_op0)
1233     {
1234       /* The comparison is of the form NAME COMP VAL, so the
1235 	 comparison code remains unchanged.  */
1236       comp_code = cond_code;
1237       val = cond_op1;
1238     }
1239   else
1240     gcc_unreachable ();
1241 
1242   /* Invert the comparison code as necessary.  */
1243   if (invert)
1244     comp_code = invert_tree_comparison (comp_code, 0);
1245 
1246   /* VRP only handles integral and pointer types.  */
1247   if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
1248       && ! POINTER_TYPE_P (TREE_TYPE (val)))
1249     return false;
1250 
1251   /* Do not register always-false predicates.
1252      FIXME:  this works around a limitation in fold() when dealing with
1253      enumerations.  Given 'enum { N1, N2 } x;', fold will not
1254      fold 'if (x > N2)' to 'if (0)'.  */
1255   if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
1256       && INTEGRAL_TYPE_P (TREE_TYPE (val)))
1257     {
1258       tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
1259       tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
1260 
1261       if (comp_code == GT_EXPR
1262 	  && (!max
1263 	      || compare_values (val, max) == 0))
1264 	return false;
1265 
1266       if (comp_code == LT_EXPR
1267 	  && (!min
1268 	      || compare_values (val, min) == 0))
1269 	return false;
1270     }
1271   *code_p = comp_code;
1272   *val_p = val;
1273   return true;
1274 }
1275 
1276 /* Find the smallest RES where RES > VAL && (RES & MASK) == RES, if any
1277    (otherwise return VAL).  VAL and MASK must be zero-extended for
1278    precision PREC.  If SGNBIT is non-zero, first xor VAL with SGNBIT
1279    (to transform signed values into unsigned) and at the end xor
1280    SGNBIT back.  */
1281 
1282 wide_int
1283 masked_increment (const wide_int &val_in, const wide_int &mask,
1284 		  const wide_int &sgnbit, unsigned int prec)
1285 {
1286   wide_int bit = wi::one (prec), res;
1287   unsigned int i;
1288 
1289   wide_int val = val_in ^ sgnbit;
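  /* Try each bit of MASK from least to most significant: add BIT to VAL,
     clear all bits below BIT and any bits outside MASK, and return the
     first such candidate that exceeds VAL (with SGNBIT xored back in).  */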
1290   for (i = 0; i < prec; i++, bit += bit)
1291     {
1292       res = mask;
1293       if ((res & bit) == 0)
1294 	continue;
1295       res = bit - 1;
1296       res = wi::bit_and_not (val + bit, res);
1297       res &= mask;
1298       if (wi::gtu_p (res, val))
1299 	return res ^ sgnbit;
1300     }
1301   return val ^ sgnbit;
1302 }
1303 
1304 /* Helper for overflow_comparison_p
1305 
1306    OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
1307    OP1's defining statement to see if it ultimately has the form
1308    OP0 CODE (OP0 PLUS INTEGER_CST)
1309 
1310    If so, return TRUE indicating this is an overflow test and store into
1311    *NEW_CST an updated constant that can be used in a narrowed range test.
1312 
1313    REVERSED indicates if the comparison was originally:
1314 
1315    OP1 CODE' OP0.
1316 
1317    This affects how we build the updated constant.  */
1318 
1319 static bool
1320 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
1321 		         bool follow_assert_exprs, bool reversed, tree *new_cst)
1322 {
1323   /* See if this is a relational operation between two SSA_NAMES with
1324      unsigned, overflow wrapping values.  If so, check it more deeply.  */
1325   if ((code == LT_EXPR || code == LE_EXPR
1326        || code == GE_EXPR || code == GT_EXPR)
1327       && TREE_CODE (op0) == SSA_NAME
1328       && TREE_CODE (op1) == SSA_NAME
1329       && INTEGRAL_TYPE_P (TREE_TYPE (op0))
1330       && TYPE_UNSIGNED (TREE_TYPE (op0))
1331       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
1332     {
1333       gimple *op1_def = SSA_NAME_DEF_STMT (op1);
1334 
1335       /* If requested, follow any ASSERT_EXPRs backwards for OP1.  */
1336       if (follow_assert_exprs)
1337 	{
1338 	  while (gimple_assign_single_p (op1_def)
1339 		 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
1340 	    {
1341 	      op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
1342 	      if (TREE_CODE (op1) != SSA_NAME)
1343 		break;
1344 	      op1_def = SSA_NAME_DEF_STMT (op1);
1345 	    }
1346 	}
1347 
1348       /* Now look at the defining statement of OP1 to see if it adds
1349 	 or subtracts a nonzero constant from another operand.  */
1350       if (op1_def
1351 	  && is_gimple_assign (op1_def)
1352 	  && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
1353 	  && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
1354 	  && !integer_zerop (gimple_assign_rhs2 (op1_def)))
1355 	{
1356 	  tree target = gimple_assign_rhs1 (op1_def);
1357 
1358 	  /* If requested, follow ASSERT_EXPRs backwards for op0 looking
1359 	     for one where TARGET appears on the RHS.  */
1360 	  if (follow_assert_exprs)
1361 	    {
1362 	      /* Now see if that "other operand" is op0, following the chain
1363 		 of ASSERT_EXPRs if necessary.  */
1364 	      gimple *op0_def = SSA_NAME_DEF_STMT (op0);
1365 	      while (op0 != target
1366 		     && gimple_assign_single_p (op0_def)
1367 		     && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
1368 		{
1369 		  op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
1370 		  if (TREE_CODE (op0) != SSA_NAME)
1371 		    break;
1372 		  op0_def = SSA_NAME_DEF_STMT (op0);
1373 		}
1374 	    }
1375 
1376 	  /* If we did not find our target SSA_NAME, then this is not
1377 	     an overflow test.  */
1378 	  if (op0 != target)
1379 	    return false;
1380 
1381 	  tree type = TREE_TYPE (op0);
1382 	  wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
1383 	  tree inc = gimple_assign_rhs2 (op1_def);
1384 	  if (reversed)
1385 	    *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
1386 	  else
1387 	    *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
1388 	  return true;
1389 	}
1390     }
1391   return false;
1392 }
1393 
1394 /* OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
1395    OP1's defining statement to see if it ultimately has the form
1396    OP0 CODE (OP0 PLUS INTEGER_CST)
1397 
1398    If so, return TRUE indicating this is an overflow test and store into
1399    *NEW_CST an updated constant that can be used in a narrowed range test.
1400 
1401    These statements are left as-is in the IL to facilitate discovery of
1402    {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline.  But
1403    the alternate range representation is often useful within VRP.  */
1404 
1405 bool
1406 overflow_comparison_p (tree_code code, tree name, tree val,
1407 		       bool use_equiv_p, tree *new_cst)
1408 {
1409   if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
1410     return true;
1411   return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
1412 				  use_equiv_p, true, new_cst);
1413 }
1414 
1415 
1416 /* Try to register an edge assertion for SSA name NAME on edge E for
1417    the condition COND contributing to the conditional jump pointed to by BSI.
1418    Invert the condition COND if INVERT is true.  */
1419 
1420 static void
1421 register_edge_assert_for_2 (tree name, edge e,
1422 			    enum tree_code cond_code,
1423 			    tree cond_op0, tree cond_op1, bool invert,
1424 			    vec<assert_info> &asserts)
1425 {
1426   tree val;
1427   enum tree_code comp_code;
1428 
1429   if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
1430 						cond_op0,
1431 						cond_op1,
1432 						invert, &comp_code, &val))
1433     return;
1434 
1435   /* Queue the assert.  */
1436   tree x;
1437   if (overflow_comparison_p (comp_code, name, val, false, &x))
1438     {
1439       enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
1440 				 ? GT_EXPR : LE_EXPR);
1441       add_assert_info (asserts, name, name, new_code, x);
1442     }
1443   add_assert_info (asserts, name, name, comp_code, val);
1444 
1445   /* In the case of NAME <= CST and NAME being defined as
1446      NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
1447      and NAME2 <= CST - CST2.  We can do the same for NAME > CST.
1448      This catches range and anti-range tests.  */
1449   if ((comp_code == LE_EXPR
1450        || comp_code == GT_EXPR)
1451       && TREE_CODE (val) == INTEGER_CST
1452       && TYPE_UNSIGNED (TREE_TYPE (val)))
1453     {
1454       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
1455       tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
1456 
1457       /* Extract CST2 from the (optional) addition.  */
1458       if (is_gimple_assign (def_stmt)
1459 	  && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
1460 	{
1461 	  name2 = gimple_assign_rhs1 (def_stmt);
1462 	  cst2 = gimple_assign_rhs2 (def_stmt);
1463 	  if (TREE_CODE (name2) == SSA_NAME
1464 	      && TREE_CODE (cst2) == INTEGER_CST)
1465 	    def_stmt = SSA_NAME_DEF_STMT (name2);
1466 	}
1467 
1468       /* Extract NAME2 from the (optional) sign-changing cast.  */
1469       if (gassign *ass = dyn_cast <gassign *> (def_stmt))
1470 	{
1471 	  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (ass))
1472 	      && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (ass)))
1473 	      && (TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (ass)))
1474 		  == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (ass)))))
1475 	    name3 = gimple_assign_rhs1 (ass);
1476 	}
1477 
1478       /* If name3 is used later, create an ASSERT_EXPR for it.  */
1479       if (name3 != NULL_TREE
1480       	  && TREE_CODE (name3) == SSA_NAME
1481 	  && (cst2 == NULL_TREE
1482 	      || TREE_CODE (cst2) == INTEGER_CST)
1483 	  && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
1484 	{
1485 	  tree tmp;
1486 
1487 	  /* Build an expression for the range test.  */
1488 	  tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
1489 	  if (cst2 != NULL_TREE)
1490 	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
1491 	  add_assert_info (asserts, name3, tmp, comp_code, val);
1492 	}
1493 
1494       /* If name2 is used later, create an ASSERT_EXPR for it.  */
1495       if (name2 != NULL_TREE
1496       	  && TREE_CODE (name2) == SSA_NAME
1497 	  && TREE_CODE (cst2) == INTEGER_CST
1498 	  && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
1499 	{
1500 	  tree tmp;
1501 
1502 	  /* Build an expression for the range test.  */
1503 	  tmp = name2;
1504 	  if (TREE_TYPE (name) != TREE_TYPE (name2))
1505 	    tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
1506 	  if (cst2 != NULL_TREE)
1507 	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
1508 	  add_assert_info (asserts, name2, tmp, comp_code, val);
1509 	}
1510     }
1511 
1512   /* In the case of post-in/decrement tests like if (i++) ... and uses
1513      of the in/decremented value on the edge the extra name we want to
1514      assert for is not on the def chain of the name compared.  Instead
1515      it is in the set of use stmts.
1516      Similar cases happen for conversions that were simplified through
1517      fold_{sign_changed,widened}_comparison.  */
1518   if ((comp_code == NE_EXPR
1519        || comp_code == EQ_EXPR)
1520       && TREE_CODE (val) == INTEGER_CST)
1521     {
1522       imm_use_iterator ui;
1523       gimple *use_stmt;
1524       FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
1525 	{
1526 	  if (!is_gimple_assign (use_stmt))
1527 	    continue;
1528 
1529 	  /* Only consider use-stmts whose block dominates the predecessor.  */
1530 	  if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
1531 	    continue;
1532 
1533 	  tree name2 = gimple_assign_lhs (use_stmt);
1534 	  if (TREE_CODE (name2) != SSA_NAME)
1535 	    continue;
1536 
1537 	  enum tree_code code = gimple_assign_rhs_code (use_stmt);
1538 	  tree cst;
1539 	  if (code == PLUS_EXPR
1540 	      || code == MINUS_EXPR)
1541 	    {
1542 	      cst = gimple_assign_rhs2 (use_stmt);
1543 	      if (TREE_CODE (cst) != INTEGER_CST)
1544 		continue;
1545 	      cst = int_const_binop (code, val, cst);
1546 	    }
1547 	  else if (CONVERT_EXPR_CODE_P (code))
1548 	    {
1549 	      /* For truncating conversions we cannot record
1550 		 an inequality.  */
1551 	      if (comp_code == NE_EXPR
1552 		  && (TYPE_PRECISION (TREE_TYPE (name2))
1553 		      < TYPE_PRECISION (TREE_TYPE (name))))
1554 		continue;
1555 	      cst = fold_convert (TREE_TYPE (name2), val);
1556 	    }
1557 	  else
1558 	    continue;
1559 
1560 	  if (TREE_OVERFLOW_P (cst))
1561 	    cst = drop_tree_overflow (cst);
1562 	  add_assert_info (asserts, name2, name2, comp_code, cst);
1563 	}
1564     }
1565 
1566   if (TREE_CODE_CLASS (comp_code) == tcc_comparison
1567       && TREE_CODE (val) == INTEGER_CST)
1568     {
1569       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
1570       tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
1571       tree val2 = NULL_TREE;
1572       unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
1573       wide_int mask = wi::zero (prec);
1574       unsigned int nprec = prec;
1575       enum tree_code rhs_code = ERROR_MARK;
1576 
1577       if (is_gimple_assign (def_stmt))
1578 	rhs_code = gimple_assign_rhs_code (def_stmt);
1579 
1580       /* In the case of NAME != CST1 where NAME = A +- CST2 we can
1581          assert that A != CST1 -+ CST2.  */
1582       if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
1583 	  && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
1584 	{
1585 	  tree op0 = gimple_assign_rhs1 (def_stmt);
1586 	  tree op1 = gimple_assign_rhs2 (def_stmt);
1587 	  if (TREE_CODE (op0) == SSA_NAME
1588 	      && TREE_CODE (op1) == INTEGER_CST)
1589 	    {
1590 	      enum tree_code reverse_op = (rhs_code == PLUS_EXPR
1591 					   ? MINUS_EXPR : PLUS_EXPR);
1592 	      op1 = int_const_binop (reverse_op, val, op1);
1593 	      if (TREE_OVERFLOW (op1))
1594 		op1 = drop_tree_overflow (op1);
1595 	      add_assert_info (asserts, op0, op0, comp_code, op1);
1596 	    }
1597 	}
1598 
1599       /* Add asserts for NAME cmp CST and NAME being defined
1600 	 as NAME = (int) NAME2.  */
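      /* For example, for NAME = (int) NAME2 with a 32-bit unsigned NAME2
	 and a test NAME <= 5, the code below records the unsigned test
	 (NAME2 + 0x80000000) <= 0x80000005.  */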
1601       if (!TYPE_UNSIGNED (TREE_TYPE (val))
1602 	  && (comp_code == LE_EXPR || comp_code == LT_EXPR
1603 	      || comp_code == GT_EXPR || comp_code == GE_EXPR)
1604 	  && gimple_assign_cast_p (def_stmt))
1605 	{
1606 	  name2 = gimple_assign_rhs1 (def_stmt);
1607 	  if (CONVERT_EXPR_CODE_P (rhs_code)
1608 	      && TREE_CODE (name2) == SSA_NAME
1609 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
1610 	      && TYPE_UNSIGNED (TREE_TYPE (name2))
1611 	      && prec == TYPE_PRECISION (TREE_TYPE (name2))
1612 	      && (comp_code == LE_EXPR || comp_code == GT_EXPR
1613 		  || !tree_int_cst_equal (val,
1614 					  TYPE_MIN_VALUE (TREE_TYPE (val)))))
1615 	    {
1616 	      tree tmp, cst;
1617 	      enum tree_code new_comp_code = comp_code;
1618 
1619 	      cst = fold_convert (TREE_TYPE (name2),
1620 				  TYPE_MIN_VALUE (TREE_TYPE (val)));
1621 	      /* Build an expression for the range test.  */
1622 	      tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
1623 	      cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
1624 				 fold_convert (TREE_TYPE (name2), val));
1625 	      if (comp_code == LT_EXPR || comp_code == GE_EXPR)
1626 		{
1627 		  new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
1628 		  cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
1629 				     build_int_cst (TREE_TYPE (name2), 1));
1630 		}
1631 	      add_assert_info (asserts, name2, tmp, new_comp_code, cst);
1632 	    }
1633 	}
1634 
1635       /* Add asserts for NAME cmp CST and NAME being defined as
1636 	 NAME = NAME2 >> CST2.
1637 
1638 	 Extract CST2 from the right shift.  */
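      /* For example, for NAME = NAME2 >> 3 and a test NAME == 5 this
	 records (NAME2 - 40) <= 7 computed in the unsigned type, i.e.
	 NAME2 is in [40, 47].  */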
1639       if (rhs_code == RSHIFT_EXPR)
1640 	{
1641 	  name2 = gimple_assign_rhs1 (def_stmt);
1642 	  cst2 = gimple_assign_rhs2 (def_stmt);
1643 	  if (TREE_CODE (name2) == SSA_NAME
1644 	      && tree_fits_uhwi_p (cst2)
1645 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
1646 	      && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
1647 	      && type_has_mode_precision_p (TREE_TYPE (val)))
1648 	    {
1649 	      mask = wi::mask (tree_to_uhwi (cst2), false, prec);
1650 	      val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
1651 	    }
1652 	}
1653       if (val2 != NULL_TREE
1654 	  && TREE_CODE (val2) == INTEGER_CST
1655 	  && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
1656 					    TREE_TYPE (val),
1657 					    val2, cst2), val))
1658 	{
1659 	  enum tree_code new_comp_code = comp_code;
1660 	  tree tmp, new_val;
1661 
1662 	  tmp = name2;
1663 	  if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
1664 	    {
1665 	      if (!TYPE_UNSIGNED (TREE_TYPE (val)))
1666 		{
1667 		  tree type = build_nonstandard_integer_type (prec, 1);
1668 		  tmp = build1 (NOP_EXPR, type, name2);
1669 		  val2 = fold_convert (type, val2);
1670 		}
1671 	      tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
1672 	      new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
1673 	      new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
1674 	    }
1675 	  else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
1676 	    {
1677 	      wide_int minval
1678 		= wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
1679 	      new_val = val2;
1680 	      if (minval == wi::to_wide (new_val))
1681 		new_val = NULL_TREE;
1682 	    }
1683 	  else
1684 	    {
1685 	      wide_int maxval
1686 		= wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
1687 	      mask |= wi::to_wide (val2);
1688 	      if (wi::eq_p (mask, maxval))
1689 		new_val = NULL_TREE;
1690 	      else
1691 		new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
1692 	    }
1693 
1694 	  if (new_val)
1695 	    add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
1696 	}
1697 
1698       /* If we have a conversion that doesn't change the value of the source,
1699          simply register the same assert for it.  */
1700       if (CONVERT_EXPR_CODE_P (rhs_code))
1701 	{
1702 	  value_range vr;
1703 	  tree rhs1 = gimple_assign_rhs1 (def_stmt);
1704 	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1705 	      && TREE_CODE (rhs1) == SSA_NAME
1706 	      /* Make sure the relation preserves the upper/lower boundary of
1707 	         the range conservatively.  */
1708 	      && (comp_code == NE_EXPR
1709 		  || comp_code == EQ_EXPR
1710 		  || (TYPE_SIGN (TREE_TYPE (name))
1711 		      == TYPE_SIGN (TREE_TYPE (rhs1)))
1712 		  || ((comp_code == LE_EXPR
1713 		       || comp_code == LT_EXPR)
1714 		      && !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
1715 		  || ((comp_code == GE_EXPR
1716 		       || comp_code == GT_EXPR)
1717 		      && TYPE_UNSIGNED (TREE_TYPE (rhs1))))
1718 	      /* And the conversion does not alter the value we compare
1719 	         against and all values in rhs1 can be represented in
1720 		 the converted to type.  */
1721 	      && int_fits_type_p (val, TREE_TYPE (rhs1))
1722 	      && ((TYPE_PRECISION (TREE_TYPE (name))
1723 		   > TYPE_PRECISION (TREE_TYPE (rhs1)))
1724 		  || ((get_range_query (cfun)->range_of_expr (vr, rhs1)
1725 		       && vr.kind () == VR_RANGE)
1726 		      && wi::fits_to_tree_p
1727 			   (widest_int::from (vr.lower_bound (),
1728 					      TYPE_SIGN (TREE_TYPE (rhs1))),
1729 			    TREE_TYPE (name))
1730 		      && wi::fits_to_tree_p
1731 			   (widest_int::from (vr.upper_bound (),
1732 					      TYPE_SIGN (TREE_TYPE (rhs1))),
1733 			    TREE_TYPE (name)))))
1734 	    add_assert_info (asserts, rhs1, rhs1,
1735 		 	     comp_code, fold_convert (TREE_TYPE (rhs1), val));
1736 	}
1737 
1738       /* Add asserts for NAME cmp CST and NAME being defined as
1739 	 NAME = NAME2 & CST2.
1740 
1741 	 Extract CST2 from the and.
1742 
1743 	 Also handle
1744 	 NAME = (unsigned) NAME2;
1745 	 casts where NAME's type is unsigned and has smaller precision
1746 	 than NAME2's type as if it was NAME = NAME2 & MASK.  */
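      /* For example, for unsigned char X a test (X & 0xFC) == 8 lets us
	 record the equivalent of (X - 8) <= 3 in unsigned arithmetic,
	 i.e. X is in [8, 11].  */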
1747       names[0] = NULL_TREE;
1748       names[1] = NULL_TREE;
1749       cst2 = NULL_TREE;
1750       if (rhs_code == BIT_AND_EXPR
1751 	  || (CONVERT_EXPR_CODE_P (rhs_code)
1752 	      && INTEGRAL_TYPE_P (TREE_TYPE (val))
1753 	      && TYPE_UNSIGNED (TREE_TYPE (val))
1754 	      && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
1755 		 > prec))
1756 	{
1757 	  name2 = gimple_assign_rhs1 (def_stmt);
1758 	  if (rhs_code == BIT_AND_EXPR)
1759 	    cst2 = gimple_assign_rhs2 (def_stmt);
1760 	  else
1761 	    {
1762 	      cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
1763 	      nprec = TYPE_PRECISION (TREE_TYPE (name2));
1764 	    }
1765 	  if (TREE_CODE (name2) == SSA_NAME
1766 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
1767 	      && TREE_CODE (cst2) == INTEGER_CST
1768 	      && !integer_zerop (cst2)
1769 	      && (nprec > 1
1770 		  || TYPE_UNSIGNED (TREE_TYPE (val))))
1771 	    {
1772 	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
1773 	      if (gimple_assign_cast_p (def_stmt2))
1774 		{
1775 		  names[1] = gimple_assign_rhs1 (def_stmt2);
1776 		  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
1777 		      || TREE_CODE (names[1]) != SSA_NAME
1778 		      || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
1779 		      || (TYPE_PRECISION (TREE_TYPE (name2))
1780 			  != TYPE_PRECISION (TREE_TYPE (names[1]))))
1781 		    names[1] = NULL_TREE;
1782 		}
1783 	      names[0] = name2;
1784 	    }
1785 	}
1786       if (names[0] || names[1])
1787 	{
1788 	  wide_int minv, maxv, valv, cst2v;
1789 	  wide_int tem, sgnbit;
1790 	  bool valid_p = false, valn, cst2n;
1791 	  enum tree_code ccode = comp_code;
1792 
1793 	  valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
1794 	  cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
1795 	  valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
1796 	  cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
1797 	  /* If CST2 doesn't have most significant bit set,
1798 	     but VAL is negative, we have comparison like
1799 	     if ((x & 0x123) > -4) (always true).  Just give up.  */
1800 	  if (!cst2n && valn)
1801 	    ccode = ERROR_MARK;
1802 	  if (cst2n)
1803 	    sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
1804 	  else
1805 	    sgnbit = wi::zero (nprec);
1806 	  minv = valv & cst2v;
1807 	  switch (ccode)
1808 	    {
1809 	    case EQ_EXPR:
1810 	      /* Minimum unsigned value for equality is VAL & CST2
1811 		 (should be equal to VAL, otherwise we probably should
1812 		 have folded the comparison into false) and
1813 		 maximum unsigned value is VAL | ~CST2.  */
1814 	      maxv = valv | ~cst2v;
1815 	      valid_p = true;
1816 	      break;
1817 
1818 	    case NE_EXPR:
1819 	      tem = valv | ~cst2v;
1820 	      /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U.  */
1821 	      if (valv == 0)
1822 		{
1823 		  cst2n = false;
1824 		  sgnbit = wi::zero (nprec);
1825 		  goto gt_expr;
1826 		}
1827 	      /* If (VAL | ~CST2) is all ones, handle it as
1828 		 (X & CST2) < VAL.  */
1829 	      if (tem == -1)
1830 		{
1831 		  cst2n = false;
1832 		  valn = false;
1833 		  sgnbit = wi::zero (nprec);
1834 		  goto lt_expr;
1835 		}
1836 	      if (!cst2n && wi::neg_p (cst2v))
1837 		sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
1838 	      if (sgnbit != 0)
1839 		{
1840 		  if (valv == sgnbit)
1841 		    {
1842 		      cst2n = true;
1843 		      valn = true;
1844 		      goto gt_expr;
1845 		    }
1846 		  if (tem == wi::mask (nprec - 1, false, nprec))
1847 		    {
1848 		      cst2n = true;
1849 		      goto lt_expr;
1850 		    }
1851 		  if (!cst2n)
1852 		    sgnbit = wi::zero (nprec);
1853 		}
1854 	      break;
1855 
1856 	    case GE_EXPR:
1857 	      /* Minimum unsigned value for >= if (VAL & CST2) == VAL
1858 		 is VAL and maximum unsigned value is ~0.  For signed
1859 		 comparison, if CST2 doesn't have most significant bit
1860 		 set, handle it similarly.  If CST2 has MSB set,
1861 		 the minimum is the same, and maximum is ~0U/2.  */
1862 	      if (minv != valv)
1863 		{
1864 		  /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
1865 		     VAL.  */
1866 		  minv = masked_increment (valv, cst2v, sgnbit, nprec);
1867 		  if (minv == valv)
1868 		    break;
1869 		}
1870 	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
1871 	      valid_p = true;
1872 	      break;
1873 
1874 	    case GT_EXPR:
1875 	    gt_expr:
1876 	      /* Find out smallest MINV where MINV > VAL
1877 		 && (MINV & CST2) == MINV, if any.  If VAL is signed and
1878 		 CST2 has MSB set, compute it biased by 1 << (nprec - 1).  */
1879 	      minv = masked_increment (valv, cst2v, sgnbit, nprec);
1880 	      if (minv == valv)
1881 		break;
1882 	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
1883 	      valid_p = true;
1884 	      break;
1885 
1886 	    case LE_EXPR:
1887 	      /* Minimum unsigned value for <= is 0 and maximum
1888 		 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
1889 		 Otherwise, find smallest VAL2 where VAL2 > VAL
1890 		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
1891 		 as maximum.
1892 		 For signed comparison, if CST2 doesn't have most
1893 		 significant bit set, handle it similarly.  If CST2 has
1894 		 MSB set, the maximum is the same and minimum is INT_MIN.  */
1895 	      if (minv == valv)
1896 		maxv = valv;
1897 	      else
1898 		{
1899 		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
1900 		  if (maxv == valv)
1901 		    break;
1902 		  maxv -= 1;
1903 		}
1904 	      maxv |= ~cst2v;
1905 	      minv = sgnbit;
1906 	      valid_p = true;
1907 	      break;
1908 
1909 	    case LT_EXPR:
1910 	    lt_expr:
1911 	      /* Minimum unsigned value for < is 0 and maximum
1912 		 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
1913 		 Otherwise, find smallest VAL2 where VAL2 > VAL
1914 		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
1915 		 as maximum.
1916 		 For signed comparison, if CST2 doesn't have most
1917 		 significant bit set, handle it similarly.  If CST2 has
1918 		 MSB set, the maximum is the same and minimum is INT_MIN.  */
1919 	      if (minv == valv)
1920 		{
1921 		  if (valv == sgnbit)
1922 		    break;
1923 		  maxv = valv;
1924 		}
1925 	      else
1926 		{
1927 		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
1928 		  if (maxv == valv)
1929 		    break;
1930 		}
1931 	      maxv -= 1;
1932 	      maxv |= ~cst2v;
1933 	      minv = sgnbit;
1934 	      valid_p = true;
1935 	      break;
1936 
1937 	    default:
1938 	      break;
1939 	    }
1940 	  if (valid_p
1941 	      && (maxv - minv) != -1)
1942 	    {
1943 	      tree tmp, new_val, type;
1944 	      int i;
1945 
1946 	      for (i = 0; i < 2; i++)
1947 		if (names[i])
1948 		  {
1949 		    wide_int maxv2 = maxv;
1950 		    tmp = names[i];
1951 		    type = TREE_TYPE (names[i]);
1952 		    if (!TYPE_UNSIGNED (type))
1953 		      {
1954 			type = build_nonstandard_integer_type (nprec, 1);
1955 			tmp = build1 (NOP_EXPR, type, names[i]);
1956 		      }
1957 		    if (minv != 0)
1958 		      {
1959 			tmp = build2 (PLUS_EXPR, type, tmp,
1960 				      wide_int_to_tree (type, -minv));
1961 			maxv2 = maxv - minv;
1962 		      }
1963 		    new_val = wide_int_to_tree (type, maxv2);
1964 		    add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
1965 		  }
1966 	    }
1967 	}
1968     }
1969 }
1970 
1971 /* OP is an operand of a truth value expression which is known to have
1972    a particular value.  Register any asserts for OP and for any
1973    operands in OP's defining statement.
1974 
1975    If CODE is EQ_EXPR, then we want to register OP is zero (false),
1976    if CODE is NE_EXPR, then we want to register OP is nonzero (true).   */
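/* For example, if OP is defined as OP = x_1 < y_2 and CODE is NE_EXPR,
   we record OP != 0 and also register the asserts derived from the
   comparison x_1 < y_2 on the edge.  */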
1977 
1978 static void
1979 register_edge_assert_for_1 (tree op, enum tree_code code,
1980 			    edge e, vec<assert_info> &asserts)
1981 {
1982   gimple *op_def;
1983   tree val;
1984   enum tree_code rhs_code;
1985 
1986   /* We only care about SSA_NAMEs.  */
1987   if (TREE_CODE (op) != SSA_NAME)
1988     return;
1989 
1990   /* We know that OP will have a zero or nonzero value.  */
1991   val = build_int_cst (TREE_TYPE (op), 0);
1992   add_assert_info (asserts, op, op, code, val);
1993 
1994   /* Now look at how OP is set.  If it's set from a comparison,
1995      a truth operation or some bit operations, then we may be able
1996      to register information about the operands of that assignment.  */
1997   op_def = SSA_NAME_DEF_STMT (op);
1998   if (gimple_code (op_def) != GIMPLE_ASSIGN)
1999     return;
2000 
2001   rhs_code = gimple_assign_rhs_code (op_def);
2002 
2003   if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
2004     {
2005       bool invert = (code == EQ_EXPR ? true : false);
2006       tree op0 = gimple_assign_rhs1 (op_def);
2007       tree op1 = gimple_assign_rhs2 (op_def);
2008 
2009       if (TREE_CODE (op0) == SSA_NAME)
2010         register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
2011       if (TREE_CODE (op1) == SSA_NAME)
2012         register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
2013     }
2014   else if ((code == NE_EXPR
2015 	    && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
2016 	   || (code == EQ_EXPR
2017 	       && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
2018     {
2019       /* Recurse on each operand.  */
2020       tree op0 = gimple_assign_rhs1 (op_def);
2021       tree op1 = gimple_assign_rhs2 (op_def);
2022       if (TREE_CODE (op0) == SSA_NAME
2023 	  && has_single_use (op0))
2024 	register_edge_assert_for_1 (op0, code, e, asserts);
2025       if (TREE_CODE (op1) == SSA_NAME
2026 	  && has_single_use (op1))
2027 	register_edge_assert_for_1 (op1, code, e, asserts);
2028     }
2029   else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
2030 	   && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
2031     {
2032       /* Recurse, flipping CODE.  */
2033       code = invert_tree_comparison (code, false);
2034       register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
2035     }
2036   else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
2037     {
2038       /* Recurse through the copy.  */
2039       register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
2040     }
2041   else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
2042     {
2043       /* Recurse through the type conversion, unless it is a narrowing
2044 	 conversion or conversion from non-integral type.  */
2045       tree rhs = gimple_assign_rhs1 (op_def);
2046       if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2047 	  && (TYPE_PRECISION (TREE_TYPE (rhs))
2048 	      <= TYPE_PRECISION (TREE_TYPE (op))))
2049 	register_edge_assert_for_1 (rhs, code, e, asserts);
2050     }
2051 }
2052 
2053 /* Check if comparison
2054      NAME COND_OP INTEGER_CST
2055    has a form of
2056      (X & 11...100..0) COND_OP XX...X00...0
2057    Such a comparison can yield assertions like
2058      X >= XX...X00...0
2059      X <= XX...X11...1
2060    in case of COND_OP being EQ_EXPR or
2061      X < XX...X00...0
2062      X > XX...X11...1
2063    in case of NE_EXPR.  */
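/* For example, for unsigned X, (X & 0xF8) == 0x20 yields X >= 0x20 and
   X <= 0x27, while (X & 0xF8) != 0 yields X > 0x07.  */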
2064 
2065 static bool
2066 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
2067 		      tree *new_name, tree *low, enum tree_code *low_code,
2068 		      tree *high, enum tree_code *high_code)
2069 {
2070   gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2071 
2072   if (!is_gimple_assign (def_stmt)
2073       || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
2074     return false;
2075 
2076   tree t = gimple_assign_rhs1 (def_stmt);
2077   tree maskt = gimple_assign_rhs2 (def_stmt);
2078   if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
2079     return false;
2080 
2081   wi::tree_to_wide_ref mask = wi::to_wide (maskt);
2082   wide_int inv_mask = ~mask;
2083   /* Must have been removed by now so don't bother optimizing.  */
2084   if (mask == 0 || inv_mask == 0)
2085     return false;
2086 
2087   /* Assume VALT is INTEGER_CST.  */
2088   wi::tree_to_wide_ref val = wi::to_wide (valt);
2089 
2090   if ((inv_mask & (inv_mask + 1)) != 0
2091       || (val & mask) != val)
2092     return false;
2093 
2094   bool is_range = cond_code == EQ_EXPR;
2095 
2096   tree type = TREE_TYPE (t);
2097   wide_int min = wi::min_value (type),
2098     max = wi::max_value (type);
2099 
2100   if (is_range)
2101     {
2102       *low_code = val == min ? ERROR_MARK : GE_EXPR;
2103       *high_code = val == max ? ERROR_MARK : LE_EXPR;
2104     }
2105   else
2106     {
2107       /* We can still generate an assertion if one of the alternatives
2108 	 is known to always be false.  */
2109       if (val == min)
2110 	{
2111 	  *low_code = (enum tree_code) 0;
2112 	  *high_code = GT_EXPR;
2113 	}
2114       else if ((val | inv_mask) == max)
2115 	{
2116 	  *low_code = LT_EXPR;
2117 	  *high_code = (enum tree_code) 0;
2118 	}
2119       else
2120 	return false;
2121     }
2122 
2123   *new_name = t;
2124   *low = wide_int_to_tree (type, val);
2125   *high = wide_int_to_tree (type, val | inv_mask);
2126 
2127   return true;
2128 }
2129 
2130 /* Try to register an edge assertion for SSA name NAME on edge E for
2131    the condition given by COND_CODE, COND_OP0 and COND_OP1 that
2132    contributes to the conditional jump out of E->src.  */
2133 
2134 void
2135 register_edge_assert_for (tree name, edge e,
2136 			  enum tree_code cond_code, tree cond_op0,
2137 			  tree cond_op1, vec<assert_info> &asserts)
2138 {
2139   tree val;
2140   enum tree_code comp_code;
2141   bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
2142 
2143   /* Do not attempt to infer anything for names that flow through
2144      abnormal edges.  */
2145   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
2146     return;
2147 
2148   if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
2149 						cond_op0, cond_op1,
2150 						is_else_edge,
2151 						&comp_code, &val))
2152     return;
2153 
2154   /* Register ASSERT_EXPRs for name.  */
2155   register_edge_assert_for_2 (name, e, cond_code, cond_op0,
2156 			      cond_op1, is_else_edge, asserts);
2157 
2158 
2159   /* If COND is effectively an equality test of an SSA_NAME against
2160      the value zero or one, then we may be able to assert values
2161      for SSA_NAMEs which flow into COND.  */
2162 
2163   /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
2164      statement of NAME we can assert both operands of the BIT_AND_EXPR
2165      have nonzero value.  */
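  /* For example, for _3 = a_1 & b_2 and a test _3 != 0 we can assert
     both a_1 != 0 and b_2 != 0 on the edge.  */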
2166   if ((comp_code == EQ_EXPR && integer_onep (val))
2167       || (comp_code == NE_EXPR && integer_zerop (val)))
2168     {
2169       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2170 
2171       if (is_gimple_assign (def_stmt)
2172 	  && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
2173 	{
2174 	  tree op0 = gimple_assign_rhs1 (def_stmt);
2175 	  tree op1 = gimple_assign_rhs2 (def_stmt);
2176 	  register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
2177 	  register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
2178 	}
2179       else if (is_gimple_assign (def_stmt)
2180 	       && (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
2181 		   == tcc_comparison))
2182 	register_edge_assert_for_1 (name, NE_EXPR, e, asserts);
2183     }
2184 
2185   /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
2186      statement of NAME we can assert both operands of the BIT_IOR_EXPR
2187      have zero value.  */
2188   if ((comp_code == EQ_EXPR && integer_zerop (val))
2189       || (comp_code == NE_EXPR
2190 	  && integer_onep (val)
2191 	  && TYPE_PRECISION (TREE_TYPE (name)) == 1))
2192     {
2193       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2194 
2195       /* For BIT_IOR_EXPR both operands necessarily have zero value only
2196 	 if NAME == 0, or, when the type precision is one, if NAME != 1.  */
2197       if (is_gimple_assign (def_stmt)
2198 	  && gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)
2199 	{
2200 	  tree op0 = gimple_assign_rhs1 (def_stmt);
2201 	  tree op1 = gimple_assign_rhs2 (def_stmt);
2202 	  register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
2203 	  register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
2204 	}
2205       else if (is_gimple_assign (def_stmt)
2206 	       && (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
2207 		   == tcc_comparison))
2208 	register_edge_assert_for_1 (name, EQ_EXPR, e, asserts);
2209     }
2210 
2211   /* Sometimes we can infer ranges from (NAME & MASK) == VALUE.  */
2212   if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
2213       && TREE_CODE (val) == INTEGER_CST)
2214     {
2215       enum tree_code low_code, high_code;
2216       tree low, high;
2217       if (is_masked_range_test (name, val, comp_code, &name, &low,
2218 				&low_code, &high, &high_code))
2219 	{
2220 	  if (low_code != ERROR_MARK)
2221 	    register_edge_assert_for_2 (name, e, low_code, name,
2222 					low, /*invert*/false, asserts);
2223 	  if (high_code != ERROR_MARK)
2224 	    register_edge_assert_for_2 (name, e, high_code, name,
2225 					high, /*invert*/false, asserts);
2226 	}
2227     }
2228 }
2229 
2230 /* Handle
2231    _4 = x_3 & 31;
2232    if (_4 != 0)
2233      goto <bb 6>;
2234    else
2235      goto <bb 7>;
2236    <bb 6>:
2237    __builtin_unreachable ();
2238    <bb 7>:
2239    x_5 = ASSERT_EXPR <x_3, ...>;
2240    If x_3 has no other immediate uses (checked by the caller) and
2241    VAR is the x_3 var from the ASSERT_EXPR, we can clear the low 5 bits
2242    from its non-zero bitmask.  */
2243 
2244 void
2245 maybe_set_nonzero_bits (edge e, tree var)
2246 {
2247   basic_block cond_bb = e->src;
2248   gimple *stmt = last_stmt (cond_bb);
2249   tree cst;
2250 
2251   if (stmt == NULL
2252       || gimple_code (stmt) != GIMPLE_COND
2253       || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
2254 				     ? EQ_EXPR : NE_EXPR)
2255       || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
2256       || !integer_zerop (gimple_cond_rhs (stmt)))
2257     return;
2258 
2259   stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
2260   if (!is_gimple_assign (stmt)
2261       || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
2262       || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
2263     return;
2264   if (gimple_assign_rhs1 (stmt) != var)
2265     {
2266       gimple *stmt2;
2267 
2268       if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
2269 	return;
2270       stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
2271       if (!gimple_assign_cast_p (stmt2)
2272 	  || gimple_assign_rhs1 (stmt2) != var
2273 	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
2274 	  || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
2275 			      != TYPE_PRECISION (TREE_TYPE (var))))
2276 	return;
2277     }
2278   cst = gimple_assign_rhs2 (stmt);
2279   set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
2280 					  wi::to_wide (cst)));
2281 }
2282 
2283 /* Return true if STMT is interesting for VRP.  */
2284 
2285 bool
2286 stmt_interesting_for_vrp (gimple *stmt)
2287 {
2288   if (gimple_code (stmt) == GIMPLE_PHI)
2289     {
2290       tree res = gimple_phi_result (stmt);
2291       return (!virtual_operand_p (res)
2292 	      && (INTEGRAL_TYPE_P (TREE_TYPE (res))
2293 		  || POINTER_TYPE_P (TREE_TYPE (res))));
2294     }
2295   else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
2296     {
2297       tree lhs = gimple_get_lhs (stmt);
2298 
2299       /* In general, assignments with virtual operands are not useful
2300 	 for deriving ranges, with the obvious exception of calls to
2301 	 builtin functions.  */
2302       if (lhs && TREE_CODE (lhs) == SSA_NAME
2303 	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
2304 	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
2305 	  && (is_gimple_call (stmt)
2306 	      || !gimple_vuse (stmt)))
2307 	return true;
2308       else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
2309 	switch (gimple_call_internal_fn (stmt))
2310 	  {
2311 	  case IFN_ADD_OVERFLOW:
2312 	  case IFN_SUB_OVERFLOW:
2313 	  case IFN_MUL_OVERFLOW:
2314 	  case IFN_ATOMIC_COMPARE_EXCHANGE:
2315 	    /* These internal calls return _Complex integer type,
2316 	       but are interesting to VRP nevertheless.  */
2317 	    if (lhs && TREE_CODE (lhs) == SSA_NAME)
2318 	      return true;
2319 	    break;
2320 	  default:
2321 	    break;
2322 	  }
2323     }
2324   else if (gimple_code (stmt) == GIMPLE_COND
2325 	   || gimple_code (stmt) == GIMPLE_SWITCH)
2326     return true;
2327 
2328   return false;
2329 }
2330 
2331 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
2332    that includes the value VAL.  The search is restricted to the range
2333    [START_IDX, n - 1] where n is the size of VEC.
2334 
2335    If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
2336    returned.
2337 
2338    If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
2339    the index of that label is placed in IDX and false is returned.
2340 
2341    If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
2342    returned. */
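/* For example, with plain (non-range) case labels 1, 5 and 10 and VAL 7,
   IDX is set to the index of label 10 and false is returned.  */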
2343 
2344 bool
2345 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
2346 {
2347   size_t n = gimple_switch_num_labels (stmt);
2348   size_t low, high;
2349 
2350   /* Find case label for minimum of the value range or the next one.
2351      At each iteration we are searching in [low, high - 1]. */
2352 
2353   for (low = start_idx, high = n; high != low; )
2354     {
2355       tree t;
2356       int cmp;
2357       /* Note that i != high, so we never ask for n. */
2358       size_t i = (high + low) / 2;
2359       t = gimple_switch_label (stmt, i);
2360 
2361       /* Cache the result of comparing CASE_LOW and val.  */
2362       cmp = tree_int_cst_compare (CASE_LOW (t), val);
2363 
2364       if (cmp == 0)
2365 	{
2366 	  /* Ranges cannot be empty. */
2367 	  *idx = i;
2368 	  return true;
2369 	}
2370       else if (cmp > 0)
2371         high = i;
2372       else
2373 	{
2374 	  low = i + 1;
2375 	  if (CASE_HIGH (t) != NULL
2376 	      && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
2377 	    {
2378 	      *idx = i;
2379 	      return true;
2380 	    }
2381         }
2382     }
2383 
2384   *idx = high;
2385   return false;
2386 }
2387 
2388 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
2389    for values between MIN and MAX. The first index is placed in MIN_IDX. The
2390    last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
2391    then MAX_IDX < MIN_IDX.
2392    Returns true if the default label is not needed. */
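/* For example, with case labels 1, 2, 3 and 7 and the range MIN 2, MAX 3,
   MIN_IDX and MAX_IDX select the labels 2 and 3; since those labels are
   contiguous the default label is not needed and true is returned.  */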
2393 
2394 bool
2395 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
2396 		       size_t *max_idx)
2397 {
2398   size_t i, j;
2399   bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
2400   bool max_take_default = !find_case_label_index (stmt, i, max, &j);
2401 
2402   if (i == j
2403       && min_take_default
2404       && max_take_default)
2405     {
2406       /* Only the default case label reached.
2407          Return an empty range. */
2408       *min_idx = 1;
2409       *max_idx = 0;
2410       return false;
2411     }
2412   else
2413     {
2414       bool take_default = min_take_default || max_take_default;
2415       tree low, high;
2416       size_t k;
2417 
2418       if (max_take_default)
2419 	j--;
2420 
2421       /* If the case label range is continuous, we do not need
2422 	 the default case label.  Verify that.  */
2423       high = CASE_LOW (gimple_switch_label (stmt, i));
2424       if (CASE_HIGH (gimple_switch_label (stmt, i)))
2425 	high = CASE_HIGH (gimple_switch_label (stmt, i));
2426       for (k = i + 1; k <= j; ++k)
2427 	{
2428 	  low = CASE_LOW (gimple_switch_label (stmt, k));
2429 	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
2430 	    {
2431 	      take_default = true;
2432 	      break;
2433 	    }
2434 	  high = low;
2435 	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
2436 	    high = CASE_HIGH (gimple_switch_label (stmt, k));
2437 	}
2438 
2439       *min_idx = i;
2440       *max_idx = j;
2441       return !take_default;
2442     }
2443 }
2444 
2445 /* Given a SWITCH_STMT, return the case label that encompasses the
2446    known possible values for the switch operand.  RANGE_OF_OP is a
2447    range for the known values of the switch operand.  */
2448 
2449 tree
2450 find_case_label_range (gswitch *switch_stmt, const irange *range_of_op)
2451 {
2452   if (range_of_op->undefined_p ()
2453       || range_of_op->varying_p ()
2454       || range_of_op->symbolic_p ())
2455     return NULL_TREE;
2456 
2457   size_t i, j;
2458   tree op = gimple_switch_index (switch_stmt);
2459   tree type = TREE_TYPE (op);
2460   tree tmin = wide_int_to_tree (type, range_of_op->lower_bound ());
2461   tree tmax = wide_int_to_tree (type, range_of_op->upper_bound ());
2462   find_case_label_range (switch_stmt, tmin, tmax, &i, &j);
2463   if (i == j)
2464     {
2465       /* Look for exactly one label that encompasses the range of
2466 	 the operand.  */
2467       tree label = gimple_switch_label (switch_stmt, i);
2468       tree case_high
2469 	= CASE_HIGH (label) ? CASE_HIGH (label) : CASE_LOW (label);
2470       int_range_max label_range (CASE_LOW (label), case_high);
2471       if (!types_compatible_p (label_range.type (), range_of_op->type ()))
2472 	range_cast (label_range, range_of_op->type ());
2473       label_range.intersect (range_of_op);
2474       if (label_range == *range_of_op)
2475 	return label;
2476     }
2477   else if (i > j)
2478     {
2479       /* If there are no labels at all, take the default.  */
2480       return gimple_switch_label (switch_stmt, 0);
2481     }
2482   else
2483     {
2484       /* Otherwise, there are various labels that can encompass
2485 	 the range of the operand.  In that case, see if the range of
2486 	 the operand is entirely *outside* the bounds of all the
2487 	 (non-default) case labels.  If so, take the default.  */
2488       unsigned n = gimple_switch_num_labels (switch_stmt);
2489       tree min_label = gimple_switch_label (switch_stmt, 1);
2490       tree max_label = gimple_switch_label (switch_stmt, n - 1);
2491       tree case_high = CASE_HIGH (max_label);
2492       if (!case_high)
2493 	case_high = CASE_LOW (max_label);
2494       int_range_max label_range (CASE_LOW (min_label), case_high);
2495       if (!types_compatible_p (label_range.type (), range_of_op->type ()))
2496 	range_cast (label_range, range_of_op->type ());
2497       label_range.intersect (range_of_op);
2498       if (label_range.undefined_p ())
2499 	return gimple_switch_label (switch_stmt, 0);
2500     }
2501   return NULL_TREE;
2502 }
2503 
2504 struct case_info
2505 {
2506   tree expr;
2507   basic_block bb;
2508 };
2509 
2510 /* Location information for ASSERT_EXPRs.  Each instance of this
2511    structure describes an ASSERT_EXPR for an SSA name.  Since a single
2512    SSA name may have more than one assertion associated with it, these
2513    locations are kept in a linked list attached to the corresponding
2514    SSA name.  */
2515 struct assert_locus
2516 {
2517   /* Basic block where the assertion would be inserted.  */
2518   basic_block bb;
2519 
2520   /* Some assertions need to be inserted on an edge (e.g., assertions
2521      generated by COND_EXPRs).  In those cases, BB will be NULL.  */
2522   edge e;
2523 
2524   /* Pointer to the statement that generated this assertion.  */
2525   gimple_stmt_iterator si;
2526 
2527   /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
2528   enum tree_code comp_code;
2529 
2530   /* Value being compared against.  */
2531   tree val;
2532 
2533   /* Expression to compare.  */
2534   tree expr;
2535 
2536   /* Next node in the linked list.  */
2537   assert_locus *next;
2538 };
2539 
2540 /* Class to traverse the flowgraph looking for conditional jumps to
2541    insert ASSERT_EXPR range expressions.  These range expressions are
2542    meant to provide information to optimizations that need to reason
2543    in terms of value ranges.  They will not be expanded into RTL.  */
2544 
2545 class vrp_asserts
2546 {
2547 public:
2548   vrp_asserts (struct function *fn) : fun (fn) { }
2549 
2550   void insert_range_assertions ();
2551 
2552   /* Convert range assertion expressions into the implied copies and
2553      copy propagate away the copies.  */
2554   void remove_range_assertions ();
2555 
2556   /* Dump all the registered assertions for all the names to FILE.  */
2557   void dump (FILE *);
2558 
2559   /* Dump all the registered assertions for NAME to FILE.  */
2560   void dump (FILE *file, tree name);
2561 
2562   /* Dump all the registered assertions for NAME to stderr.  */
2563   void debug (tree name)
2564   {
2565     dump (stderr, name);
2566   }
2567 
2568   /* Dump all the registered assertions for all the names to stderr.  */
2569   void debug ()
2570   {
2571     dump (stderr);
2572   }
2573 
2574 private:
2575   /* Set of SSA names found live during the RPO traversal of the function
2576      for still active basic-blocks.  */
2577   live_names live;
2578 
2579   /* Function to work on.  */
2580   struct function *fun;
2581 
2582   /* If bit I is present, it means that SSA name N_i has a list of
2583      assertions that should be inserted in the IL.  */
2584   bitmap need_assert_for;
2585 
2586   /* Array of locations lists where to insert assertions.  ASSERTS_FOR[I]
2587      holds a list of ASSERT_LOCUS_T nodes that describe where
2588      ASSERT_EXPRs for SSA name N_I should be inserted.  */
2589   assert_locus **asserts_for;
2590 
2591   /* Finish found ASSERTS for E and register them at GSI.  */
2592   void finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
2593 					vec<assert_info> &asserts);
2594 
2595   /* Determine whether the outgoing edges of BB should receive an
2596      ASSERT_EXPR for each of the operands of BB's LAST statement.  The
2597      last statement of BB must be a SWITCH_EXPR.
2598 
2599      If any of the sub-graphs rooted at BB have an interesting use of
2600      the predicate operands, an assert location node is added to the
2601      list of assertions for the corresponding operands.  */
2602   void find_switch_asserts (basic_block bb, gswitch *last);
2603 
2604   /* Do an RPO walk over the function computing SSA name liveness
2605      on-the-fly and deciding on assert expressions to insert.  */
2606   void find_assert_locations ();
2607 
2608   /* Traverse all the statements in block BB looking for statements that
2609      may generate useful assertions for the SSA names in their operands.
2610      See the method implementation commentary for more information.  */
2611   void find_assert_locations_in_bb (basic_block bb);
2612 
2613   /* Determine whether the outgoing edges of BB should receive an
2614      ASSERT_EXPR for each of the operands of BB's LAST statement.
2615      The last statement of BB must be a COND_EXPR.
2616 
2617      If any of the sub-graphs rooted at BB have an interesting use of
2618      the predicate operands, an assert location node is added to the
2619      list of assertions for the corresponding operands.  */
2620   void find_conditional_asserts (basic_block bb, gcond *last);
2621 
2622   /* Process all the insertions registered for every name N_i registered
2623      in NEED_ASSERT_FOR.  The list of assertions to be inserted are
2624      found in ASSERTS_FOR[i].  */
2625   void process_assert_insertions ();
2626 
2627   /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2628      'EXPR COMP_CODE VAL' at a location that dominates block BB or
2629      E->DEST, then register this location as a possible insertion point
2630      for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2631 
2632      BB, E and SI provide the exact insertion point for the new
2633      ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
2634      on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2635      BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2636      must not be NULL.  */
2637   void register_new_assert_for (tree name, tree expr,
2638 				enum tree_code comp_code,
2639 				tree val, basic_block bb,
2640 				edge e, gimple_stmt_iterator si);
2641 
2642   /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2643      create a new SSA name N and return the assertion assignment
2644      'N = ASSERT_EXPR <V, V OP W>'.  */
2645   gimple *build_assert_expr_for (tree cond, tree v);
2646 
2647   /* Create an ASSERT_EXPR for NAME and insert it in the location
2648      indicated by LOC.  Return true if we made any edge insertions.  */
2649   bool process_assert_insertions_for (tree name, assert_locus *loc);
2650 
2651   /* Qsort callback for sorting assert locations.  */
2652   template <bool stable> static int compare_assert_loc (const void *,
2653 							const void *);
2654 
2655   /* Return true if STMT is a conditional predicate involving floating
2656      point values.  */
2657   bool fp_predicate (gimple *stmt)
2658   {
2659     GIMPLE_CHECK (stmt, GIMPLE_COND);
2660     return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
2661   }
2662 
2663   bool all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt,
2664 					  basic_block cond_bb);
2665 
2666   static int compare_case_labels (const void *, const void *);
2667 };
2668 
2669 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2670    create a new SSA name N and return the assertion assignment
2671    'N = ASSERT_EXPR <V, V OP W>'.  */
2672 
2673 gimple *
2674 vrp_asserts::build_assert_expr_for (tree cond, tree v)
2675 {
2676   tree a;
2677   gassign *assertion;
2678 
2679   gcc_assert (TREE_CODE (v) == SSA_NAME
2680 	      && COMPARISON_CLASS_P (cond));
2681 
2682   a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
2683   assertion = gimple_build_assign (NULL_TREE, a);
2684 
2685   /* The new ASSERT_EXPR creates a new SSA name that replaces the
2686      operand of the ASSERT_EXPR.  Create it so the new name and the old one
2687      are registered in the replacement table so that we can fix the SSA web
2688      after adding all the ASSERT_EXPRs.  */
2689   tree new_def = create_new_def_for (v, assertion, NULL);
2690   /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
2691      given we have to be able to fully propagate those out to re-create
2692      valid SSA when removing the asserts.  */
2693   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
2694     SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
2695 
2696   return assertion;
2697 }
2698 
2699 /* Dump all the registered assertions for NAME to FILE.  */
2700 
2701 void
2702 vrp_asserts::dump (FILE *file, tree name)
2703 {
2704   assert_locus *loc;
2705 
2706   fprintf (file, "Assertions to be inserted for ");
2707   print_generic_expr (file, name);
2708   fprintf (file, "\n");
2709 
2710   loc = asserts_for[SSA_NAME_VERSION (name)];
2711   while (loc)
2712     {
2713       fprintf (file, "\t");
2714       print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2715       fprintf (file, "\n\tBB #%d", loc->bb->index);
2716       if (loc->e)
2717 	{
2718 	  fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2719 	           loc->e->dest->index);
2720 	  dump_edge_info (file, loc->e, dump_flags, 0);
2721 	}
2722       fprintf (file, "\n\tPREDICATE: ");
2723       print_generic_expr (file, loc->expr);
2724       fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2725       print_generic_expr (file, loc->val);
2726       fprintf (file, "\n\n");
2727       loc = loc->next;
2728     }
2729 
2730   fprintf (file, "\n");
2731 }
2732 
2733 /* Dump all the registered assertions for all the names to FILE.  */
2734 
2735 void
2736 vrp_asserts::dump (FILE *file)
2737 {
2738   unsigned i;
2739   bitmap_iterator bi;
2740 
2741   fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2742   EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2743     dump (file, ssa_name (i));
2744   fprintf (file, "\n");
2745 }
2746 
2747 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2748    'EXPR COMP_CODE VAL' at a location that dominates block BB or
2749    E->DEST, then register this location as a possible insertion point
2750    for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2751 
2752    BB, E and SI provide the exact insertion point for the new
2753    ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
2754    on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2755    BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2756    must not be NULL.  */
2757 
2758 void
2759 vrp_asserts::register_new_assert_for (tree name, tree expr,
2760 				      enum tree_code comp_code,
2761 				      tree val,
2762 				      basic_block bb,
2763 				      edge e,
2764 				      gimple_stmt_iterator si)
2765 {
2766   assert_locus *n, *loc, *last_loc;
2767   basic_block dest_bb;
2768 
2769   gcc_checking_assert (bb == NULL || e == NULL);
2770 
2771   if (e == NULL)
2772     gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2773 			 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2774 
2775   /* Never build an assert comparing against an integer constant with
2776      TREE_OVERFLOW set.  This confuses our undefined overflow warning
2777      machinery.  */
2778   if (TREE_OVERFLOW_P (val))
2779     val = drop_tree_overflow (val);
2780 
2781   /* The new assertion A will be inserted at BB or E.  We need to
2782      determine if the new location is dominated by a previously
2783      registered location for A.  If we are doing an edge insertion,
2784      assume that A will be inserted at E->DEST.  Note that this is not
2785      necessarily true.
2786 
2787      If E is a critical edge, it will be split.  But even if E is
2788      split, the new block will dominate the same set of blocks that
2789      E->DEST dominates.
2790 
2791      The reverse, however, is not true: blocks dominated by E->DEST
2792      will not be dominated by the new block created to split E.  So,
2793      if the insertion location is on a critical edge, we will not use
2794      the new location to move another assertion previously registered
2795      at a block dominated by E->DEST.  */
2796   dest_bb = (bb) ? bb : e->dest;
2797 
2798   /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2799      VAL at a block dominating DEST_BB, then we don't need to insert a new
2800      one.  Similarly, if the same assertion already exists at a block
2801      dominated by DEST_BB and the new location is not on a critical
2802      edge, then update the existing location for the assertion (i.e.,
2803      move the assertion up in the dominance tree).
2804 
2805      Note, this is implemented as a simple linked list because there
2806      should not be more than a handful of assertions registered per
2807      name.  If this becomes a performance problem, a table hashed by
2808      COMP_CODE and VAL could be implemented.  */
2809   loc = asserts_for[SSA_NAME_VERSION (name)];
2810   last_loc = loc;
2811   while (loc)
2812     {
2813       if (loc->comp_code == comp_code
2814 	  && (loc->val == val
2815 	      || operand_equal_p (loc->val, val, 0))
2816 	  && (loc->expr == expr
2817 	      || operand_equal_p (loc->expr, expr, 0)))
2818 	{
2819 	  /* If E is not a critical edge and DEST_BB
2820 	     dominates the existing location for the assertion, move
2821 	     the assertion up in the dominance tree by updating its
2822 	     location information.  */
2823 	  if ((e == NULL || !EDGE_CRITICAL_P (e))
2824 	      && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2825 	    {
2826 	      loc->bb = dest_bb;
2827 	      loc->e = e;
2828 	      loc->si = si;
2829 	      return;
2830 	    }
2831 	}
2832 
2833       /* Update the last node of the list and move to the next one.  */
2834       last_loc = loc;
2835       loc = loc->next;
2836     }
2837 
2838   /* If we didn't find an assertion already registered for
2839      NAME COMP_CODE VAL, add a new one at the end of the list of
2840      assertions associated with NAME.  */
2841   n = XNEW (struct assert_locus);
2842   n->bb = dest_bb;
2843   n->e = e;
2844   n->si = si;
2845   n->comp_code = comp_code;
2846   n->val = val;
2847   n->expr = expr;
2848   n->next = NULL;
2849 
2850   if (last_loc)
2851     last_loc->next = n;
2852   else
2853     asserts_for[SSA_NAME_VERSION (name)] = n;
2854 
2855   bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2856 }
2857 
2858 /* Finish found ASSERTS for E and register them at GSI.  */
2859 
2860 void
2861 vrp_asserts::finish_register_edge_assert_for (edge e,
2862 					      gimple_stmt_iterator gsi,
2863 					      vec<assert_info> &asserts)
2864 {
2865   for (unsigned i = 0; i < asserts.length (); ++i)
2866     /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
2867        reachable from E.  */
2868     if (live.live_on_edge_p (asserts[i].name, e))
2869       register_new_assert_for (asserts[i].name, asserts[i].expr,
2870 			       asserts[i].comp_code, asserts[i].val,
2871 			       NULL, e, gsi);
2872 }
2873 
2874 /* Determine whether the outgoing edges of BB should receive an
2875    ASSERT_EXPR for each of the operands of BB's LAST statement.
2876    The last statement of BB must be a COND_EXPR.
2877 
2878    If any of the sub-graphs rooted at BB have an interesting use of
2879    the predicate operands, an assert location node is added to the
2880    list of assertions for the corresponding operands.  */
2881 
2882 void
2883 vrp_asserts::find_conditional_asserts (basic_block bb, gcond *last)
2884 {
2885   gimple_stmt_iterator bsi;
2886   tree op;
2887   edge_iterator ei;
2888   edge e;
2889   ssa_op_iter iter;
2890 
2891   bsi = gsi_for_stmt (last);
2892 
2893   /* Look for uses of the operands in each of the sub-graphs
2894      rooted at BB.  We need to check each of the outgoing edges
2895      separately, so that we know what kind of ASSERT_EXPR to
2896      insert.  */
2897   FOR_EACH_EDGE (e, ei, bb->succs)
2898     {
2899       if (e->dest == bb)
2900 	continue;
2901 
2902       /* Register the necessary assertions for each operand in the
2903 	 conditional predicate.  */
2904       auto_vec<assert_info, 8> asserts;
2905       FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
2906 	register_edge_assert_for (op, e,
2907 				  gimple_cond_code (last),
2908 				  gimple_cond_lhs (last),
2909 				  gimple_cond_rhs (last), asserts);
2910       finish_register_edge_assert_for (e, bsi, asserts);
2911     }
2912 }
2913 
2914 /* Compare two case labels sorting first by the destination bb index
2915    and then by the case value.  */
2916 
2917 int
2918 vrp_asserts::compare_case_labels (const void *p1, const void *p2)
2919 {
2920   const struct case_info *ci1 = (const struct case_info *) p1;
2921   const struct case_info *ci2 = (const struct case_info *) p2;
2922   int idx1 = ci1->bb->index;
2923   int idx2 = ci2->bb->index;
2924 
2925   if (idx1 < idx2)
2926     return -1;
2927   else if (idx1 == idx2)
2928     {
2929       /* Make sure the default label is first in a group.  */
2930       if (!CASE_LOW (ci1->expr))
2931 	return -1;
2932       else if (!CASE_LOW (ci2->expr))
2933 	return 1;
2934       else
2935 	return tree_int_cst_compare (CASE_LOW (ci1->expr),
2936 				     CASE_LOW (ci2->expr));
2937     }
2938   else
2939     return 1;
2940 }
2941 
2942 /* Determine whether the outgoing edges of BB should receive an
2943    ASSERT_EXPR for each of the operands of BB's LAST statement.
2944    The last statement of BB must be a SWITCH_EXPR.
2945 
2946    If any of the sub-graphs rooted at BB have an interesting use of
2947    the predicate operands, an assert location node is added to the
2948    list of assertions for the corresponding operands.  */
2949 
2950 void
2951 vrp_asserts::find_switch_asserts (basic_block bb, gswitch *last)
2952 {
2953   gimple_stmt_iterator bsi;
2954   tree op;
2955   edge e;
2956   struct case_info *ci;
2957   size_t n = gimple_switch_num_labels (last);
2958 #if GCC_VERSION >= 4000
2959   unsigned int idx;
2960 #else
2961   /* Work around GCC 3.4 bug (PR 37086).  */
2962   volatile unsigned int idx;
2963 #endif
2964 
2965   bsi = gsi_for_stmt (last);
2966   op = gimple_switch_index (last);
2967   if (TREE_CODE (op) != SSA_NAME)
2968     return;
2969 
2970   /* Build a vector of case labels sorted by destination label.  */
2971   ci = XNEWVEC (struct case_info, n);
2972   for (idx = 0; idx < n; ++idx)
2973     {
2974       ci[idx].expr = gimple_switch_label (last, idx);
2975       ci[idx].bb = label_to_block (fun, CASE_LABEL (ci[idx].expr));
2976     }
2977   edge default_edge = find_edge (bb, ci[0].bb);
2978   qsort (ci, n, sizeof (struct case_info), compare_case_labels);
2979 
2980   for (idx = 0; idx < n; ++idx)
2981     {
2982       tree min, max;
2983       tree cl = ci[idx].expr;
2984       basic_block cbb = ci[idx].bb;
2985 
2986       min = CASE_LOW (cl);
2987       max = CASE_HIGH (cl);
2988 
2989       /* If there are multiple case labels with the same destination
2990 	 we need to combine them to a single value range for the edge.  */
2991       if (idx + 1 < n && cbb == ci[idx + 1].bb)
2992 	{
2993 	  /* Skip labels until the last of the group.  */
2994 	  do {
2995 	    ++idx;
2996 	  } while (idx < n && cbb == ci[idx].bb);
2997 	  --idx;
2998 
2999 	  /* Pick up the maximum of the case label range.  */
3000 	  if (CASE_HIGH (ci[idx].expr))
3001 	    max = CASE_HIGH (ci[idx].expr);
3002 	  else
3003 	    max = CASE_LOW (ci[idx].expr);
3004 	}
3005 
3006       /* Can't extract a useful assertion out of a range that includes the
3007 	 default label.  */
3008       if (min == NULL_TREE)
3009 	continue;
3010 
3011       /* Find the edge to register the assert expr on.  */
3012       e = find_edge (bb, cbb);
3013 
3014       /* Register the necessary assertions for the operand in the
3015 	 SWITCH_EXPR.  */
3016       auto_vec<assert_info, 8> asserts;
3017       register_edge_assert_for (op, e,
3018 				max ? GE_EXPR : EQ_EXPR,
3019 				op, fold_convert (TREE_TYPE (op), min),
3020 				asserts);
3021       if (max)
3022 	register_edge_assert_for (op, e, LE_EXPR, op,
3023 				  fold_convert (TREE_TYPE (op), max),
3024 				  asserts);
3025       finish_register_edge_assert_for (e, bsi, asserts);
3026     }
3027 
3028   XDELETEVEC (ci);
3029 
3030   if (!live.live_on_edge_p (op, default_edge))
3031     return;
3032 
3033   /* Now register along the default label assertions that correspond to the
3034      anti-range of each label.  */
3035   int insertion_limit = param_max_vrp_switch_assertions;
3036   if (insertion_limit == 0)
3037     return;
3038 
3039   /* We can't do this if the default case shares a label with another case.  */
3040   tree default_cl = gimple_switch_default_label (last);
3041   for (idx = 1; idx < n; idx++)
3042     {
3043       tree min, max;
3044       tree cl = gimple_switch_label (last, idx);
3045       if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
3046 	continue;
3047 
3048       min = CASE_LOW (cl);
3049       max = CASE_HIGH (cl);
3050 
3051       /* Combine contiguous case ranges to reduce the number of assertions
3052 	 to insert.  */
3053       for (idx = idx + 1; idx < n; idx++)
3054 	{
3055 	  tree next_min, next_max;
3056 	  tree next_cl = gimple_switch_label (last, idx);
3057 	  if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
3058 	    break;
3059 
3060 	  next_min = CASE_LOW (next_cl);
3061 	  next_max = CASE_HIGH (next_cl);
3062 
3063 	  wide_int difference = (wi::to_wide (next_min)
3064 				 - wi::to_wide (max ? max : min));
3065 	  if (wi::eq_p (difference, 1))
3066 	    max = next_max ? next_max : next_min;
3067 	  else
3068 	    break;
3069 	}
3070       idx--;
3071 
3072       if (max == NULL_TREE)
3073 	{
3074 	  /* Register the assertion OP != MIN.  */
3075 	  auto_vec<assert_info, 8> asserts;
3076 	  min = fold_convert (TREE_TYPE (op), min);
3077 	  register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
3078 				    asserts);
3079 	  finish_register_edge_assert_for (default_edge, bsi, asserts);
3080 	}
3081       else
3082 	{
3083 	  /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
3084 	     which will give OP the anti-range ~[MIN,MAX].  */
3085 	  tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
3086 	  min = fold_convert (TREE_TYPE (uop), min);
3087 	  max = fold_convert (TREE_TYPE (uop), max);
3088 
3089 	  tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
3090 	  tree rhs = int_const_binop (MINUS_EXPR, max, min);
3091 	  register_new_assert_for (op, lhs, GT_EXPR, rhs,
3092 				   NULL, default_edge, bsi);
3093 	}
3094 
3095       if (--insertion_limit == 0)
3096 	break;
3097     }
3098 }
3099 
3100 /* Traverse all the statements in block BB looking for statements that
3101    may generate useful assertions for the SSA names in their operands.
3102    If a statement produces a useful assertion A for name N_i, then the
3103    list of assertions already generated for N_i is scanned to
3104    determine if A is actually needed.
3105 
3106    If N_i already had the assertion A at a location dominating the
3107    current location, then nothing needs to be done.  Otherwise, the
3108    new location for A is recorded instead.
3109 
3110    1- For every statement S in BB, all the variables used by S are
3111       added to bitmap FOUND_IN_SUBGRAPH.
3112 
3113    2- If statement S uses an operand N in a way that exposes a known
3114       value range for N, then if N was not already generated by an
3115       ASSERT_EXPR, create a new assert location for N.  For instance,
3116       if N is a pointer and the statement dereferences it, we can
3117       assume that N is not NULL.
3118 
3119    3- COND_EXPRs are a special case of #2.  We can derive range
3120       information from the predicate but need to insert different
3121       ASSERT_EXPRs for each of the sub-graphs rooted at the
3122       conditional block.  If the last statement of BB is a conditional
3123       expression of the form 'X op Y', then
3124 
3125       a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
3126 
3127       b) If the conditional is the only entry point to the sub-graph
3128 	 corresponding to the THEN_CLAUSE, recurse into it.  On
3129 	 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
3130 	 an ASSERT_EXPR is added for the corresponding variable.
3131 
3132       c) Repeat step (b) on the ELSE_CLAUSE.
3133 
3134       d) Mark X and Y in FOUND_IN_SUBGRAPH.
3135 
3136       For instance,
3137 
3138 	    if (a == 9)
3139 	      b = a;
3140 	    else
3141 	      b = c + 1;
3142 
3143       In this case, an assertion on the THEN clause is useful to
3144       determine that 'a' is always 9 on that edge.  However, an assertion
3145       on the ELSE clause would be unnecessary.
3146 
3147    4- If BB does not end in a conditional expression, then we recurse
3148       into BB's dominator children.
3149 
3150    At the end of the recursive traversal, every SSA name will have a
3151    list of locations where ASSERT_EXPRs should be added.  When a new
3152    location for name N is found, it is registered by calling
3153    register_new_assert_for.  That function keeps track of all the
3154    registered assertions to prevent adding unnecessary assertions.
3155    For instance, if a pointer P_4 is dereferenced more than once in a
3156    dominator tree, only the location dominating all the dereferences of
3157    P_4 will receive an ASSERT_EXPR.  */
3158 
3159 void
3160 vrp_asserts::find_assert_locations_in_bb (basic_block bb)
3161 {
3162   gimple *last;
3163 
3164   last = last_stmt (bb);
3165 
3166   /* If BB's last statement is a conditional statement involving integer
3167      operands, determine if we need to add ASSERT_EXPRs.  */
3168   if (last
3169       && gimple_code (last) == GIMPLE_COND
3170       && !fp_predicate (last)
3171       && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3172     find_conditional_asserts (bb, as_a <gcond *> (last));
3173 
3174   /* If BB's last statement is a switch statement involving integer
3175      operands, determine if we need to add ASSERT_EXPRs.  */
3176   if (last
3177       && gimple_code (last) == GIMPLE_SWITCH
3178       && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3179     find_switch_asserts (bb, as_a <gswitch *> (last));
3180 
3181   /* Traverse all the statements in BB marking used names and looking
3182      for statements that may infer assertions for their used operands.  */
3183   for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
3184        gsi_prev (&si))
3185     {
3186       gimple *stmt;
3187       tree op;
3188       ssa_op_iter i;
3189 
3190       stmt = gsi_stmt (si);
3191 
3192       if (is_gimple_debug (stmt))
3193 	continue;
3194 
3195       /* See if we can derive an assertion for any of STMT's operands.  */
3196       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3197 	{
3198 	  tree value;
3199 	  enum tree_code comp_code;
3200 
3201 	  /* If op is not live beyond this stmt, do not bother to insert
3202 	     asserts for it.  */
3203 	  if (!live.live_on_block_p (op, bb))
3204 	    continue;
3205 
3206 	  /* If OP is used in such a way that we can infer a value
3207 	     range for it, and we don't find a previous assertion for
3208 	     it, create a new assertion location node for OP.  */
3209 	  if (infer_value_range (stmt, op, &comp_code, &value))
3210 	    {
3211 	      /* If we are able to infer a nonzero value range for OP,
3212 		 then walk backwards through the use-def chain to see if OP
3213 		 was set via a typecast.
3214 
3215 		 If so, then we can also infer a nonzero value range
3216 		 for the operand of the NOP_EXPR.  */
3217 	      if (comp_code == NE_EXPR && integer_zerop (value))
3218 		{
3219 		  tree t = op;
3220 		  gimple *def_stmt = SSA_NAME_DEF_STMT (t);
3221 
3222 		  while (is_gimple_assign (def_stmt)
3223 			 && CONVERT_EXPR_CODE_P
3224 			     (gimple_assign_rhs_code (def_stmt))
3225 			 && TREE_CODE
3226 			     (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
3227 			 && POINTER_TYPE_P
3228 			     (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
3229 		    {
3230 		      t = gimple_assign_rhs1 (def_stmt);
3231 		      def_stmt = SSA_NAME_DEF_STMT (t);
3232 
3233 		      /* Note we want to register the assert for the
3234 			 operand of the NOP_EXPR after SI, not after the
3235 			 conversion.  */
3236 		      if (live.live_on_block_p (t, bb))
3237 			register_new_assert_for (t, t, comp_code, value,
3238 						 bb, NULL, si);
3239 		    }
3240 		}
3241 
3242 	      register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3243 	    }
3244 	}
3245 
3246       /* Update live.  */
3247       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3248 	live.set (op, bb);
3249       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3250 	live.clear (op, bb);
3251     }
3252 
3253   /* Traverse all PHI nodes in BB, updating live.  */
3254   for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3255        gsi_next (&si))
3256     {
3257       use_operand_p arg_p;
3258       ssa_op_iter i;
3259       gphi *phi = si.phi ();
3260       tree res = gimple_phi_result (phi);
3261 
3262       if (virtual_operand_p (res))
3263 	continue;
3264 
3265       FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3266 	{
3267 	  tree arg = USE_FROM_PTR (arg_p);
3268 	  if (TREE_CODE (arg) == SSA_NAME)
3269 	    live.set (arg, bb);
3270 	}
3271 
3272       live.clear (res, bb);
3273     }
3274 }
3275 
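/* For illustration (a hypothetical GIMPLE fragment, not taken from a
   testcase): given

     q_2 = (long int *) p_1;
     *q_2 = 0;

   the dereference lets us register the assertion q_2 != 0 at this point,
   and because q_2 was produced by a pointer conversion, the use-def walk
   above also registers p_1 != 0 at the same location, provided p_1 is
   still live in this block.  */
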
3276 /* Do an RPO walk over the function computing SSA name liveness
3277    on-the-fly and deciding on assert expressions to insert.  */
3278 
3279 void
3280 vrp_asserts::find_assert_locations (void)
3281 {
3282   int *rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
3283   int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
3284   int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (fun));
3285   int rpo_cnt, i;
3286 
3287   rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3288   for (i = 0; i < rpo_cnt; ++i)
3289     bb_rpo[rpo[i]] = i;
3290 
3291   /* Pre-seed loop latch liveness from loop header PHI nodes.  Due to
3292      the order in which we compute liveness and insert asserts, we would
3293      otherwise fail to insert asserts into the loop latch.  */
3294   for (auto loop : loops_list (cfun, 0))
3295     {
3296       i = loop->latch->index;
3297       unsigned int j = single_succ_edge (loop->latch)->dest_idx;
3298       for (gphi_iterator gsi = gsi_start_phis (loop->header);
3299 	   !gsi_end_p (gsi); gsi_next (&gsi))
3300 	{
3301 	  gphi *phi = gsi.phi ();
3302 	  if (virtual_operand_p (gimple_phi_result (phi)))
3303 	    continue;
3304 	  tree arg = gimple_phi_arg_def (phi, j);
3305 	  if (TREE_CODE (arg) == SSA_NAME)
3306 	    live.set (arg, loop->latch);
3307 	}
3308     }
3309 
3310   for (i = rpo_cnt - 1; i >= 0; --i)
3311     {
3312       basic_block bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]);
3313       edge e;
3314       edge_iterator ei;
3315 
3316       /* Process BB and update the live information with uses in
3317          this block.  */
3318       find_assert_locations_in_bb (bb);
3319 
3320       /* Merge liveness into the predecessor blocks and free it.  */
3321       if (!live.block_has_live_names_p (bb))
3322 	{
3323 	  int pred_rpo = i;
3324 	  FOR_EACH_EDGE (e, ei, bb->preds)
3325 	    {
3326 	      int pred = e->src->index;
3327 	      if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
3328 		continue;
3329 
3330 	      live.merge (e->src, bb);
3331 
3332 	      if (bb_rpo[pred] < pred_rpo)
3333 		pred_rpo = bb_rpo[pred];
3334 	    }
3335 
3336 	  /* Record the RPO number of the last visited block that needs
3337 	     live information from this block.  */
3338 	  last_rpo[rpo[i]] = pred_rpo;
3339 	}
3340       else
3341 	live.clear_block (bb);
3342 
3343       /* We can free all successors' live bitmaps if all their
3344          predecessors have been visited already.  */
3345       FOR_EACH_EDGE (e, ei, bb->succs)
3346 	if (last_rpo[e->dest->index] == i)
3347 	  live.clear_block (e->dest);
3348     }
3349 
3350   XDELETEVEC (rpo);
3351   XDELETEVEC (bb_rpo);
3352   XDELETEVEC (last_rpo);
3353 }
3354 
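/* For illustration (a standalone sketch, independent of the GCC bitmap and
   CFG types; the function and parameter names are made up): the propagation
   step of the walk above amounts to visiting the blocks from the last RPO
   index down to the first and OR-ing each block's live set into its
   predecessors, so a block is processed only after all of its forward-edge
   successors have merged into it:

     #include <bitset>
     #include <vector>

     // live[b] holds the names (here just bits) live on entry to the block
     // with RPO index b; preds[b] lists its predecessors' RPO indices.
     void
     propagate_live (std::vector<std::bitset<64> > &live,
                     const std::vector<std::vector<int> > &preds)
     {
       for (int b = (int) live.size () - 1; b >= 0; --b)
         for (int p : preds[b])
           live[p] |= live[b];   // like live_names::merge above
     }

   The real code additionally records in LAST_RPO the smallest RPO index
   among a block's predecessors, so the block's bitmap can be freed as soon
   as the walk reaches that index and no further merge can need it.  */
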
3355 /* Create an ASSERT_EXPR for NAME and insert it in the location
3356    indicated by LOC.  Return true if we made any edge insertions.  */
3357 
3358 bool
3359 vrp_asserts::process_assert_insertions_for (tree name, assert_locus *loc)
3360 {
3361   /* Build the comparison expression NAME_i COMP_CODE VAL.  */
3362   gimple *stmt;
3363   tree cond;
3364   gimple *assert_stmt;
3365   edge_iterator ei;
3366   edge e;
3367 
3368   /* If we have X <=> X do not insert an assert expr for that.  */
3369   if (loc->expr == loc->val)
3370     return false;
3371 
3372   cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
3373   assert_stmt = build_assert_expr_for (cond, name);
3374   if (loc->e)
3375     {
3376       /* We have been asked to insert the assertion on an edge.  This
3377 	 is used only by COND_EXPR and SWITCH_EXPR assertions.  */
3378       gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
3379 			   || (gimple_code (gsi_stmt (loc->si))
3380 			       == GIMPLE_SWITCH));
3381 
3382       gsi_insert_on_edge (loc->e, assert_stmt);
3383       return true;
3384     }
3385 
3386   /* If the stmt iterator points at the end then this is an insertion
3387      at the beginning of a block.  */
3388   if (gsi_end_p (loc->si))
3389     {
3390       gimple_stmt_iterator si = gsi_after_labels (loc->bb);
3391       gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
3392       return false;
3393 
3394     }
3395   /* Otherwise, we can insert right after LOC->SI as long as the
3396      statement is not the last statement in the block.  */
3397   stmt = gsi_stmt (loc->si);
3398   if (!stmt_ends_bb_p (stmt))
3399     {
3400       gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
3401       return false;
3402     }
3403 
3404   /* If STMT must be the last statement in BB, we can only insert new
3405      assertions on the non-abnormal edge out of BB.  Note that since
3406      STMT is not control flow, there can be only one non-abnormal/non-EH edge
3407      out of BB.  */
3408   FOR_EACH_EDGE (e, ei, loc->bb->succs)
3409     if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
3410       {
3411 	gsi_insert_on_edge (e, assert_stmt);
3412 	return true;
3413       }
3414 
3415   gcc_unreachable ();
3416 }
3417 
3418 /* Qsort helper for sorting assert locations.  If stable is true, don't
3419    use iterative_hash_expr because it can be unstable for -fcompare-debug;
3420    on the other hand, some pointers might be NULL.  */
3421 
3422 template <bool stable>
3423 int
3424 vrp_asserts::compare_assert_loc (const void *pa, const void *pb)
3425 {
3426   assert_locus * const a = *(assert_locus * const *)pa;
3427   assert_locus * const b = *(assert_locus * const *)pb;
3428 
3429   /* If stable, some asserts might be optimized away already, sort
3430      them last.  */
3431   if (stable)
3432     {
3433       if (a == NULL)
3434 	return b != NULL;
3435       else if (b == NULL)
3436 	return -1;
3437     }
3438 
3439   if (a->e == NULL && b->e != NULL)
3440     return 1;
3441   else if (a->e != NULL && b->e == NULL)
3442     return -1;
3443 
3444   /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
3445      no need to test both a->e and b->e.  */
3446 
3447   /* Sort by destination index.  */
3448   if (a->e == NULL)
3449     ;
3450   else if (a->e->dest->index > b->e->dest->index)
3451     return 1;
3452   else if (a->e->dest->index < b->e->dest->index)
3453     return -1;
3454 
3455   /* Sort after comp_code.  */
3456   if (a->comp_code > b->comp_code)
3457     return 1;
3458   else if (a->comp_code < b->comp_code)
3459     return -1;
3460 
3461   hashval_t ha, hb;
3462 
3463   /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
3464      uses DECL_UID of the VAR_DECL, so sorting might differ between
3465      -g and -g0.  When doing the removal of redundant assert exprs
3466      and commonization to successors, this does not matter, but for
3467      the final sort needs to be stable.  */
3468   if (stable)
3469     {
3470       ha = 0;
3471       hb = 0;
3472     }
3473   else
3474     {
3475       ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
3476       hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
3477     }
3478 
3479   /* Break the tie using hashing and source/bb index.  */
3480   if (ha == hb)
3481     return (a->e != NULL
3482 	    ? a->e->src->index - b->e->src->index
3483 	    : a->bb->index - b->bb->index);
3484   return ha > hb ? 1 : -1;
3485 }
3486 
3487 /* Process all the insertions registered for every name N_i registered
3488    in NEED_ASSERT_FOR.  The list of assertions to be inserted are
3489    found in ASSERTS_FOR[i].  */
3490 
3491 void
3492 vrp_asserts::process_assert_insertions ()
3493 {
3494   unsigned i;
3495   bitmap_iterator bi;
3496   bool update_edges_p = false;
3497   int num_asserts = 0;
3498 
3499   if (dump_file && (dump_flags & TDF_DETAILS))
3500     dump (dump_file);
3501 
3502   EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3503     {
3504       assert_locus *loc = asserts_for[i];
3505       gcc_assert (loc);
3506 
3507       auto_vec<assert_locus *, 16> asserts;
3508       for (; loc; loc = loc->next)
3509 	asserts.safe_push (loc);
3510       asserts.qsort (compare_assert_loc<false>);
3511 
3512       /* Push down common asserts to successors and remove redundant ones.  */
3513       unsigned ecnt = 0;
3514       assert_locus *common = NULL;
3515       unsigned commonj = 0;
3516       for (unsigned j = 0; j < asserts.length (); ++j)
3517 	{
3518 	  loc = asserts[j];
3519 	  if (! loc->e)
3520 	    common = NULL;
3521 	  else if (! common
3522 		   || loc->e->dest != common->e->dest
3523 		   || loc->comp_code != common->comp_code
3524 		   || ! operand_equal_p (loc->val, common->val, 0)
3525 		   || ! operand_equal_p (loc->expr, common->expr, 0))
3526 	    {
3527 	      commonj = j;
3528 	      common = loc;
3529 	      ecnt = 1;
3530 	    }
3531 	  else if (loc->e == asserts[j-1]->e)
3532 	    {
3533 	      /* Remove duplicate asserts.  */
3534 	      if (commonj == j - 1)
3535 		{
3536 		  commonj = j;
3537 		  common = loc;
3538 		}
3539 	      free (asserts[j-1]);
3540 	      asserts[j-1] = NULL;
3541 	    }
3542 	  else
3543 	    {
3544 	      ecnt++;
3545 	      if (EDGE_COUNT (common->e->dest->preds) == ecnt)
3546 		{
3547 		  /* We have the same assertion on all incoming edges of a BB.
3548 		     Insert it at the beginning of that block.  */
3549 		  loc->bb = loc->e->dest;
3550 		  loc->e = NULL;
3551 		  loc->si = gsi_none ();
3552 		  common = NULL;
3553 		  /* Clear asserts commoned.  */
3554 		  for (; commonj != j; ++commonj)
3555 		    if (asserts[commonj])
3556 		      {
3557 			free (asserts[commonj]);
3558 			asserts[commonj] = NULL;
3559 		      }
3560 		}
3561 	    }
3562 	}
3563 
3564       /* The asserts vector sorting above might be unstable for
3565 	 -fcompare-debug; sort again to ensure a stable sort.  */
3566       asserts.qsort (compare_assert_loc<true>);
3567       for (unsigned j = 0; j < asserts.length (); ++j)
3568 	{
3569 	  loc = asserts[j];
3570 	  if (! loc)
3571 	    break;
3572 	  update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
3573 	  num_asserts++;
3574 	  free (loc);
3575 	}
3576     }
3577 
3578   if (update_edges_p)
3579     gsi_commit_edge_inserts ();
3580 
3581   statistics_counter_event (fun, "Number of ASSERT_EXPR expressions inserted",
3582 			    num_asserts);
3583 }
3584 
3585 /* Traverse the flowgraph looking for conditional jumps to insert range
3586    expressions.  These range expressions are meant to provide information
3587    to optimizations that need to reason in terms of value ranges.  They
3588    will not be expanded into RTL.  For instance, given:
3589 
3590    x = ...
3591    y = ...
3592    if (x < y)
3593      y = x - 2;
3594    else
3595      x = y + 3;
3596 
3597    this pass will transform the code into:
3598 
3599    x = ...
3600    y = ...
3601    if (x < y)
3602     {
3603       x = ASSERT_EXPR <x, x < y>
3604       y = x - 2
3605     }
3606    else
3607     {
3608       y = ASSERT_EXPR <y, x >= y>
3609       x = y + 3
3610     }
3611 
3612    The idea is that once copy and constant propagation have run, other
3613    optimizations will be able to determine what ranges of values 'x' can
3614    take in different paths of the code, simply by checking the reaching
3615    definition of 'x'.  */
3616 
3617 void
3618 vrp_asserts::insert_range_assertions (void)
3619 {
3620   need_assert_for = BITMAP_ALLOC (NULL);
3621   asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
3622 
3623   calculate_dominance_info (CDI_DOMINATORS);
3624 
3625   find_assert_locations ();
3626   if (!bitmap_empty_p (need_assert_for))
3627     {
3628       process_assert_insertions ();
3629       update_ssa (TODO_update_ssa_no_phi);
3630     }
3631 
3632   if (dump_file && (dump_flags & TDF_DETAILS))
3633     {
3634       fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
3635       dump_function_to_file (current_function_decl, dump_file, dump_flags);
3636     }
3637 
3638   free (asserts_for);
3639   BITMAP_FREE (need_assert_for);
3640 }
3641 
3642 /* Return true if all imm uses of VAR are either in STMT, or
3643    feed (optionally through a chain of single imm uses) GIMPLE_COND
3644    in basic block COND_BB.  */
3645 
3646 bool
3647 vrp_asserts::all_imm_uses_in_stmt_or_feed_cond (tree var,
3648 						gimple *stmt,
3649 						basic_block cond_bb)
3650 {
3651   use_operand_p use_p, use2_p;
3652   imm_use_iterator iter;
3653 
3654   FOR_EACH_IMM_USE_FAST (use_p, iter, var)
3655     if (USE_STMT (use_p) != stmt)
3656       {
3657 	gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
3658 	if (is_gimple_debug (use_stmt))
3659 	  continue;
3660 	while (is_gimple_assign (use_stmt)
3661 	       && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
3662 	       && single_imm_use (gimple_assign_lhs (use_stmt),
3663 				  &use2_p, &use_stmt2))
3664 	  use_stmt = use_stmt2;
3665 	if (gimple_code (use_stmt) != GIMPLE_COND
3666 	    || gimple_bb (use_stmt) != cond_bb)
3667 	  return false;
3668       }
3669   return true;
3670 }
3671 
3672 /* Convert range assertion expressions into the implied copies and
3673    copy propagate away the copies.  Doing the trivial copy propagation
3674    here avoids the need to run the full copy propagation pass after
3675    VRP.
3676 
3677    FIXME, this will eventually lead to copy propagation removing the
3678    names that had useful range information attached to them.  For
3679    instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
3680    then N_i will have the range [3, +INF].
3681 
3682    However, by converting the assertion into the implied copy
3683    operation N_i = N_j, we will then copy-propagate N_j into the uses
3684    of N_i and lose the range information.
3685 
3686    The problem with keeping ASSERT_EXPRs around is that passes after
3687    VRP need to handle them appropriately.
3688 
3689    Another approach would be to make the range information a first
3690    class property of the SSA_NAME so that it can be queried from
3691    any pass.  This is made somewhat more complex by the need for
3692    multiple ranges to be associated with one SSA_NAME.  */
3693 
3694 void
3695 vrp_asserts::remove_range_assertions ()
3696 {
3697   basic_block bb;
3698   gimple_stmt_iterator si;
3699   /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
3700      a basic block preceded by a GIMPLE_COND branching to it and
3701      __builtin_trap, -1 if not yet checked, 0 otherwise.  */
3702   int is_unreachable;
3703 
3704   /* Note that the BSI iterator bump happens at the bottom of the
3705      loop and no bump is necessary if we're removing the statement
3706      referenced by the current BSI.  */
3707   FOR_EACH_BB_FN (bb, fun)
3708     for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
3709       {
3710 	gimple *stmt = gsi_stmt (si);
3711 
3712 	if (is_gimple_assign (stmt)
3713 	    && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
3714 	  {
3715 	    tree lhs = gimple_assign_lhs (stmt);
3716 	    tree rhs = gimple_assign_rhs1 (stmt);
3717 	    tree var;
3718 
3719 	    var = ASSERT_EXPR_VAR (rhs);
3720 
3721 	    if (TREE_CODE (var) == SSA_NAME
3722 		&& !POINTER_TYPE_P (TREE_TYPE (lhs))
3723 		&& SSA_NAME_RANGE_INFO (lhs))
3724 	      {
3725 		if (is_unreachable == -1)
3726 		  {
3727 		    is_unreachable = 0;
3728 		    if (single_pred_p (bb)
3729 			&& assert_unreachable_fallthru_edge_p
3730 						    (single_pred_edge (bb)))
3731 		      is_unreachable = 1;
3732 		  }
3733 		/* Handle
3734 		   if (x_7 >= 10 && x_7 < 20)
3735 		     __builtin_unreachable ();
3736 		   x_8 = ASSERT_EXPR <x_7, ...>;
3737 		   if the only uses of x_7 are in the ASSERT_EXPR and
3738 		   in the condition.  In that case, we can copy the
3739 		   range info from x_8 computed in this pass also
3740 		   for x_7.  */
3741 		if (is_unreachable
3742 		    && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
3743 							  single_pred (bb)))
3744 		  {
3745 		    set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
3746 				    SSA_NAME_RANGE_INFO (lhs)->get_min (),
3747 				    SSA_NAME_RANGE_INFO (lhs)->get_max ());
3748 		    maybe_set_nonzero_bits (single_pred_edge (bb), var);
3749 		  }
3750 	      }
3751 
3752 	    /* Propagate the RHS into every use of the LHS.  For SSA names
3753 	       also propagate abnormals as it merely restores the original
3754 	       IL in this case (a replace_uses_by would assert).  */
3755 	    if (TREE_CODE (var) == SSA_NAME)
3756 	      {
3757 		imm_use_iterator iter;
3758 		use_operand_p use_p;
3759 		gimple *use_stmt;
3760 		FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3761 		  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3762 		    SET_USE (use_p, var);
3763 	      }
3764 	    else
3765 	      replace_uses_by (lhs, var);
3766 
3767 	    /* And finally, remove the copy, it is not needed.  */
3768 	    gsi_remove (&si, true);
3769 	    release_defs (stmt);
3770 	  }
3771 	else
3772 	  {
3773 	    if (!is_gimple_debug (gsi_stmt (si)))
3774 	      is_unreachable = 0;
3775 	    gsi_next (&si);
3776 	  }
3777       }
3778 }
3779 
3780 class vrp_prop : public ssa_propagation_engine
3781 {
3782 public:
3783   vrp_prop (vr_values *v)
3784     : ssa_propagation_engine (),
3785       m_vr_values (v) { }
3786 
3787   void initialize (struct function *);
3788   void finalize ();
3789 
3790 private:
3791   enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
3792   enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
3793 
3794   struct function *fun;
3795   vr_values *m_vr_values;
3796 };
3797 
3798 /* Initialization required by ssa_propagate engine.  */
3799 
3800 void
3801 vrp_prop::initialize (struct function *fn)
3802 {
3803   basic_block bb;
3804   fun = fn;
3805 
3806   FOR_EACH_BB_FN (bb, fun)
3807     {
3808       for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3809 	   gsi_next (&si))
3810 	{
3811 	  gphi *phi = si.phi ();
3812 	  if (!stmt_interesting_for_vrp (phi))
3813 	    {
3814 	      tree lhs = PHI_RESULT (phi);
3815 	      m_vr_values->set_def_to_varying (lhs);
3816 	      prop_set_simulate_again (phi, false);
3817 	    }
3818 	  else
3819 	    prop_set_simulate_again (phi, true);
3820 	}
3821 
3822       for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
3823 	   gsi_next (&si))
3824         {
3825 	  gimple *stmt = gsi_stmt (si);
3826 
3827 	  /* If the statement is a control insn, we always want to
3828 	     simulate it at least once; otherwise its outgoing edges
3829 	     would never get added.  */
3830 	  if (stmt_ends_bb_p (stmt))
3831 	    prop_set_simulate_again (stmt, true);
3832 	  else if (!stmt_interesting_for_vrp (stmt))
3833 	    {
3834 	      m_vr_values->set_defs_to_varying (stmt);
3835 	      prop_set_simulate_again (stmt, false);
3836 	    }
3837 	  else
3838 	    prop_set_simulate_again (stmt, true);
3839 	}
3840     }
3841 }
3842 
3843 /* Evaluate statement STMT.  If the statement produces a useful range,
3844    return SSA_PROP_INTERESTING and record the SSA name with the
3845    interesting range into *OUTPUT_P.
3846 
3847    If STMT is a conditional branch and we can determine its truth
3848    value, the taken edge is recorded in *TAKEN_EDGE_P.
3849 
3850    If STMT produces a varying value, return SSA_PROP_VARYING.  */
3851 
3852 enum ssa_prop_result
3853 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
3854 {
3855   tree lhs = gimple_get_lhs (stmt);
3856   value_range_equiv vr;
3857   m_vr_values->extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
3858 
3859   if (*output_p)
3860     {
3861       if (m_vr_values->update_value_range (*output_p, &vr))
3862 	{
3863 	  if (dump_file && (dump_flags & TDF_DETAILS))
3864 	    {
3865 	      fprintf (dump_file, "Found new range for ");
3866 	      print_generic_expr (dump_file, *output_p);
3867 	      fprintf (dump_file, ": ");
3868 	      dump_value_range (dump_file, &vr);
3869 	      fprintf (dump_file, "\n");
3870 	    }
3871 
3872 	  if (vr.varying_p ())
3873 	    return SSA_PROP_VARYING;
3874 
3875 	  return SSA_PROP_INTERESTING;
3876 	}
3877       return SSA_PROP_NOT_INTERESTING;
3878     }
3879 
3880   if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
3881     switch (gimple_call_internal_fn (stmt))
3882       {
3883       case IFN_ADD_OVERFLOW:
3884       case IFN_SUB_OVERFLOW:
3885       case IFN_MUL_OVERFLOW:
3886       case IFN_ATOMIC_COMPARE_EXCHANGE:
3887 	/* These internal calls return _Complex integer type,
3888 	   which VRP does not track, but the immediate uses
3889 	   thereof might be interesting.  */
3890 	if (lhs && TREE_CODE (lhs) == SSA_NAME)
3891 	  {
3892 	    imm_use_iterator iter;
3893 	    use_operand_p use_p;
3894 	    enum ssa_prop_result res = SSA_PROP_VARYING;
3895 
3896 	    m_vr_values->set_def_to_varying (lhs);
3897 
3898 	    FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
3899 	      {
3900 		gimple *use_stmt = USE_STMT (use_p);
3901 		if (!is_gimple_assign (use_stmt))
3902 		  continue;
3903 		enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
3904 		if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
3905 		  continue;
3906 		tree rhs1 = gimple_assign_rhs1 (use_stmt);
3907 		tree use_lhs = gimple_assign_lhs (use_stmt);
3908 		if (TREE_CODE (rhs1) != rhs_code
3909 		    || TREE_OPERAND (rhs1, 0) != lhs
3910 		    || TREE_CODE (use_lhs) != SSA_NAME
3911 		    || !stmt_interesting_for_vrp (use_stmt)
3912 		    || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
3913 			|| !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
3914 			|| !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
3915 		  continue;
3916 
3917 		/* If there is a change in the value range for any of the
3918 		   REALPART_EXPR/IMAGPART_EXPR immediate uses, return
3919 		   SSA_PROP_INTERESTING.  If there are any REALPART_EXPR
3920 		   or IMAGPART_EXPR immediate uses, but none of them have
3921 		   a change in their value ranges, return
3922 		   SSA_PROP_NOT_INTERESTING.  If there are no
3923 		   {REAL,IMAG}PART_EXPR uses at all,
3924 		   return SSA_PROP_VARYING.  */
3925 		value_range_equiv new_vr;
3926 		m_vr_values->extract_range_basic (&new_vr, use_stmt);
3927 		const value_range_equiv *old_vr
3928 		  = m_vr_values->get_value_range (use_lhs);
3929 		if (!old_vr->equal_p (new_vr, /*ignore_equivs=*/false))
3930 		  res = SSA_PROP_INTERESTING;
3931 		else
3932 		  res = SSA_PROP_NOT_INTERESTING;
3933 		new_vr.equiv_clear ();
3934 		if (res == SSA_PROP_INTERESTING)
3935 		  {
3936 		    *output_p = lhs;
3937 		    return res;
3938 		  }
3939 	      }
3940 
3941 	    return res;
3942 	  }
3943 	break;
3944       default:
3945 	break;
3946       }
3947 
3948   /* All other statements produce nothing of interest for VRP, so mark
3949      their outputs varying and prevent further simulation.  */
3950   m_vr_values->set_defs_to_varying (stmt);
3951 
3952   return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
3953 }
3954 
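/* For illustration (an assumption about the usual GIMPLE lowering, not
   something stated in this file): a source-level call such as

     bool ovf = __builtin_add_overflow (a, b, &res);

   is represented as an internal call producing a complex value whose parts
   are then extracted, roughly

     _1 = .ADD_OVERFLOW (a_2, b_3);
     res_4 = REALPART_EXPR <_1>;
     ovf_5 = IMAGPART_EXPR <_1>;

   The IFN_*_OVERFLOW handling above looks through exactly such
   REALPART_EXPR/IMAGPART_EXPR uses to decide whether re-simulating them
   might still refine a range.  */
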
3955 /* Visit all arguments for PHI node PHI that flow through executable
3956    edges.  If a valid value range can be derived from all the incoming
3957    value ranges, set a new range for the LHS of PHI.  */
3958 
3959 enum ssa_prop_result
3960 vrp_prop::visit_phi (gphi *phi)
3961 {
3962   tree lhs = PHI_RESULT (phi);
3963   value_range_equiv vr_result;
3964   m_vr_values->extract_range_from_phi_node (phi, &vr_result);
3965   if (m_vr_values->update_value_range (lhs, &vr_result))
3966     {
3967       if (dump_file && (dump_flags & TDF_DETAILS))
3968 	{
3969 	  fprintf (dump_file, "Found new range for ");
3970 	  print_generic_expr (dump_file, lhs);
3971 	  fprintf (dump_file, ": ");
3972 	  dump_value_range (dump_file, &vr_result);
3973 	  fprintf (dump_file, "\n");
3974 	}
3975 
3976       if (vr_result.varying_p ())
3977 	return SSA_PROP_VARYING;
3978 
3979       return SSA_PROP_INTERESTING;
3980     }
3981 
3982   /* Nothing changed, don't add outgoing edges.  */
3983   return SSA_PROP_NOT_INTERESTING;
3984 }
3985 
3986 /* Traverse all the blocks folding conditionals with known ranges.  */
3987 
3988 void
3989 vrp_prop::finalize ()
3990 {
3991   size_t i;
3992 
3993   /* We have completed propagating through the lattice.  */
3994   m_vr_values->set_lattice_propagation_complete ();
3995 
3996   if (dump_file)
3997     {
3998       fprintf (dump_file, "\nValue ranges after VRP:\n\n");
3999       m_vr_values->dump (dump_file);
4000       fprintf (dump_file, "\n");
4001     }
4002 
4003   /* Set value ranges on non-pointer SSA_NAMEs and mark nonnull pointers.  */
4004   for (i = 0; i < num_ssa_names; i++)
4005     {
4006       tree name = ssa_name (i);
4007       if (!name)
4008 	continue;
4009 
4010       const value_range_equiv *vr = m_vr_values->get_value_range (name);
4011       if (!name || vr->varying_p () || !vr->constant_p ())
4012 	continue;
4013 
4014       if (POINTER_TYPE_P (TREE_TYPE (name))
4015 	  && range_includes_zero_p (vr) == 0)
4016 	set_ptr_nonnull (name);
4017       else if (!POINTER_TYPE_P (TREE_TYPE (name)))
4018 	set_range_info (name, *vr);
4019     }
4020 }
4021 
4022 class vrp_folder : public substitute_and_fold_engine
4023 {
4024  public:
4025   vrp_folder (vr_values *v)
4026     : substitute_and_fold_engine (/* Fold all stmts.  */ true),
4027       m_vr_values (v), simplifier (v)
4028     {  }
4029   void simplify_casted_conds (function *fun);
4030 
4031 private:
4032   tree value_of_expr (tree name, gimple *stmt) OVERRIDE
4033     {
4034       return m_vr_values->value_of_expr (name, stmt);
4035     }
4036   bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
4037   bool fold_predicate_in (gimple_stmt_iterator *);
4038 
4039   vr_values *m_vr_values;
4040   simplify_using_ranges simplifier;
4041 };
4042 
4043 /* If the statement pointed to by SI has a predicate whose value can be
4044    computed using the value range information computed by VRP, compute
4045    its value and return true.  Otherwise, return false.  */
4046 
4047 bool
4048 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
4049 {
4050   bool assignment_p = false;
4051   tree val;
4052   gimple *stmt = gsi_stmt (*si);
4053 
4054   if (is_gimple_assign (stmt)
4055       && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
4056     {
4057       assignment_p = true;
4058       val = simplifier.vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
4059 						 gimple_assign_rhs1 (stmt),
4060 						 gimple_assign_rhs2 (stmt),
4061 						 stmt);
4062     }
4063   else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
4064     val = simplifier.vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
4065 					       gimple_cond_lhs (cond_stmt),
4066 					       gimple_cond_rhs (cond_stmt),
4067 					       stmt);
4068   else
4069     return false;
4070 
4071   if (val)
4072     {
4073       if (assignment_p)
4074 	val = fold_convert (TREE_TYPE (gimple_assign_lhs (stmt)), val);
4075 
4076       if (dump_file)
4077 	{
4078 	  fprintf (dump_file, "Folding predicate ");
4079 	  print_gimple_expr (dump_file, stmt, 0);
4080 	  fprintf (dump_file, " to ");
4081 	  print_generic_expr (dump_file, val);
4082 	  fprintf (dump_file, "\n");
4083 	}
4084 
4085       if (is_gimple_assign (stmt))
4086 	gimple_assign_set_rhs_from_tree (si, val);
4087       else
4088 	{
4089 	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
4090 	  gcond *cond_stmt = as_a <gcond *> (stmt);
4091 	  if (integer_zerop (val))
4092 	    gimple_cond_make_false (cond_stmt);
4093 	  else if (integer_onep (val))
4094 	    gimple_cond_make_true (cond_stmt);
4095 	  else
4096 	    gcc_unreachable ();
4097 	}
4098 
4099       return true;
4100     }
4101 
4102   return false;
4103 }
4104 
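/* For illustration (a hypothetical fragment): with the ranges computed by
   the propagation above, a predicate that is already decided by an earlier
   guard folds to a constant.  In

     if (x_1 > 10)
       {
         x_2 = ASSERT_EXPR <x_1, x_1 > 10>;
         if (x_2 > 5)
           ...
       }

   the range of x_2 is [11, +INF], so the inner GIMPLE_COND is made
   unconditionally true and CFG cleanup can remove the dead arm.  */
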
4105 /* Callback for substitute_and_fold folding the stmt at *SI.  */
4106 
4107 bool
4108 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
4109 {
4110   if (fold_predicate_in (si))
4111     return true;
4112 
4113   return simplifier.simplify (si);
4114 }
4115 
4116 /* A comparison of an SSA_NAME against a constant where the SSA_NAME
4117    was set by a type conversion can often be rewritten to use the RHS
4118    of the type conversion.  Do this optimization for all conditionals
4119    in FUN.  */
4120 
4121 void
4122 vrp_folder::simplify_casted_conds (function *fun)
4123 {
4124   basic_block bb;
4125   FOR_EACH_BB_FN (bb, fun)
4126     {
4127       gimple *last = last_stmt (bb);
4128       if (last && gimple_code (last) == GIMPLE_COND)
4129 	{
4130 	  if (simplifier.simplify_casted_cond (as_a <gcond *> (last)))
4131 	    {
4132 	      if (dump_file && (dump_flags & TDF_DETAILS))
4133 		{
4134 		  fprintf (dump_file, "Folded into: ");
4135 		  print_gimple_stmt (dump_file, last, 0, TDF_SLIM);
4136 		  fprintf (dump_file, "\n");
4137 		}
4138 	    }
4139 	}
4140     }
4141 }
4142 
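/* For illustration (a typical source-level case, not taken from a
   testcase): given

     s_1 = ...;               /* s_1 has a narrow (e.g. short) type.  */
     i_2 = (int) s_1;
     if (i_2 > 10)

   the comparison can be rewritten to test s_1 > 10 directly, since the
   constant converts to the narrower type exactly and the widening
   conversion cannot change the outcome.  This can make the conversion dead
   and keeps the comparison on the operand that later passes care about.  */
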
4143 /* Main entry point to VRP (Value Range Propagation).  This pass is
4144    loosely based on J. R. C. Patterson, ``Accurate Static Branch
4145    Prediction by Value Range Propagation,'' in SIGPLAN Conference on
4146    Programming Language Design and Implementation, pp. 67-78, 1995.
4147    Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
4148 
4149    This is essentially an SSA-CCP pass modified to deal with ranges
4150    instead of constants.
4151 
4152    While propagating ranges, we may find that two or more SSA names
4153    have equivalent, though distinct ranges.  For instance,
4154 
4155      1	x_9 = p_3->a;
4156      2	p_4 = ASSERT_EXPR <p_3, p_3 != 0>
4157      3	if (p_4 == q_2)
4158      4	  p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
4159      5	endif
4160      6	if (q_2)
4161 
4162    In the code above, pointer p_5 has range [q_2, q_2], but from the
4163    code we can also determine that p_5 cannot be NULL and, if q_2 had
4164    a non-varying range, p_5's range should also be compatible with it.
4165 
4166    These equivalences are created by two expressions: ASSERT_EXPR and
4167    copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
4168    result of another assertion, then we can use the fact that p_5 and
4169    p_4 are equivalent when evaluating p_5's range.
4170 
4171    Together with value ranges, we also propagate these equivalences
4172    between names so that we can take advantage of information from
4173    multiple ranges when doing final replacement.  Note that this
4174    equivalency relation is transitive but not symmetric.
4175 
4176    In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
4177    cannot assert that q_2 is equivalent to p_5 because q_2 may be used
4178    in contexts where that assertion does not hold (e.g., in line 6).
4179 
4180    TODO, the main difference between this pass and Patterson's is that
4181    we do not propagate edge probabilities.  We only compute whether
4182    edges can be taken or not.  That is, instead of having a spectrum
4183    of jump probabilities between 0 and 1, we only deal with 0, 1 and
4184    DON'T KNOW.  In the future, it may be worthwhile to propagate
4185    probabilities to aid branch prediction.  */
4186 
4187 static unsigned int
4188 execute_vrp (struct function *fun, bool warn_array_bounds_p)
4189 {
4190   loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
4191   rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
4192   scev_initialize ();
4193 
4194   /* ???  This ends up using stale EDGE_DFS_BACK for liveness computation.
4195      Inserting assertions may split edges which will invalidate
4196      EDGE_DFS_BACK.  */
4197   vrp_asserts assert_engine (fun);
4198   assert_engine.insert_range_assertions ();
4199 
4200   /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
4201   mark_dfs_back_edges ();
4202 
4203   vr_values vrp_vr_values;
4204 
4205   class vrp_prop vrp_prop (&vrp_vr_values);
4206   vrp_prop.initialize (fun);
4207   vrp_prop.ssa_propagate ();
4208 
4209   /* Instantiate the folder here, so that edge cleanups happen at the
4210      end of this function.  */
4211   vrp_folder folder (&vrp_vr_values);
4212   vrp_prop.finalize ();
4213 
4214   /* If we're checking array refs, we want to merge information on
4215      the executability of each edge between vrp_folder and the
4216      check_array_bounds_dom_walker: each can clear the
4217      EDGE_EXECUTABLE flag on edges, in different ways.
4218 
4219      Hence, if we're going to call check_all_array_refs, set
4220      the flag on every edge now, rather than in
4221      check_array_bounds_dom_walker's ctor; vrp_folder may clear
4222      it from some edges.  */
4223   if (warn_array_bounds && warn_array_bounds_p)
4224     set_all_edges_as_executable (fun);
4225 
4226   folder.substitute_and_fold ();
4227 
4228   if (warn_array_bounds && warn_array_bounds_p)
4229     {
4230       array_bounds_checker array_checker (fun, &vrp_vr_values);
4231       array_checker.check ();
4232     }
4233 
4234   folder.simplify_casted_conds (fun);
4235 
4236   free_numbers_of_iterations_estimates (fun);
4237 
4238   assert_engine.remove_range_assertions ();
4239 
4240   scev_finalize ();
4241   loop_optimizer_finalize ();
4242   return 0;
4243 }
4244 
4245 // This is a ranger based folder which continues to use the dominator
4246 // walk to access the substitute and fold machinery.  Ranges are calculated
4247 // on demand.
4248 
4249 class rvrp_folder : public substitute_and_fold_engine
4250 {
4251 public:
4252 
4253   rvrp_folder (gimple_ranger *r) : substitute_and_fold_engine (),
4254 				   m_simplifier (r, r->non_executable_edge_flag)
4255   {
4256     m_ranger = r;
4257     m_pta = new pointer_equiv_analyzer (m_ranger);
4258   }
4259 
4260   ~rvrp_folder ()
4261   {
4262     delete m_pta;
4263   }
4264 
4265   tree value_of_expr (tree name, gimple *s = NULL) OVERRIDE
4266   {
4267     // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
4268     if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4269       return NULL;
4270     tree ret = m_ranger->value_of_expr (name, s);
4271     if (!ret && supported_pointer_equiv_p (name))
4272       ret = m_pta->get_equiv (name);
4273     return ret;
4274   }
4275 
4276   tree value_on_edge (edge e, tree name) OVERRIDE
4277   {
4278     // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
4279     if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4280       return NULL;
4281     tree ret = m_ranger->value_on_edge (e, name);
4282     if (!ret && supported_pointer_equiv_p (name))
4283       ret = m_pta->get_equiv (name);
4284     return ret;
4285   }
4286 
4287   tree value_of_stmt (gimple *s, tree name = NULL) OVERRIDE
4288   {
4289     // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
4290     if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4291       return NULL;
4292     return m_ranger->value_of_stmt (s, name);
4293   }
4294 
4295   void pre_fold_bb (basic_block bb) OVERRIDE
4296   {
4297     m_pta->enter (bb);
4298   }
4299 
4300   void post_fold_bb (basic_block bb) OVERRIDE
4301   {
4302     m_pta->leave (bb);
4303   }
4304 
4305   void pre_fold_stmt (gimple *stmt) OVERRIDE
4306   {
4307     m_pta->visit_stmt (stmt);
4308   }
4309 
4310   bool fold_stmt (gimple_stmt_iterator *gsi) OVERRIDE
4311   {
4312     bool ret = m_simplifier.simplify (gsi);
4313     if (!ret)
4314       ret = m_ranger->fold_stmt (gsi, follow_single_use_edges);
4315     m_ranger->register_side_effects (gsi_stmt (*gsi));
4316     return ret;
4317   }
4318 
4319 private:
4320   DISABLE_COPY_AND_ASSIGN (rvrp_folder);
4321   gimple_ranger *m_ranger;
4322   simplify_using_ranges m_simplifier;
4323   pointer_equiv_analyzer *m_pta;
4324 };
4325 
4326 /* Main entry point for a VRP pass using just ranger. This can be called
4327   from anywhere to perform a VRP pass, including from EVRP.  */
4328 
4329 unsigned int
4330 execute_ranger_vrp (struct function *fun, bool warn_array_bounds_p)
4331 {
4332   loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
4333   rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
4334   scev_initialize ();
4335   calculate_dominance_info (CDI_DOMINATORS);
4336 
4337   set_all_edges_as_executable (fun);
4338   gimple_ranger *ranger = enable_ranger (fun);
4339   rvrp_folder folder (ranger);
4340   folder.substitute_and_fold ();
4341   ranger->export_global_ranges ();
4342   if (dump_file && (dump_flags & TDF_DETAILS))
4343     ranger->dump (dump_file);
4344 
4345   if (warn_array_bounds && warn_array_bounds_p)
4346     {
4347       // Set all edges as executable, except those ranger says aren't.
4348       int non_exec_flag = ranger->non_executable_edge_flag;
4349       basic_block bb;
4350       FOR_ALL_BB_FN (bb, fun)
4351 	{
4352 	  edge_iterator ei;
4353 	  edge e;
4354 	  FOR_EACH_EDGE (e, ei, bb->succs)
4355 	    if (e->flags & non_exec_flag)
4356 	      e->flags &= ~EDGE_EXECUTABLE;
4357 	    else
4358 	      e->flags |= EDGE_EXECUTABLE;
4359 	}
4360       scev_reset ();
4361       array_bounds_checker array_checker (fun, ranger);
4362       array_checker.check ();
4363     }
4364 
4365   disable_ranger (fun);
4366   scev_finalize ();
4367   loop_optimizer_finalize ();
4368   return 0;
4369 }
4370 
4371 namespace {
4372 
4373 const pass_data pass_data_vrp =
4374 {
4375   GIMPLE_PASS, /* type */
4376   "vrp", /* name */
4377   OPTGROUP_NONE, /* optinfo_flags */
4378   TV_TREE_VRP, /* tv_id */
4379   PROP_ssa, /* properties_required */
4380   0, /* properties_provided */
4381   0, /* properties_destroyed */
4382   0, /* todo_flags_start */
4383   ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
4384 };
4385 
4386 static int vrp_pass_num = 0;
4387 class pass_vrp : public gimple_opt_pass
4388 {
4389 public:
4390   pass_vrp (gcc::context *ctxt)
4391     : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false),
4392       my_pass (++vrp_pass_num)
4393   {}
4394 
4395   /* opt_pass methods: */
4396   opt_pass * clone () { return new pass_vrp (m_ctxt); }
4397   void set_pass_param (unsigned int n, bool param)
4398     {
4399       gcc_assert (n == 0);
4400       warn_array_bounds_p = param;
4401     }
4402   virtual bool gate (function *) { return flag_tree_vrp != 0; }
4403   virtual unsigned int execute (function *fun)
4404     {
4405       if ((my_pass == 1 && param_vrp1_mode == VRP_MODE_RANGER)
4406 	  || (my_pass == 2 && param_vrp2_mode == VRP_MODE_RANGER))
4407 	return execute_ranger_vrp (fun, warn_array_bounds_p);
4408       return execute_vrp (fun, warn_array_bounds_p);
4409     }
4410 
4411  private:
4412   bool warn_array_bounds_p;
4413   int my_pass;
4414 }; // class pass_vrp
4415 
4416 } // anon namespace
4417 
4418 gimple_opt_pass *
4419 make_pass_vrp (gcc::context *ctxt)
4420 {
4421   return new pass_vrp (ctxt);
4422 }
4423
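/* Usage note (an assumption inferred from the param_vrp1_mode and
   param_vrp2_mode checks in pass_vrp::execute above, not something stated
   in this file): the implementation used by each pass instance is expected
   to be selectable with the corresponding --param, e.g.

     gcc -O2 --param=vrp1-mode=ranger ...

   to run the ranger-based folder for the first VRP pass, or

     gcc -O2 --param=vrp2-mode=vrp ...

   to keep the legacy ASSERT_EXPR-based implementation for the second.  */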