xref: /dflybsd-src/contrib/gcc-8.0/gcc/tree-vrp.c (revision fb3c2c0caaaa0e5d800e088b7dd7983e9ab009ec)
1 /* Support routines for Value Range Propagation (VRP).
2    Copyright (C) 2005-2018 Free Software Foundation, Inc.
3    Contributed by Diego Novillo <dnovillo@redhat.com>.
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11 
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 GNU General Public License for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "insn-codes.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "cfghooks.h"
30 #include "tree-pass.h"
31 #include "ssa.h"
32 #include "optabs-tree.h"
33 #include "gimple-pretty-print.h"
34 #include "diagnostic-core.h"
35 #include "flags.h"
36 #include "fold-const.h"
37 #include "stor-layout.h"
38 #include "calls.h"
39 #include "cfganal.h"
40 #include "gimple-fold.h"
41 #include "tree-eh.h"
42 #include "gimple-iterator.h"
43 #include "gimple-walk.h"
44 #include "tree-cfg.h"
45 #include "tree-dfa.h"
46 #include "tree-ssa-loop-manip.h"
47 #include "tree-ssa-loop-niter.h"
48 #include "tree-ssa-loop.h"
49 #include "tree-into-ssa.h"
50 #include "tree-ssa.h"
51 #include "intl.h"
52 #include "cfgloop.h"
53 #include "tree-scalar-evolution.h"
54 #include "tree-ssa-propagate.h"
55 #include "tree-chrec.h"
56 #include "tree-ssa-threadupdate.h"
57 #include "tree-ssa-scopedtables.h"
58 #include "tree-ssa-threadedge.h"
59 #include "omp-general.h"
60 #include "target.h"
61 #include "case-cfn-macros.h"
62 #include "params.h"
63 #include "alloc-pool.h"
64 #include "domwalk.h"
65 #include "tree-cfgcleanup.h"
66 #include "stringpool.h"
67 #include "attribs.h"
68 #include "vr-values.h"
69 #include "builtins.h"
70 
71 /* Set of SSA names found live during the RPO traversal of the function
72    for still active basic-blocks.  */
73 static sbitmap *live;
74 
75 /* Return true if the SSA name NAME is live on the edge E.  */
76 
77 static bool
78 live_on_edge (edge e, tree name)
79 {
80   return (live[e->dest->index]
81 	  && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
82 }
83 
84 /* Location information for ASSERT_EXPRs.  Each instance of this
85    structure describes an ASSERT_EXPR for an SSA name.  Since a single
86    SSA name may have more than one assertion associated with it, these
87    locations are kept in a linked list attached to the corresponding
88    SSA name.  */
89 struct assert_locus
90 {
91   /* Basic block where the assertion would be inserted.  */
92   basic_block bb;
93 
94   /* Some assertions need to be inserted on an edge (e.g., assertions
95      generated by COND_EXPRs).  In those cases, BB will be NULL.  */
96   edge e;
97 
98   /* Pointer to the statement that generated this assertion.  */
99   gimple_stmt_iterator si;
100 
101   /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
102   enum tree_code comp_code;
103 
104   /* Value being compared against.  */
105   tree val;
106 
107   /* Expression to compare.  */
108   tree expr;
109 
110   /* Next node in the linked list.  */
111   assert_locus *next;
112 };
113 
114 /* If bit I is present, it means that SSA name N_i has a list of
115    assertions that should be inserted in the IL.  */
116 static bitmap need_assert_for;
117 
118 /* Array of location lists where to insert assertions.  ASSERTS_FOR[I]
119    holds a list of ASSERT_LOCUS_T nodes that describe where
120    ASSERT_EXPRs for SSA name N_I should be inserted.  */
121 static assert_locus **asserts_for;
122 
123 vec<edge> to_remove_edges;
124 vec<switch_update> to_update_switch_stmts;
125 
126 
127 /* Return the maximum value for TYPE.  */
128 
129 tree
130 vrp_val_max (const_tree type)
131 {
132   if (!INTEGRAL_TYPE_P (type))
133     return NULL_TREE;
134 
135   return TYPE_MAX_VALUE (type);
136 }
137 
138 /* Return the minimum value for TYPE.  */
139 
140 tree
141 vrp_val_min (const_tree type)
142 {
143   if (!INTEGRAL_TYPE_P (type))
144     return NULL_TREE;
145 
146   return TYPE_MIN_VALUE (type);
147 }
148 
149 /* Return whether VAL is equal to the maximum value of its type.
150    We can't do a simple equality comparison with TYPE_MAX_VALUE because
151    C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
152    is not == to the integer constant with the same value in the type.  */
153 
154 bool
155 vrp_val_is_max (const_tree val)
156 {
157   tree type_max = vrp_val_max (TREE_TYPE (val));
158   return (val == type_max
159 	  || (type_max != NULL_TREE
160 	      && operand_equal_p (val, type_max, 0)));
161 }
162 
163 /* Return whether VAL is equal to the minimum value of its type.  */
164 
165 bool
166 vrp_val_is_min (const_tree val)
167 {
168   tree type_min = vrp_val_min (TREE_TYPE (val));
169   return (val == type_min
170 	  || (type_min != NULL_TREE
171 	      && operand_equal_p (val, type_min, 0)));
172 }
173 
174 /* VR_TYPE describes a range with minimum value *MIN and maximum
175    value *MAX.  Restrict the range to the set of values that have
176    no bits set outside NONZERO_BITS.  Update *MIN and *MAX and
177    return the new range type.
178 
179    SGN gives the sign of the values described by the range.  */
180 
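
/* For example, for an 8-bit unsigned type, a VR_RANGE [1, 10] with
   NONZERO_BITS 0xc admits only the values 4 and 8, so *MIN is rounded
   up to 4, *MAX is rounded down to 8 and VR_RANGE is returned.  */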
181 enum value_range_type
182 intersect_range_with_nonzero_bits (enum value_range_type vr_type,
183 				   wide_int *min, wide_int *max,
184 				   const wide_int &nonzero_bits,
185 				   signop sgn)
186 {
187   if (vr_type == VR_ANTI_RANGE)
188     {
189       /* The VR_ANTI_RANGE is equivalent to the union of the ranges
190 	 A: [-INF, *MIN) and B: (*MAX, +INF].  First use NONZERO_BITS
191 	 to create an inclusive upper bound for A and an inclusive lower
192 	 bound for B.  */
193       wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
194       wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);
195 
196       /* If the calculation of A_MAX wrapped, A is effectively empty
197 	 and A_MAX is the highest value that satisfies NONZERO_BITS.
198 	 Likewise if the calculation of B_MIN wrapped, B is effectively
199 	 empty and B_MIN is the lowest value that satisfies NONZERO_BITS.  */
200       bool a_empty = wi::ge_p (a_max, *min, sgn);
201       bool b_empty = wi::le_p (b_min, *max, sgn);
202 
203       /* If both A and B are empty, there are no valid values.  */
204       if (a_empty && b_empty)
205 	return VR_UNDEFINED;
206 
207       /* If exactly one of A or B is empty, return a VR_RANGE for the
208 	 other one.  */
209       if (a_empty || b_empty)
210 	{
211 	  *min = b_min;
212 	  *max = a_max;
213 	  gcc_checking_assert (wi::le_p (*min, *max, sgn));
214 	  return VR_RANGE;
215 	}
216 
217       /* Update the VR_ANTI_RANGE bounds.  */
218       *min = a_max + 1;
219       *max = b_min - 1;
220       gcc_checking_assert (wi::le_p (*min, *max, sgn));
221 
222       /* Now check whether the excluded range includes any values that
223 	 satisfy NONZERO_BITS.  If not, switch to a full VR_RANGE.  */
224       if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
225 	{
226 	  unsigned int precision = min->get_precision ();
227 	  *min = wi::min_value (precision, sgn);
228 	  *max = wi::max_value (precision, sgn);
229 	  vr_type = VR_RANGE;
230 	}
231     }
232   if (vr_type == VR_RANGE)
233     {
234       *max = wi::round_down_for_mask (*max, nonzero_bits);
235 
236       /* Check that the range contains at least one valid value.  */
237       if (wi::gt_p (*min, *max, sgn))
238 	return VR_UNDEFINED;
239 
240       *min = wi::round_up_for_mask (*min, nonzero_bits);
241       gcc_checking_assert (wi::le_p (*min, *max, sgn));
242     }
243   return vr_type;
244 }
245 
246 /* Set value range VR to VR_UNDEFINED.  */
247 
248 static inline void
249 set_value_range_to_undefined (value_range *vr)
250 {
251   vr->type = VR_UNDEFINED;
252   vr->min = vr->max = NULL_TREE;
253   if (vr->equiv)
254     bitmap_clear (vr->equiv);
255 }
256 
257 /* Set value range VR to VR_VARYING.  */
258 
259 void
260 set_value_range_to_varying (value_range *vr)
261 {
262   vr->type = VR_VARYING;
263   vr->min = vr->max = NULL_TREE;
264   if (vr->equiv)
265     bitmap_clear (vr->equiv);
266 }
267 
268 /* Set value range VR to {T, MIN, MAX, EQUIV}.  */
269 
270 void
271 set_value_range (value_range *vr, enum value_range_type t, tree min,
272 		 tree max, bitmap equiv)
273 {
274   /* Check the validity of the range.  */
275   if (flag_checking
276       && (t == VR_RANGE || t == VR_ANTI_RANGE))
277     {
278       int cmp;
279 
280       gcc_assert (min && max);
281 
282       gcc_assert (!TREE_OVERFLOW_P (min) && !TREE_OVERFLOW_P (max));
283 
284       if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
285 	gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
286 
287       cmp = compare_values (min, max);
288       gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
289     }
290 
291   if (flag_checking
292       && (t == VR_UNDEFINED || t == VR_VARYING))
293     {
294       gcc_assert (min == NULL_TREE && max == NULL_TREE);
295       gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
296     }
297 
298   vr->type = t;
299   vr->min = min;
300   vr->max = max;
301 
302   /* Since updating the equivalence set involves deep copying the
303      bitmaps, only do it if absolutely necessary.
304 
305      All equivalence bitmaps are allocated from the same obstack.  So
306      we can use the obstack associated with EQUIV to allocate vr->equiv.  */
307   if (vr->equiv == NULL
308       && equiv != NULL)
309     vr->equiv = BITMAP_ALLOC (equiv->obstack);
310 
311   if (equiv != vr->equiv)
312     {
313       if (equiv && !bitmap_empty_p (equiv))
314 	bitmap_copy (vr->equiv, equiv);
315       else
316 	bitmap_clear (vr->equiv);
317     }
318 }
319 
320 
321 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
322    This means adjusting T, MIN and MAX representing the case of a
323    wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
324    as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
325    In corner cases where MAX+1 or MIN-1 wraps this will fall back
326    to varying.
327    This routine exists to ease canonicalization in the case where we
328    extract ranges from var + CST op limit.  */
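
/* For example, for an 8-bit unsigned type the wrapping range [250, 5],
   which stands for [250, 255] U [0, 5], is canonicalized to the
   anti-range ~[6, 249].  */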
329 
330 void
331 set_and_canonicalize_value_range (value_range *vr, enum value_range_type t,
332 				  tree min, tree max, bitmap equiv)
333 {
334   /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
335   if (t == VR_UNDEFINED)
336     {
337       set_value_range_to_undefined (vr);
338       return;
339     }
340   else if (t == VR_VARYING)
341     {
342       set_value_range_to_varying (vr);
343       return;
344     }
345 
346   /* Nothing to canonicalize for symbolic ranges.  */
347   if (TREE_CODE (min) != INTEGER_CST
348       || TREE_CODE (max) != INTEGER_CST)
349     {
350       set_value_range (vr, t, min, max, equiv);
351       return;
352     }
353 
354   /* If min and max are in the wrong order, swap them and adjust
355      the VR type to compensate.  */
356   if (tree_int_cst_lt (max, min))
357     {
358       tree one, tmp;
359 
360       /* For one-bit precision, if max < min, then the swapped
361 	 range covers all values, so for VR_RANGE it is varying and
362 	 for VR_ANTI_RANGE it is the empty range; drop to varying as well.  */
363       if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
364 	{
365 	  set_value_range_to_varying (vr);
366 	  return;
367 	}
368 
369       one = build_int_cst (TREE_TYPE (min), 1);
370       tmp = int_const_binop (PLUS_EXPR, max, one);
371       max = int_const_binop (MINUS_EXPR, min, one);
372       min = tmp;
373 
374       /* There's one corner case, if we had [C+1, C] before we now have
375 	 that again.  But this represents an empty value range, so drop
376 	 to varying in this case.  */
377       if (tree_int_cst_lt (max, min))
378 	{
379 	  set_value_range_to_varying (vr);
380 	  return;
381 	}
382 
383       t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
384     }
385 
386   /* Anti-ranges that can be represented as ranges should be so.  */
387   if (t == VR_ANTI_RANGE)
388     {
389       /* For -fstrict-enums we may receive out-of-range ranges so consider
390          values < -INF and values > INF as -INF/INF as well.  */
391       tree type = TREE_TYPE (min);
392       bool is_min = (INTEGRAL_TYPE_P (type)
393 		     && tree_int_cst_compare (min, TYPE_MIN_VALUE (type)) <= 0);
394       bool is_max = (INTEGRAL_TYPE_P (type)
395 		     && tree_int_cst_compare (max, TYPE_MAX_VALUE (type)) >= 0);
396 
397       if (is_min && is_max)
398 	{
399 	  /* We cannot deal with empty ranges, drop to varying.
400 	     ???  This could be VR_UNDEFINED instead.  */
401 	  set_value_range_to_varying (vr);
402 	  return;
403 	}
404       else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
405 	       && (is_min || is_max))
406 	{
407 	  /* Non-empty boolean ranges can always be represented
408 	     as a singleton range.  */
409 	  if (is_min)
410 	    min = max = vrp_val_max (TREE_TYPE (min));
411 	  else
412 	    min = max = vrp_val_min (TREE_TYPE (min));
413 	  t = VR_RANGE;
414 	}
415       else if (is_min
416 	       /* As a special exception preserve non-null ranges.  */
417 	       && !(TYPE_UNSIGNED (TREE_TYPE (min))
418 		    && integer_zerop (max)))
419         {
420 	  tree one = build_int_cst (TREE_TYPE (max), 1);
421 	  min = int_const_binop (PLUS_EXPR, max, one);
422 	  max = vrp_val_max (TREE_TYPE (max));
423 	  t = VR_RANGE;
424         }
425       else if (is_max)
426         {
427 	  tree one = build_int_cst (TREE_TYPE (min), 1);
428 	  max = int_const_binop (MINUS_EXPR, min, one);
429 	  min = vrp_val_min (TREE_TYPE (min));
430 	  t = VR_RANGE;
431         }
432     }
433 
434   /* Do not drop [-INF(OVF), +INF(OVF)] to varying.  (OVF) has to be sticky
435      to make sure VRP iteration terminates, otherwise we can get into
436      oscillations.  */
437 
438   set_value_range (vr, t, min, max, equiv);
439 }
440 
441 /* Copy value range FROM into value range TO.  */
442 
443 void
444 copy_value_range (value_range *to, value_range *from)
445 {
446   set_value_range (to, from->type, from->min, from->max, from->equiv);
447 }
448 
449 /* Set value range VR to a single value.  This function is only called
450    with values we get from statements, and exists to clear the
451    TREE_OVERFLOW flag.  */
452 
453 void
454 set_value_range_to_value (value_range *vr, tree val, bitmap equiv)
455 {
456   gcc_assert (is_gimple_min_invariant (val));
457   if (TREE_OVERFLOW_P (val))
458     val = drop_tree_overflow (val);
459   set_value_range (vr, VR_RANGE, val, val, equiv);
460 }
461 
462 /* Set value range VR to a non-NULL range of type TYPE.  */
463 
464 void
465 set_value_range_to_nonnull (value_range *vr, tree type)
466 {
467   tree zero = build_int_cst (type, 0);
468   set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
469 }
470 
471 
472 /* Set value range VR to a NULL range of type TYPE.  */
473 
474 void
475 set_value_range_to_null (value_range *vr, tree type)
476 {
477   set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
478 }
479 
480 
481 /* If abs (min) < abs (max), set VR to [-max, max]; if
482    abs (min) >= abs (max), set VR to [-min, min].  */
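
/* For example, [-3, 5] yields [-5, 5] and [-7, 2] yields [-7, 7].  */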
483 
484 static void
485 abs_extent_range (value_range *vr, tree min, tree max)
486 {
487   int cmp;
488 
489   gcc_assert (TREE_CODE (min) == INTEGER_CST);
490   gcc_assert (TREE_CODE (max) == INTEGER_CST);
491   gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
492   gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
493   min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
494   max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
495   if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
496     {
497       set_value_range_to_varying (vr);
498       return;
499     }
500   cmp = compare_values (min, max);
501   if (cmp == -1)
502     min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
503   else if (cmp == 0 || cmp == 1)
504     {
505       max = min;
506       min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
507     }
508   else
509     {
510       set_value_range_to_varying (vr);
511       return;
512     }
513   set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
514 }
515 
516 /* Return true if VAL1 and VAL2 are equal values for VRP purposes.  */
517 
518 bool
519 vrp_operand_equal_p (const_tree val1, const_tree val2)
520 {
521   if (val1 == val2)
522     return true;
523   if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
524     return false;
525   return true;
526 }
527 
528 /* Return true if the bitmaps B1 and B2 are equal.  */
529 
530 bool
531 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
532 {
533   return (b1 == b2
534 	  || ((!b1 || bitmap_empty_p (b1))
535 	      && (!b2 || bitmap_empty_p (b2)))
536 	  || (b1 && b2
537 	      && bitmap_equal_p (b1, b2)));
538 }
539 
540 /* Return true if VR is ~[0, 0].  */
541 
542 bool
543 range_is_nonnull (value_range *vr)
544 {
545   return vr->type == VR_ANTI_RANGE
546 	 && integer_zerop (vr->min)
547 	 && integer_zerop (vr->max);
548 }
549 
550 
551 /* Return true if VR is [0, 0].  */
552 
553 static inline bool
554 range_is_null (value_range *vr)
555 {
556   return vr->type == VR_RANGE
557 	 && integer_zerop (vr->min)
558 	 && integer_zerop (vr->max);
559 }
560 
561 /* Return true if max and min of VR are INTEGER_CST.  It is not
562    necessarily a singleton.  */
563 
564 bool
565 range_int_cst_p (value_range *vr)
566 {
567   return (vr->type == VR_RANGE
568 	  && TREE_CODE (vr->max) == INTEGER_CST
569 	  && TREE_CODE (vr->min) == INTEGER_CST);
570 }
571 
572 /* Return true if VR is an INTEGER_CST singleton.  */
573 
574 bool
575 range_int_cst_singleton_p (value_range *vr)
576 {
577   return (range_int_cst_p (vr)
578 	  && tree_int_cst_equal (vr->min, vr->max));
579 }
580 
581 /* Return true if value range VR involves at least one symbol.  */
582 
583 bool
584 symbolic_range_p (value_range *vr)
585 {
586   return (!is_gimple_min_invariant (vr->min)
587           || !is_gimple_min_invariant (vr->max));
588 }
589 
590 /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
591    otherwise.  We only handle additive operations and set NEG to true if the
592    symbol is negated and INV to the invariant part, if any.  */
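
/* For example, for T = x_1 + 5 this returns x_1 with *NEG = false and
   *INV = 5, while for T = 7 - x_1 it returns x_1 with *NEG = true and
   *INV = 7.  */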
593 
594 tree
595 get_single_symbol (tree t, bool *neg, tree *inv)
596 {
597   bool neg_;
598   tree inv_;
599 
600   *inv = NULL_TREE;
601   *neg = false;
602 
603   if (TREE_CODE (t) == PLUS_EXPR
604       || TREE_CODE (t) == POINTER_PLUS_EXPR
605       || TREE_CODE (t) == MINUS_EXPR)
606     {
607       if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
608 	{
609 	  neg_ = (TREE_CODE (t) == MINUS_EXPR);
610 	  inv_ = TREE_OPERAND (t, 0);
611 	  t = TREE_OPERAND (t, 1);
612 	}
613       else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
614 	{
615 	  neg_ = false;
616 	  inv_ = TREE_OPERAND (t, 1);
617 	  t = TREE_OPERAND (t, 0);
618 	}
619       else
620         return NULL_TREE;
621     }
622   else
623     {
624       neg_ = false;
625       inv_ = NULL_TREE;
626     }
627 
628   if (TREE_CODE (t) == NEGATE_EXPR)
629     {
630       t = TREE_OPERAND (t, 0);
631       neg_ = !neg_;
632     }
633 
634   if (TREE_CODE (t) != SSA_NAME)
635     return NULL_TREE;
636 
637   if (inv_ && TREE_OVERFLOW_P (inv_))
638     inv_ = drop_tree_overflow (inv_);
639 
640   *neg = neg_;
641   *inv = inv_;
642   return t;
643 }
644 
645 /* The reverse operation: build a symbolic expression with TYPE
646    from symbol SYM, negated according to NEG, and invariant INV.  */
647 
648 static tree
649 build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
650 {
651   const bool pointer_p = POINTER_TYPE_P (type);
652   tree t = sym;
653 
654   if (neg)
655     t = build1 (NEGATE_EXPR, type, t);
656 
657   if (integer_zerop (inv))
658     return t;
659 
660   return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
661 }
662 
663 /* Return
664    1 if VAL < VAL2
665    0 if !(VAL < VAL2)
666    -2 if those are incomparable.  */
667 int
668 operand_less_p (tree val, tree val2)
669 {
670   /* LT is folded faster than GE and others.  Inline the common case.  */
671   if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
672     return tree_int_cst_lt (val, val2);
673   else
674     {
675       tree tcmp;
676 
677       fold_defer_overflow_warnings ();
678 
679       tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
680 
681       fold_undefer_and_ignore_overflow_warnings ();
682 
683       if (!tcmp
684 	  || TREE_CODE (tcmp) != INTEGER_CST)
685 	return -2;
686 
687       if (!integer_zerop (tcmp))
688 	return 1;
689     }
690 
691   return 0;
692 }
693 
694 /* Compare two values VAL1 and VAL2.  Return
695 
696    	-2 if VAL1 and VAL2 cannot be compared at compile-time,
697    	-1 if VAL1 < VAL2,
698    	 0 if VAL1 == VAL2,
699 	+1 if VAL1 > VAL2, and
700 	+2 if VAL1 != VAL2
701 
702    This is similar to tree_int_cst_compare but supports pointer values
703    and values that cannot be compared at compile time.
704 
705    If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
706    true if the return value is only valid if we assume that signed
707    overflow is undefined.  */
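
/* For example, when signed overflow is undefined, x_1 + 1 compares
   less than x_1 + 3 and -1 is returned, while comparing x_1 against
   an unrelated name y_2 returns -2.  */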
708 
709 int
710 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
711 {
712   if (val1 == val2)
713     return 0;
714 
715   /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
716      both integers.  */
717   gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
718 	      == POINTER_TYPE_P (TREE_TYPE (val2)));
719 
720   /* Convert the two values into the same type.  This is needed because
721      sizetype causes sign extension even for unsigned types.  */
722   val2 = fold_convert (TREE_TYPE (val1), val2);
723   STRIP_USELESS_TYPE_CONVERSION (val2);
724 
725   const bool overflow_undefined
726     = INTEGRAL_TYPE_P (TREE_TYPE (val1))
727       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
728   tree inv1, inv2;
729   bool neg1, neg2;
730   tree sym1 = get_single_symbol (val1, &neg1, &inv1);
731   tree sym2 = get_single_symbol (val2, &neg2, &inv2);
732 
733   /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
734      accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
735   if (sym1 && sym2)
736     {
737       /* Both values must use the same name with the same sign.  */
738       if (sym1 != sym2 || neg1 != neg2)
739 	return -2;
740 
741       /* [-]NAME + CST == [-]NAME + CST.  */
742       if (inv1 == inv2)
743 	return 0;
744 
745       /* If overflow is defined we cannot simplify more.  */
746       if (!overflow_undefined)
747 	return -2;
748 
749       if (strict_overflow_p != NULL
750 	  /* Symbolic range building sets TREE_NO_WARNING to declare
751 	     that overflow doesn't happen.  */
752 	  && (!inv1 || !TREE_NO_WARNING (val1))
753 	  && (!inv2 || !TREE_NO_WARNING (val2)))
754 	*strict_overflow_p = true;
755 
756       if (!inv1)
757 	inv1 = build_int_cst (TREE_TYPE (val1), 0);
758       if (!inv2)
759 	inv2 = build_int_cst (TREE_TYPE (val2), 0);
760 
761       return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
762 		      TYPE_SIGN (TREE_TYPE (val1)));
763     }
764 
765   const bool cst1 = is_gimple_min_invariant (val1);
766   const bool cst2 = is_gimple_min_invariant (val2);
767 
768   /* If one is of the form '[-]NAME + CST' and the other is constant, then
769      it might be possible to say something depending on the constants.  */
770   if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
771     {
772       if (!overflow_undefined)
773 	return -2;
774 
775       if (strict_overflow_p != NULL
776 	  /* Symbolic range building sets TREE_NO_WARNING to declare
777 	     that overflow doesn't happen.  */
778 	  && (!sym1 || !TREE_NO_WARNING (val1))
779 	  && (!sym2 || !TREE_NO_WARNING (val2)))
780 	*strict_overflow_p = true;
781 
782       const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
783       tree cst = cst1 ? val1 : val2;
784       tree inv = cst1 ? inv2 : inv1;
785 
786       /* Compute the difference between the constants.  If it overflows or
787 	 underflows, this means that we can trivially compare the NAME with
788 	 it and, consequently, the two values with each other.  */
789       wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
790       if (wi::cmp (0, wi::to_wide (inv), sgn)
791 	  != wi::cmp (diff, wi::to_wide (cst), sgn))
792 	{
793 	  const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
794 	  return cst1 ? res : -res;
795 	}
796 
797       return -2;
798     }
799 
800   /* We cannot say anything more for non-constants.  */
801   if (!cst1 || !cst2)
802     return -2;
803 
804   if (!POINTER_TYPE_P (TREE_TYPE (val1)))
805     {
806       /* We cannot compare overflowed values.  */
807       if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
808 	return -2;
809 
810       if (TREE_CODE (val1) == INTEGER_CST
811 	  && TREE_CODE (val2) == INTEGER_CST)
812 	return tree_int_cst_compare (val1, val2);
813 
814       if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
815 	{
816 	  if (known_eq (wi::to_poly_widest (val1),
817 			wi::to_poly_widest (val2)))
818 	    return 0;
819 	  if (known_lt (wi::to_poly_widest (val1),
820 			wi::to_poly_widest (val2)))
821 	    return -1;
822 	  if (known_gt (wi::to_poly_widest (val1),
823 			wi::to_poly_widest (val2)))
824 	    return 1;
825 	}
826 
827       return -2;
828     }
829   else
830     {
831       tree t;
832 
833       /* First see if VAL1 and VAL2 are not the same.  */
834       if (val1 == val2 || operand_equal_p (val1, val2, 0))
835 	return 0;
836 
837       /* If VAL1 is a lower address than VAL2, return -1.  */
838       if (operand_less_p (val1, val2) == 1)
839 	return -1;
840 
841       /* If VAL1 is a higher address than VAL2, return +1.  */
842       if (operand_less_p (val2, val1) == 1)
843 	return 1;
844 
845       /* If VAL1 is different than VAL2, return +2.
846 	 For integer constants we either have already returned -1 or 1
847 	 or they are equivalent.  We still might succeed in proving
848 	 something about non-trivial operands.  */
849       if (TREE_CODE (val1) != INTEGER_CST
850 	  || TREE_CODE (val2) != INTEGER_CST)
851 	{
852           t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
853 	  if (t && integer_onep (t))
854 	    return 2;
855 	}
856 
857       return -2;
858     }
859 }
860 
861 /* Compare values like compare_values_warnv.  */
862 
863 int
864 compare_values (tree val1, tree val2)
865 {
866   bool sop;
867   return compare_values_warnv (val1, val2, &sop);
868 }
869 
870 
871 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
872           0 if VAL is not inside [MIN, MAX],
873 	 -2 if we cannot tell either way.
874 
875    Benchmark compile/20001226-1.c compilation time after changing this
876    function.  */
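
/* For example, value_inside_range (5, 0, 10) returns 1 and
   value_inside_range (11, 0, 10) returns 0.  */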
877 
878 int
879 value_inside_range (tree val, tree min, tree max)
880 {
881   int cmp1, cmp2;
882 
883   cmp1 = operand_less_p (val, min);
884   if (cmp1 == -2)
885     return -2;
886   if (cmp1 == 1)
887     return 0;
888 
889   cmp2 = operand_less_p (max, val);
890   if (cmp2 == -2)
891     return -2;
892 
893   return !cmp2;
894 }
895 
896 
897 /* Return true if value ranges VR0 and VR1 have a non-empty
898    intersection.
899 
900    Benchmark compile/20001226-1.c compilation time after changing this
901    function.
902    */
903 
904 static inline bool
905 value_ranges_intersect_p (value_range *vr0, value_range *vr1)
906 {
907   /* The value ranges do not intersect if the maximum of the first range is
908      less than the minimum of the second range or vice versa.
909      When those relations are unknown, we can't do any better.  */
910   if (operand_less_p (vr0->max, vr1->min) != 0)
911     return false;
912   if (operand_less_p (vr1->max, vr0->min) != 0)
913     return false;
914   return true;
915 }
916 
917 
918 /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
919    include the value zero, -2 if we cannot tell.  */
920 
921 int
922 range_includes_zero_p (tree min, tree max)
923 {
924   tree zero = build_int_cst (TREE_TYPE (min), 0);
925   return value_inside_range (zero, min, max);
926 }
927 
928 /* Return true if *VR is known to contain only nonnegative values.  */
929 
930 static inline bool
931 value_range_nonnegative_p (value_range *vr)
932 {
933   /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
934      which would return a useful value should be encoded as a
935      VR_RANGE.  */
936   if (vr->type == VR_RANGE)
937     {
938       int result = compare_values (vr->min, integer_zero_node);
939       return (result == 0 || result == 1);
940     }
941 
942   return false;
943 }
944 
945 /* If *VR has a value range that is a single constant value return that,
946    otherwise return NULL_TREE.  */
947 
948 tree
949 value_range_constant_singleton (value_range *vr)
950 {
951   if (vr->type == VR_RANGE
952       && vrp_operand_equal_p (vr->min, vr->max)
953       && is_gimple_min_invariant (vr->min))
954     return vr->min;
955 
956   return NULL_TREE;
957 }
958 
959 /* Wrapper around int_const_binop.  Return true if we can compute the
960    result; i.e. if the operation doesn't overflow or if the overflow is
961    undefined.  In the latter case (if the operation overflows and
962    overflow is undefined), then adjust the result to be -INF or +INF
963    depending on CODE, VAL1 and VAL2.  Return the value in *RES.
964 
965    Return false for division by zero, for which the result is
966    indeterminate.  */
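
/* For example, for a signed 8-bit type with undefined overflow,
   100 * 2 overflows; since both operands are positive, *RES is set to
   +INF (127) and true is returned.  */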
967 
968 static bool
969 vrp_int_const_binop (enum tree_code code, tree val1, tree val2, wide_int *res)
970 {
971   bool overflow = false;
972   signop sign = TYPE_SIGN (TREE_TYPE (val1));
973 
974   switch (code)
975     {
976     case RSHIFT_EXPR:
977     case LSHIFT_EXPR:
978       {
979 	wide_int wval2 = wi::to_wide (val2, TYPE_PRECISION (TREE_TYPE (val1)));
980 	if (wi::neg_p (wval2))
981 	  {
982 	    wval2 = -wval2;
983 	    if (code == RSHIFT_EXPR)
984 	      code = LSHIFT_EXPR;
985 	    else
986 	      code = RSHIFT_EXPR;
987 	  }
988 
989 	if (code == RSHIFT_EXPR)
990 	  /* It's unclear from the C standard whether shifts can overflow.
991 	     The following code ignores overflow; perhaps a C standard
992 	     interpretation ruling is needed.  */
993 	  *res = wi::rshift (wi::to_wide (val1), wval2, sign);
994 	else
995 	  *res = wi::lshift (wi::to_wide (val1), wval2);
996 	break;
997       }
998 
999     case MULT_EXPR:
1000       *res = wi::mul (wi::to_wide (val1),
1001 		      wi::to_wide (val2), sign, &overflow);
1002       break;
1003 
1004     case TRUNC_DIV_EXPR:
1005     case EXACT_DIV_EXPR:
1006       if (val2 == 0)
1007 	return false;
1008       else
1009 	*res = wi::div_trunc (wi::to_wide (val1),
1010 			      wi::to_wide (val2), sign, &overflow);
1011       break;
1012 
1013     case FLOOR_DIV_EXPR:
1014       if (val2 == 0)
1015 	return false;
1016       *res = wi::div_floor (wi::to_wide (val1),
1017 			    wi::to_wide (val2), sign, &overflow);
1018       break;
1019 
1020     case CEIL_DIV_EXPR:
1021       if (val2 == 0)
1022 	return false;
1023       *res = wi::div_ceil (wi::to_wide (val1),
1024 			   wi::to_wide (val2), sign, &overflow);
1025       break;
1026 
1027     case ROUND_DIV_EXPR:
1028       if (val2 == 0)
1029 	return false;
1030       *res = wi::div_round (wi::to_wide (val1),
1031 			    wi::to_wide (val2), sign, &overflow);
1032       break;
1033 
1034     default:
1035       gcc_unreachable ();
1036     }
1037 
1038   if (overflow
1039       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
1040     {
1041       /* If the operation overflowed return -INF or +INF depending
1042 	 on the operation and the combination of signs of the operands.  */
1043       int sgn1 = tree_int_cst_sgn (val1);
1044       int sgn2 = tree_int_cst_sgn (val2);
1045 
1046       /* Notice that we only need to handle the restricted set of
1047 	 operations handled by extract_range_from_binary_expr.
1048 	 Among them, only multiplication, addition and subtraction
1049 	 can yield overflow without overflowed operands because we
1050 	 are working with integral types only... except in the
1051 	 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
1052 	 for division too.  */
1053 
1054       /* For multiplication, the sign of the overflow is given
1055 	 by the comparison of the signs of the operands.  */
1056       if ((code == MULT_EXPR && sgn1 == sgn2)
1057           /* For addition, the operands must be of the same sign
1058 	     to yield an overflow.  Its sign is therefore that
1059 	     of one of the operands, for example the first.  */
1060 	  || (code == PLUS_EXPR && sgn1 >= 0)
1061 	  /* For subtraction, operands must be of
1062 	     different signs to yield an overflow.  Its sign is
1063 	     therefore that of the first operand or the opposite of
1064 	     that of the second operand.  A first operand of 0 counts
1065 	     as positive here, for the corner case 0 - (-INF), which
1066 	     overflows, but must yield +INF.  */
1067 	  || (code == MINUS_EXPR && sgn1 >= 0)
1068 	  /* For division, the only case is -INF / -1 = +INF.  */
1069 	  || code == TRUNC_DIV_EXPR
1070 	  || code == FLOOR_DIV_EXPR
1071 	  || code == CEIL_DIV_EXPR
1072 	  || code == EXACT_DIV_EXPR
1073 	  || code == ROUND_DIV_EXPR)
1074 	*res = wi::max_value (TYPE_PRECISION (TREE_TYPE (val1)),
1075 			      TYPE_SIGN (TREE_TYPE (val1)));
1076       else
1077 	*res = wi::min_value (TYPE_PRECISION (TREE_TYPE (val1)),
1078 			      TYPE_SIGN (TREE_TYPE (val1)));
1079       return true;
1080     }
1081 
1082   return !overflow;
1083 }
1084 
1085 
1086 /* For range VR compute two wide_int bitmasks.  If a bit is unset in
1087    the *MAY_BE_NONZERO bitmask, that bit is 0 for all numbers in the
1088    range; otherwise it might be 0 or 1.  If a bit is set in the
1089    *MUST_BE_NONZERO bitmask, that bit is 1 for all numbers in the
1090    range; otherwise it might be 0 or 1.  */
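
/* For example, for the range [4, 7], bit 2 is set in every value while
   bits 0 and 1 vary, so *MUST_BE_NONZERO is 0x4 and *MAY_BE_NONZERO
   is 0x7.  */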
1091 
1092 bool
1093 zero_nonzero_bits_from_vr (const tree expr_type,
1094 			   value_range *vr,
1095 			   wide_int *may_be_nonzero,
1096 			   wide_int *must_be_nonzero)
1097 {
1098   *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
1099   *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
1100   if (!range_int_cst_p (vr))
1101     return false;
1102 
1103   if (range_int_cst_singleton_p (vr))
1104     {
1105       *may_be_nonzero = wi::to_wide (vr->min);
1106       *must_be_nonzero = *may_be_nonzero;
1107     }
1108   else if (tree_int_cst_sgn (vr->min) >= 0
1109 	   || tree_int_cst_sgn (vr->max) < 0)
1110     {
1111       wide_int xor_mask = wi::to_wide (vr->min) ^ wi::to_wide (vr->max);
1112       *may_be_nonzero = wi::to_wide (vr->min) | wi::to_wide (vr->max);
1113       *must_be_nonzero = wi::to_wide (vr->min) & wi::to_wide (vr->max);
1114       if (xor_mask != 0)
1115 	{
1116 	  wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
1117 				    may_be_nonzero->get_precision ());
1118 	  *may_be_nonzero = *may_be_nonzero | mask;
1119 	  *must_be_nonzero = wi::bit_and_not (*must_be_nonzero, mask);
1120 	}
1121     }
1122 
1123   return true;
1124 }
1125 
1126 /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
1127    so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
1128    false otherwise.  If *AR can be represented with a single range
1129    *VR1 will be VR_UNDEFINED.  */
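
/* For example, for a signed 8-bit type, ~[0, 0] splits into the pair
   [-128, -1] and [1, 127], while ~[-128, 0] is representable as the
   single range [1, 127].  */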
1130 
1131 static bool
1132 ranges_from_anti_range (value_range *ar,
1133 			value_range *vr0, value_range *vr1)
1134 {
1135   tree type = TREE_TYPE (ar->min);
1136 
1137   vr0->type = VR_UNDEFINED;
1138   vr1->type = VR_UNDEFINED;
1139 
1140   if (ar->type != VR_ANTI_RANGE
1141       || TREE_CODE (ar->min) != INTEGER_CST
1142       || TREE_CODE (ar->max) != INTEGER_CST
1143       || !vrp_val_min (type)
1144       || !vrp_val_max (type))
1145     return false;
1146 
1147   if (!vrp_val_is_min (ar->min))
1148     {
1149       vr0->type = VR_RANGE;
1150       vr0->min = vrp_val_min (type);
1151       vr0->max = wide_int_to_tree (type, wi::to_wide (ar->min) - 1);
1152     }
1153   if (!vrp_val_is_max (ar->max))
1154     {
1155       vr1->type = VR_RANGE;
1156       vr1->min = wide_int_to_tree (type, wi::to_wide (ar->max) + 1);
1157       vr1->max = vrp_val_max (type);
1158     }
1159   if (vr0->type == VR_UNDEFINED)
1160     {
1161       *vr0 = *vr1;
1162       vr1->type = VR_UNDEFINED;
1163     }
1164 
1165   return vr0->type != VR_UNDEFINED;
1166 }
1167 
1168 /* Helper to extract a value-range *VR for a multiplicative operation
1169    *VR0 CODE *VR1.  */
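
/* For example, [2, 3] * [10, 20] computes the cross products 20, 40,
   30 and 60 and yields the range [20, 60].  */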
1170 
1171 static void
1172 extract_range_from_multiplicative_op_1 (value_range *vr,
1173 					enum tree_code code,
1174 					value_range *vr0, value_range *vr1)
1175 {
1176   enum value_range_type rtype;
1177   wide_int val, min, max;
1178   tree type;
1179 
1180   /* Multiplications, divisions and shifts are a bit tricky to handle,
1181      depending on the mix of signs we have in the two ranges, we
1182      need to operate on different values to get the minimum and
1183      maximum values for the new range.  One approach is to figure
1184      out all the variations of range combinations and do the
1185      operations.
1186 
1187      However, this involves several calls to compare_values and it
1188      is pretty convoluted.  It's simpler to do the 4 operations
1189      (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP
1190      MAX1) and then figure the smallest and largest values to form
1191      the new range.  */
1192   gcc_assert (code == MULT_EXPR
1193 	      || code == TRUNC_DIV_EXPR
1194 	      || code == FLOOR_DIV_EXPR
1195 	      || code == CEIL_DIV_EXPR
1196 	      || code == EXACT_DIV_EXPR
1197 	      || code == ROUND_DIV_EXPR
1198 	      || code == RSHIFT_EXPR
1199 	      || code == LSHIFT_EXPR);
1200   gcc_assert (vr0->type == VR_RANGE
1201 	      && vr0->type == vr1->type);
1202 
1203   rtype = vr0->type;
1204   type = TREE_TYPE (vr0->min);
1205   signop sgn = TYPE_SIGN (type);
1206 
1207   /* Compute the 4 cross operations and their minimum and maximum value.  */
1208   if (!vrp_int_const_binop (code, vr0->min, vr1->min, &val))
1209     {
1210       set_value_range_to_varying (vr);
1211       return;
1212     }
1213   min = max = val;
1214 
1215   if (vr1->max != vr1->min)
1216     {
1217       if (!vrp_int_const_binop (code, vr0->min, vr1->max, &val))
1218 	{
1219 	  set_value_range_to_varying (vr);
1220 	  return;
1221 	}
1222       if (wi::lt_p (val, min, sgn))
1223 	min = val;
1224       else if (wi::gt_p (val, max, sgn))
1225 	max = val;
1226     }
1227 
1228   if (vr0->max != vr0->min)
1229     {
1230       if (!vrp_int_const_binop (code, vr0->max, vr1->min, &val))
1231 	{
1232 	  set_value_range_to_varying (vr);
1233 	  return;
1234 	}
1235       if (wi::lt_p (val, min, sgn))
1236 	min = val;
1237       else if (wi::gt_p (val, max, sgn))
1238 	max = val;
1239     }
1240 
1241   if (vr0->min != vr0->max && vr1->min != vr1->max)
1242     {
1243       if (!vrp_int_const_binop (code, vr0->max, vr1->max, &val))
1244 	{
1245 	  set_value_range_to_varying (vr);
1246 	  return;
1247 	}
1248       if (wi::lt_p (val, min, sgn))
1249 	min = val;
1250       else if (wi::gt_p (val, max, sgn))
1251 	max = val;
1252     }
1253 
1254   /* If the new range has its limits swapped around (MIN > MAX),
1255      then the operation caused one of them to wrap around, mark
1256      the new range VARYING.  */
1257   if (wi::gt_p (min, max, sgn))
1258     {
1259       set_value_range_to_varying (vr);
1260       return;
1261     }
1262 
1263   /* We punt for [-INF, +INF].
1264      We learn nothing when we have INF on both sides.
1265      Note that we do accept [-INF, -INF] and [+INF, +INF].  */
1266   if (wi::eq_p (min, wi::min_value (TYPE_PRECISION (type), sgn))
1267       && wi::eq_p (max, wi::max_value (TYPE_PRECISION (type), sgn)))
1268     {
1269       set_value_range_to_varying (vr);
1270       return;
1271     }
1272 
1273   set_value_range (vr, rtype,
1274 		   wide_int_to_tree (type, min),
1275 		   wide_int_to_tree (type, max), NULL);
1276 }
1277 
1278 /* Extract range information from a binary operation CODE based on
1279    the ranges of each of its operands *VR0 and *VR1 with resulting
1280    type EXPR_TYPE.  The resulting range is stored in *VR.  */
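
/* For example, [1, 5] + [10, 20] yields [11, 25], provided neither
   bound computation overflows EXPR_TYPE.  */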
1281 
1282 void
1283 extract_range_from_binary_expr_1 (value_range *vr,
1284 				  enum tree_code code, tree expr_type,
1285 				  value_range *vr0_, value_range *vr1_)
1286 {
1287   value_range vr0 = *vr0_, vr1 = *vr1_;
1288   value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
1289   enum value_range_type type;
1290   tree min = NULL_TREE, max = NULL_TREE;
1291   int cmp;
1292 
1293   if (!INTEGRAL_TYPE_P (expr_type)
1294       && !POINTER_TYPE_P (expr_type))
1295     {
1296       set_value_range_to_varying (vr);
1297       return;
1298     }
1299 
1300   /* Not all binary expressions can be applied to ranges in a
1301      meaningful way.  Handle only arithmetic operations.  */
1302   if (code != PLUS_EXPR
1303       && code != MINUS_EXPR
1304       && code != POINTER_PLUS_EXPR
1305       && code != MULT_EXPR
1306       && code != TRUNC_DIV_EXPR
1307       && code != FLOOR_DIV_EXPR
1308       && code != CEIL_DIV_EXPR
1309       && code != EXACT_DIV_EXPR
1310       && code != ROUND_DIV_EXPR
1311       && code != TRUNC_MOD_EXPR
1312       && code != RSHIFT_EXPR
1313       && code != LSHIFT_EXPR
1314       && code != MIN_EXPR
1315       && code != MAX_EXPR
1316       && code != BIT_AND_EXPR
1317       && code != BIT_IOR_EXPR
1318       && code != BIT_XOR_EXPR)
1319     {
1320       set_value_range_to_varying (vr);
1321       return;
1322     }
1323 
1324   /* If both ranges are UNDEFINED, so is the result.  */
1325   if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
1326     {
1327       set_value_range_to_undefined (vr);
1328       return;
1329     }
1330   /* If one of the ranges is UNDEFINED drop it to VARYING for the following
1331      code.  At some point we may want to special-case operations that
1332      have UNDEFINED result for all or some value-ranges of the not UNDEFINED
1333      operand.  */
1334   else if (vr0.type == VR_UNDEFINED)
1335     set_value_range_to_varying (&vr0);
1336   else if (vr1.type == VR_UNDEFINED)
1337     set_value_range_to_varying (&vr1);
1338 
1339   /* We get imprecise results from ranges_from_anti_range when
1340      code is EXACT_DIV_EXPR.  We could mask out bits in the resulting
1341      range, but then we also need to hack up vrp_meet.  It's just
1342      easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR.  */
1343   if (code == EXACT_DIV_EXPR
1344       && vr0.type == VR_ANTI_RANGE
1345       && vr0.min == vr0.max
1346       && integer_zerop (vr0.min))
1347     {
1348       set_value_range_to_nonnull (vr, expr_type);
1349       return;
1350     }
1351 
1352   /* Now canonicalize anti-ranges to ranges when they are not symbolic
1353      and express ~[] op X as ([]' op X) U ([]'' op X).  */
1354   if (vr0.type == VR_ANTI_RANGE
1355       && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
1356     {
1357       extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
1358       if (vrtem1.type != VR_UNDEFINED)
1359 	{
1360 	  value_range vrres = VR_INITIALIZER;
1361 	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
1362 					    &vrtem1, vr1_);
1363 	  vrp_meet (vr, &vrres);
1364 	}
1365       return;
1366     }
1367   /* Likewise for X op ~[].  */
1368   if (vr1.type == VR_ANTI_RANGE
1369       && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
1370     {
1371       extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
1372       if (vrtem1.type != VR_UNDEFINED)
1373 	{
1374 	  value_range vrres = VR_INITIALIZER;
1375 	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
1376 					    vr0_, &vrtem1);
1377 	  vrp_meet (vr, &vrres);
1378 	}
1379       return;
1380     }
1381 
1382   /* The type of the resulting value range defaults to VR0.TYPE.  */
1383   type = vr0.type;
1384 
1385   /* Refuse to operate on VARYING ranges, ranges of different kinds
1386      and symbolic ranges.  As an exception, we allow BIT_{AND,IOR}
1387      because we may be able to derive a useful range even if one of
1388      the operands is VR_VARYING or symbolic range.  Similarly for
1389      divisions, MIN/MAX and PLUS/MINUS.
1390 
1391      TODO, we may be able to derive anti-ranges in some cases.  */
1392   if (code != BIT_AND_EXPR
1393       && code != BIT_IOR_EXPR
1394       && code != TRUNC_DIV_EXPR
1395       && code != FLOOR_DIV_EXPR
1396       && code != CEIL_DIV_EXPR
1397       && code != EXACT_DIV_EXPR
1398       && code != ROUND_DIV_EXPR
1399       && code != TRUNC_MOD_EXPR
1400       && code != MIN_EXPR
1401       && code != MAX_EXPR
1402       && code != PLUS_EXPR
1403       && code != MINUS_EXPR
1404       && code != RSHIFT_EXPR
1405       && (vr0.type == VR_VARYING
1406 	  || vr1.type == VR_VARYING
1407 	  || vr0.type != vr1.type
1408 	  || symbolic_range_p (&vr0)
1409 	  || symbolic_range_p (&vr1)))
1410     {
1411       set_value_range_to_varying (vr);
1412       return;
1413     }
1414 
1415   /* Now evaluate the expression to determine the new range.  */
1416   if (POINTER_TYPE_P (expr_type))
1417     {
1418       if (code == MIN_EXPR || code == MAX_EXPR)
1419 	{
1420 	  /* For MIN/MAX expressions with pointers, we only care about
1421 	     nullness: if both are nonnull, the result is nonnull; if both
1422 	     are null, the result is null; otherwise the result is
1423 	     varying.  */
1424 	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
1425 	    set_value_range_to_nonnull (vr, expr_type);
1426 	  else if (range_is_null (&vr0) && range_is_null (&vr1))
1427 	    set_value_range_to_null (vr, expr_type);
1428 	  else
1429 	    set_value_range_to_varying (vr);
1430 	}
1431       else if (code == POINTER_PLUS_EXPR)
1432 	{
1433 	  /* For pointer types, we are really only interested in asserting
1434 	     whether the expression evaluates to non-NULL.  */
1435 	  if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
1436 	    set_value_range_to_nonnull (vr, expr_type);
1437 	  else if (range_is_null (&vr0) && range_is_null (&vr1))
1438 	    set_value_range_to_null (vr, expr_type);
1439 	  else
1440 	    set_value_range_to_varying (vr);
1441 	}
1442       else if (code == BIT_AND_EXPR)
1443 	{
1444 	  /* For pointer types, we are really only interested in asserting
1445 	     whether the expression evaluates to non-NULL.  */
1446 	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
1447 	    set_value_range_to_nonnull (vr, expr_type);
1448 	  else if (range_is_null (&vr0) || range_is_null (&vr1))
1449 	    set_value_range_to_null (vr, expr_type);
1450 	  else
1451 	    set_value_range_to_varying (vr);
1452 	}
1453       else
1454 	set_value_range_to_varying (vr);
1455 
1456       return;
1457     }
1458 
1459   /* For integer ranges, apply the operation to each end of the
1460      range and see what we end up with.  */
1461   if (code == PLUS_EXPR || code == MINUS_EXPR)
1462     {
1463       const bool minus_p = (code == MINUS_EXPR);
1464       tree min_op0 = vr0.min;
1465       tree min_op1 = minus_p ? vr1.max : vr1.min;
1466       tree max_op0 = vr0.max;
1467       tree max_op1 = minus_p ? vr1.min : vr1.max;
1468       tree sym_min_op0 = NULL_TREE;
1469       tree sym_min_op1 = NULL_TREE;
1470       tree sym_max_op0 = NULL_TREE;
1471       tree sym_max_op1 = NULL_TREE;
1472       bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
1473 
1474       /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
1475 	 single-symbolic ranges, try to compute the precise resulting range,
1476 	 but only if we know that this resulting range will also be constant
1477 	 or single-symbolic.  */
1478       if (vr0.type == VR_RANGE && vr1.type == VR_RANGE
1479 	  && (TREE_CODE (min_op0) == INTEGER_CST
1480 	      || (sym_min_op0
1481 		  = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
1482 	  && (TREE_CODE (min_op1) == INTEGER_CST
1483 	      || (sym_min_op1
1484 		  = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
1485 	  && (!(sym_min_op0 && sym_min_op1)
1486 	      || (sym_min_op0 == sym_min_op1
1487 		  && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
1488 	  && (TREE_CODE (max_op0) == INTEGER_CST
1489 	      || (sym_max_op0
1490 		  = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
1491 	  && (TREE_CODE (max_op1) == INTEGER_CST
1492 	      || (sym_max_op1
1493 		  = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
1494 	  && (!(sym_max_op0 && sym_max_op1)
1495 	      || (sym_max_op0 == sym_max_op1
1496 		  && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
1497 	{
1498 	  const signop sgn = TYPE_SIGN (expr_type);
1499 	  const unsigned int prec = TYPE_PRECISION (expr_type);
1500 	  wide_int type_min, type_max, wmin, wmax;
1501 	  int min_ovf = 0;
1502 	  int max_ovf = 0;
1503 
1504 	  /* Get the lower and upper bounds of the type.  */
1505 	  if (TYPE_OVERFLOW_WRAPS (expr_type))
1506 	    {
1507 	      type_min = wi::min_value (prec, sgn);
1508 	      type_max = wi::max_value (prec, sgn);
1509 	    }
1510 	  else
1511 	    {
1512 	      type_min = wi::to_wide (vrp_val_min (expr_type));
1513 	      type_max = wi::to_wide (vrp_val_max (expr_type));
1514 	    }
1515 
1516 	  /* Combine the lower bounds, if any.  */
1517 	  if (min_op0 && min_op1)
1518 	    {
1519 	      if (minus_p)
1520 		{
1521 		  wmin = wi::to_wide (min_op0) - wi::to_wide (min_op1);
1522 
1523 		  /* Check for overflow.  */
1524 		  if (wi::cmp (0, wi::to_wide (min_op1), sgn)
1525 		      != wi::cmp (wmin, wi::to_wide (min_op0), sgn))
1526 		    min_ovf = wi::cmp (wi::to_wide (min_op0),
1527 				       wi::to_wide (min_op1), sgn);
1528 		}
1529 	      else
1530 		{
1531 		  wmin = wi::to_wide (min_op0) + wi::to_wide (min_op1);
1532 
1533 		  /* Check for overflow.  */
1534 		  if (wi::cmp (wi::to_wide (min_op1), 0, sgn)
1535 		      != wi::cmp (wmin, wi::to_wide (min_op0), sgn))
1536 		    min_ovf = wi::cmp (wi::to_wide (min_op0), wmin, sgn);
1537 		}
1538 	    }
1539 	  else if (min_op0)
1540 	    wmin = wi::to_wide (min_op0);
1541 	  else if (min_op1)
1542 	    {
1543 	      if (minus_p)
1544 		{
1545 		  wmin = -wi::to_wide (min_op1);
1546 
1547 		  /* Check for overflow.  */
1548 		  if (sgn == SIGNED
1549 		      && wi::neg_p (wi::to_wide (min_op1))
1550 		      && wi::neg_p (wmin))
1551 		    min_ovf = 1;
1552 		  else if (sgn == UNSIGNED && wi::to_wide (min_op1) != 0)
1553 		    min_ovf = -1;
1554 		}
1555 	      else
1556 		wmin = wi::to_wide (min_op1);
1557 	    }
1558 	  else
1559 	    wmin = wi::shwi (0, prec);
1560 
1561 	  /* Combine the upper bounds, if any.  */
1562 	  if (max_op0 && max_op1)
1563 	    {
1564 	      if (minus_p)
1565 		{
1566 		  wmax = wi::to_wide (max_op0) - wi::to_wide (max_op1);
1567 
1568 		  /* Check for overflow.  */
1569 		  if (wi::cmp (0, wi::to_wide (max_op1), sgn)
1570 		      != wi::cmp (wmax, wi::to_wide (max_op0), sgn))
1571 		    max_ovf = wi::cmp (wi::to_wide (max_op0),
1572 				       wi::to_wide (max_op1), sgn);
1573 		}
1574 	      else
1575 		{
1576 		  wmax = wi::to_wide (max_op0) + wi::to_wide (max_op1);
1577 
1578 		  if (wi::cmp (wi::to_wide (max_op1), 0, sgn)
1579 		      != wi::cmp (wmax, wi::to_wide (max_op0), sgn))
1580 		    max_ovf = wi::cmp (wi::to_wide (max_op0), wmax, sgn);
1581 		}
1582 	    }
1583 	  else if (max_op0)
1584 	    wmax = wi::to_wide (max_op0);
1585 	  else if (max_op1)
1586 	    {
1587 	      if (minus_p)
1588 		{
1589 		  wmax = -wi::to_wide (max_op1);
1590 
1591 		  /* Check for overflow.  */
1592 		  if (sgn == SIGNED
1593 		      && wi::neg_p (wi::to_wide (max_op1))
1594 		      && wi::neg_p (wmax))
1595 		    max_ovf = 1;
1596 		  else if (sgn == UNSIGNED && wi::to_wide (max_op1) != 0)
1597 		    max_ovf = -1;
1598 		}
1599 	      else
1600 		wmax = wi::to_wide (max_op1);
1601 	    }
1602 	  else
1603 	    wmax = wi::shwi (0, prec);
1604 
1605 	  /* Check for type overflow.  */
1606 	  if (min_ovf == 0)
1607 	    {
1608 	      if (wi::cmp (wmin, type_min, sgn) == -1)
1609 		min_ovf = -1;
1610 	      else if (wi::cmp (wmin, type_max, sgn) == 1)
1611 		min_ovf = 1;
1612 	    }
1613 	  if (max_ovf == 0)
1614 	    {
1615 	      if (wi::cmp (wmax, type_min, sgn) == -1)
1616 		max_ovf = -1;
1617 	      else if (wi::cmp (wmax, type_max, sgn) == 1)
1618 		max_ovf = 1;
1619 	    }
1620 
1621 	  /* If we have overflow for the constant part and the resulting
1622 	     range will be symbolic, drop to VR_VARYING.  */
1623 	  if ((min_ovf && sym_min_op0 != sym_min_op1)
1624 	      || (max_ovf && sym_max_op0 != sym_max_op1))
1625 	    {
1626 	      set_value_range_to_varying (vr);
1627 	      return;
1628 	    }
1629 
1630 	  if (TYPE_OVERFLOW_WRAPS (expr_type))
1631 	    {
1632 	      /* If overflow wraps, truncate the values and adjust the
1633 		 range kind and bounds appropriately.  */
1634 	      wide_int tmin = wide_int::from (wmin, prec, sgn);
1635 	      wide_int tmax = wide_int::from (wmax, prec, sgn);
1636 	      if (min_ovf == max_ovf)
1637 		{
1638 		  /* No overflow or both overflow or underflow.  The
1639 		     range kind stays VR_RANGE.  */
1640 		  min = wide_int_to_tree (expr_type, tmin);
1641 		  max = wide_int_to_tree (expr_type, tmax);
1642 		}
1643 	      else if ((min_ovf == -1 && max_ovf == 0)
1644 		       || (max_ovf == 1 && min_ovf == 0))
1645 		{
1646 		  /* Min underflow or max overflow.  The range kind
1647 		     changes to VR_ANTI_RANGE.  */
1648 		  bool covers = false;
1649 		  wide_int tem = tmin;
1650 		  type = VR_ANTI_RANGE;
1651 		  tmin = tmax + 1;
1652 		  if (wi::cmp (tmin, tmax, sgn) < 0)
1653 		    covers = true;
1654 		  tmax = tem - 1;
1655 		  if (wi::cmp (tmax, tem, sgn) > 0)
1656 		    covers = true;
1657 		  /* If the anti-range would cover nothing, drop to varying.
1658 		     Likewise if the anti-range bounds are outside of the
1659 		     type's values.  */
1660 		  if (covers || wi::cmp (tmin, tmax, sgn) > 0)
1661 		    {
1662 		      set_value_range_to_varying (vr);
1663 		      return;
1664 		    }
1665 		  min = wide_int_to_tree (expr_type, tmin);
1666 		  max = wide_int_to_tree (expr_type, tmax);
1667 		}
1668 	      else
1669 		{
1670 		  /* Other underflow and/or overflow, drop to VR_VARYING.  */
1671 		  set_value_range_to_varying (vr);
1672 		  return;
1673 		}
1674 	    }
1675 	  else
1676 	    {
1677 	      /* If overflow does not wrap, saturate to the type's min/max
1678 	         value.  */
1679 	      if (min_ovf == -1)
1680 		min = wide_int_to_tree (expr_type, type_min);
1681 	      else if (min_ovf == 1)
1682 		min = wide_int_to_tree (expr_type, type_max);
1683 	      else
1684 		min = wide_int_to_tree (expr_type, wmin);
1685 
1686 	      if (max_ovf == -1)
1687 		max = wide_int_to_tree (expr_type, type_min);
1688 	      else if (max_ovf == 1)
1689 		max = wide_int_to_tree (expr_type, type_max);
1690 	      else
1691 		max = wide_int_to_tree (expr_type, wmax);
1692 	    }
1693 
1694 	  /* If the result lower bound is constant, we're done;
1695 	     otherwise, build the symbolic lower bound.  */
1696 	  if (sym_min_op0 == sym_min_op1)
1697 	    ;
1698 	  else if (sym_min_op0)
1699 	    min = build_symbolic_expr (expr_type, sym_min_op0,
1700 				       neg_min_op0, min);
1701 	  else if (sym_min_op1)
1702 	    {
1703 	      /* We may not negate if that might introduce
1704 		 undefined overflow.  */
1705 	      if (! minus_p
1706 		  || neg_min_op1
1707 		  || TYPE_OVERFLOW_WRAPS (expr_type))
1708 		min = build_symbolic_expr (expr_type, sym_min_op1,
1709 					   neg_min_op1 ^ minus_p, min);
1710 	      else
1711 		min = NULL_TREE;
1712 	    }
1713 
1714 	  /* Likewise for the upper bound.  */
1715 	  if (sym_max_op0 == sym_max_op1)
1716 	    ;
1717 	  else if (sym_max_op0)
1718 	    max = build_symbolic_expr (expr_type, sym_max_op0,
1719 				       neg_max_op0, max);
1720 	  else if (sym_max_op1)
1721 	    {
1722 	      /* We may not negate if that might introduce
1723 		 undefined overflow.  */
1724 	      if (! minus_p
1725 		  || neg_max_op1
1726 		  || TYPE_OVERFLOW_WRAPS (expr_type))
1727 		max = build_symbolic_expr (expr_type, sym_max_op1,
1728 					   neg_max_op1 ^ minus_p, max);
1729 	      else
1730 		max = NULL_TREE;
1731 	    }
1732 	}
1733       else
1734 	{
1735 	  /* For other cases, for example if we have a PLUS_EXPR with two
1736 	     VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
1737 	     to compute a precise range for such a case.
1738 	     ???  In general even mixed range kind operations can be expressed
1739 	     by, for example, transforming ~[3, 5] + [1, 2] to range-only
1740 	     operations and a union primitive:
1741 	       [-INF, 2] + [1, 2]  U  [5, +INF] + [1, 2]
1742 	           [-INF+1, 4]     U    [6, +INF(OVF)]
1743 	     though usually the union is not exactly representable with
1744 	     a single range or anti-range as the above is
1745 		 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
1746 	     but one could use a scheme similar to equivalences for this. */
1747 	  set_value_range_to_varying (vr);
1748 	  return;
1749 	}
1750     }
1751   else if (code == MIN_EXPR
1752 	   || code == MAX_EXPR)
1753     {
1754       if (vr0.type == VR_RANGE
1755 	  && !symbolic_range_p (&vr0))
1756 	{
1757 	  type = VR_RANGE;
1758 	  if (vr1.type == VR_RANGE
1759 	      && !symbolic_range_p (&vr1))
1760 	    {
1761 	      /* For operations that make the resulting range directly
1762 		 proportional to the original ranges, apply the operation to
1763 		 the same end of each range.  */
1764 	      min = int_const_binop (code, vr0.min, vr1.min);
1765 	      max = int_const_binop (code, vr0.max, vr1.max);
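	      /* Illustrative example: MIN_EXPR of [1, 5] and [3, 10]
		 yields [MIN (1, 3), MIN (5, 10)] == [1, 5], and MAX_EXPR
		 of the same operands yields [3, 10].  */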
1766 	    }
1767 	  else if (code == MIN_EXPR)
1768 	    {
1769 	      min = vrp_val_min (expr_type);
1770 	      max = vr0.max;
1771 	    }
1772 	  else if (code == MAX_EXPR)
1773 	    {
1774 	      min = vr0.min;
1775 	      max = vrp_val_max (expr_type);
1776 	    }
1777 	}
1778       else if (vr1.type == VR_RANGE
1779 	       && !symbolic_range_p (&vr1))
1780 	{
1781 	  type = VR_RANGE;
1782 	  if (code == MIN_EXPR)
1783 	    {
1784 	      min = vrp_val_min (expr_type);
1785 	      max = vr1.max;
1786 	    }
1787 	  else if (code == MAX_EXPR)
1788 	    {
1789 	      min = vr1.min;
1790 	      max = vrp_val_max (expr_type);
1791 	    }
1792 	}
1793       else
1794 	{
1795 	  set_value_range_to_varying (vr);
1796 	  return;
1797 	}
1798     }
1799   else if (code == MULT_EXPR)
1800     {
1801       /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
1802 	 drop to varying.  This test requires 2*prec bits if both
1803 	 operands are signed and 2*prec + 2 bits if either is not.  */
1804 
1805       signop sign = TYPE_SIGN (expr_type);
1806       unsigned int prec = TYPE_PRECISION (expr_type);
1807 
1808       if (!range_int_cst_p (&vr0)
1809 	  || !range_int_cst_p (&vr1))
1810 	{
1811 	  set_value_range_to_varying (vr);
1812 	  return;
1813 	}
1814 
1815       if (TYPE_OVERFLOW_WRAPS (expr_type))
1816 	{
1817 	  typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION * 2) vrp_int;
1818 	  typedef generic_wide_int
1819              <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> > vrp_int_cst;
1820 	  vrp_int sizem1 = wi::mask <vrp_int> (prec, false);
1821 	  vrp_int size = sizem1 + 1;
1822 
1823 	  /* Extend the values using the sign of the result to PREC2.
1824 	     From here on out, everything is just signed math no matter
1825 	     what the input types were.  */
1826           vrp_int min0 = vrp_int_cst (vr0.min);
1827           vrp_int max0 = vrp_int_cst (vr0.max);
1828           vrp_int min1 = vrp_int_cst (vr1.min);
1829           vrp_int max1 = vrp_int_cst (vr1.max);
1830 	  /* Canonicalize the intervals.  */
1831 	  if (sign == UNSIGNED)
1832 	    {
1833 	      if (wi::ltu_p (size, min0 + max0))
1834 		{
1835 		  min0 -= size;
1836 		  max0 -= size;
1837 		}
1838 
1839 	      if (wi::ltu_p (size, min1 + max1))
1840 		{
1841 		  min1 -= size;
1842 		  max1 -= size;
1843 		}
1844 	    }
1845 
1846 	  vrp_int prod0 = min0 * min1;
1847 	  vrp_int prod1 = min0 * max1;
1848 	  vrp_int prod2 = max0 * min1;
1849 	  vrp_int prod3 = max0 * max1;
1850 
1851 	  /* Sort the 4 products so that min is in prod0 and max is in
1852 	     prod3.  */
1853 	  /* min0 * min1 > max0 * max1.  */
1854 	  if (prod0 > prod3)
1855 	    std::swap (prod0, prod3);
1856 
1857 	  /* min0 * max1 > max0 * min1.  */
1858 	  if (prod1 > prod2)
1859 	    std::swap (prod1, prod2);
1860 
1861 	  if (prod0 > prod1)
1862 	    std::swap (prod0, prod1);
1863 
1864 	  if (prod2 > prod3)
1865 	    std::swap (prod2, prod3);
1866 
1867 	  /* diff = max - min.  */
1868 	  prod2 = prod3 - prod0;
1869 	  if (wi::geu_p (prod2, sizem1))
1870 	    {
1871 	      /* The range covers all values.  */
1872 	      set_value_range_to_varying (vr);
1873 	      return;
1874 	    }
1875 
1876 	  /* The following should handle the wrapping and selecting
1877 	     VR_ANTI_RANGE for us.  */
1878 	  min = wide_int_to_tree (expr_type, prod0);
1879 	  max = wide_int_to_tree (expr_type, prod3);
1880 	  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
1881 	  return;
1882 	}
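      /* Illustrative example, assuming an 8-bit unsigned wrapping type:
	 [253, 255] * [253, 255] is canonicalized above to [-3, -1]
	 * [-3, -1]; the four products span [1, 9], the difference 8 is
	 below sizem1 == 255, so the result range is [1, 9].  */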
1883 
1884       /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
1885 	 drop to VR_VARYING.  It would take more effort to compute a
1886 	 precise range for such a case.  For example, if we have
1887 	 op0 == 65536 and op1 == 65536 with their ranges both being
1888 	 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
1889 	 we cannot claim that the product is in ~[0,0].  Note that we
1890 	 are guaranteed to have vr0.type == vr1.type at this
1891 	 point.  */
1892       if (vr0.type == VR_ANTI_RANGE
1893 	  && !TYPE_OVERFLOW_UNDEFINED (expr_type))
1894 	{
1895 	  set_value_range_to_varying (vr);
1896 	  return;
1897 	}
1898 
1899       extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
1900       return;
1901     }
1902   else if (code == RSHIFT_EXPR
1903 	   || code == LSHIFT_EXPR)
1904     {
1905       /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
1906 	 then drop to VR_VARYING.  Outside of this range we get undefined
1907 	 behavior from the shift operation.  We cannot even trust
1908 	 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
1909 	 shifts, and the operation at the tree level may be widened.  */
1910       if (range_int_cst_p (&vr1)
1911 	  && compare_tree_int (vr1.min, 0) >= 0
1912 	  && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1)
1913 	{
1914 	  if (code == RSHIFT_EXPR)
1915 	    {
1916 	      /* Even if vr0 is VARYING or otherwise not usable, we can derive
1917 		 useful ranges just from the shift count.  E.g.
1918 		 x >> 63 for signed 64-bit x is always [-1, 0].  */
1919 	      if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
1920 		{
1921 		  vr0.type = type = VR_RANGE;
1922 		  vr0.min = vrp_val_min (expr_type);
1923 		  vr0.max = vrp_val_max (expr_type);
1924 		}
1925 	      extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
1926 	      return;
1927 	    }
1928 	  /* We can map lshifts by constants to MULT_EXPR handling.  */
1929 	  else if (code == LSHIFT_EXPR
1930 		   && range_int_cst_singleton_p (&vr1))
1931 	    {
1932 	      bool saved_flag_wrapv;
1933 	      value_range vr1p = VR_INITIALIZER;
1934 	      vr1p.type = VR_RANGE;
1935 	      vr1p.min = (wide_int_to_tree
1936 			  (expr_type,
1937 			   wi::set_bit_in_zero (tree_to_shwi (vr1.min),
1938 						TYPE_PRECISION (expr_type))));
1939 	      vr1p.max = vr1p.min;
1940 	      /* We have to use a wrapping multiply though as signed overflow
1941 		 on lshifts is implementation defined in C89.  */
1942 	      saved_flag_wrapv = flag_wrapv;
1943 	      flag_wrapv = 1;
1944 	      extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type,
1945 						&vr0, &vr1p);
1946 	      flag_wrapv = saved_flag_wrapv;
1947 	      return;
1948 	    }
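	  /* Illustrative example: for a 32-bit type, x << 4 is handled
	     here as x * 16, with vr1p the singleton range [16, 16], so
	     the wrapping MULT_EXPR logic above computes the result.  */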
1949 	  else if (code == LSHIFT_EXPR
1950 		   && range_int_cst_p (&vr0))
1951 	    {
1952 	      int prec = TYPE_PRECISION (expr_type);
1953 	      int overflow_pos = prec;
1954 	      int bound_shift;
1955 	      wide_int low_bound, high_bound;
1956 	      bool uns = TYPE_UNSIGNED (expr_type);
1957 	      bool in_bounds = false;
1958 
1959 	      if (!uns)
1960 		overflow_pos -= 1;
1961 
1962 	      bound_shift = overflow_pos - tree_to_shwi (vr1.max);
1963 	      /* If bound_shift == HOST_BITS_PER_WIDE_INT, the lshift can
1964 		 overflow.  However, for that to happen, vr1.max needs to be
1965 		 zero, which means vr1 is a singleton range of zero, which
1966 		 means it should be handled by the previous LSHIFT_EXPR
1967 		 if-clause.  */
1968 	      wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
1969 	      wide_int complement = ~(bound - 1);
1970 
1971 	      if (uns)
1972 		{
1973 		  low_bound = bound;
1974 		  high_bound = complement;
1975 		  if (wi::ltu_p (wi::to_wide (vr0.max), low_bound))
1976 		    {
1977 		      /* [5, 6] << [1, 2] == [10, 24].  */
1978 		      /* We're shifting out only zeroes, the value increases
1979 			 monotonically.  */
1980 		      in_bounds = true;
1981 		    }
1982 		  else if (wi::ltu_p (high_bound, wi::to_wide (vr0.min)))
1983 		    {
1984 		      /* [0xffffff00, 0xffffffff] << [1, 2]
1985 		         == [0xfffffc00, 0xfffffffe].  */
1986 		      /* We're shifting out only ones, the value decreases
1987 			 monotonically.  */
1988 		      in_bounds = true;
1989 		    }
1990 		}
1991 	      else
1992 		{
1993 		  /* [-1, 1] << [1, 2] == [-4, 4].  */
1994 		  low_bound = complement;
1995 		  high_bound = bound;
1996 		  if (wi::lts_p (wi::to_wide (vr0.max), high_bound)
1997 		      && wi::lts_p (low_bound, wi::to_wide (vr0.min)))
1998 		    {
1999 		      /* For non-negative numbers, we're shifting out only
2000 			 zeroes, the value increases monotonically.
2001 			 For negative numbers, we're shifting out only ones, the
2002 			 value decreases monotonically.  */
2003 		      in_bounds = true;
2004 		    }
2005 		}
2006 
2007 	      if (in_bounds)
2008 		{
2009 		  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2010 		  return;
2011 		}
2012 	    }
2013 	}
2014       set_value_range_to_varying (vr);
2015       return;
2016     }
2017   else if (code == TRUNC_DIV_EXPR
2018 	   || code == FLOOR_DIV_EXPR
2019 	   || code == CEIL_DIV_EXPR
2020 	   || code == EXACT_DIV_EXPR
2021 	   || code == ROUND_DIV_EXPR)
2022     {
2023       if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2024 	{
2025 	  /* For division, if op1 has VR_RANGE but op0 does not, something
2026 	     can be deduced just from that range.  Say [min, max] / [4, max]
2027 	     gives the range [min / 4, max / 4].  */
2028 	  if (vr1.type == VR_RANGE
2029 	      && !symbolic_range_p (&vr1)
2030 	      && range_includes_zero_p (vr1.min, vr1.max) == 0)
2031 	    {
2032 	      vr0.type = type = VR_RANGE;
2033 	      vr0.min = vrp_val_min (expr_type);
2034 	      vr0.max = vrp_val_max (expr_type);
2035 	    }
2036 	  else
2037 	    {
2038 	      set_value_range_to_varying (vr);
2039 	      return;
2040 	    }
2041 	}
2042 
2043       /* For divisions, if flag_non_call_exceptions is true, we must
2044 	 not eliminate a division by zero.  */
2045       if (cfun->can_throw_non_call_exceptions
2046 	  && (vr1.type != VR_RANGE
2047 	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
2048 	{
2049 	  set_value_range_to_varying (vr);
2050 	  return;
2051 	}
2052 
2053       /* For divisions, if op0 is VR_RANGE, we can deduce a range
2054 	 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2055 	 include 0.  */
2056       if (vr0.type == VR_RANGE
2057 	  && (vr1.type != VR_RANGE
2058 	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
2059 	{
2060 	  tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2061 	  int cmp;
2062 
2063 	  min = NULL_TREE;
2064 	  max = NULL_TREE;
2065 	  if (TYPE_UNSIGNED (expr_type)
2066 	      || value_range_nonnegative_p (&vr1))
2067 	    {
2068 	      /* For unsigned division or when divisor is known
2069 		 to be non-negative, the range has to cover
2070 		 all numbers from 0 to max for positive max
2071 		 and all numbers from min to 0 for negative min.  */
2072 	      cmp = compare_values (vr0.max, zero);
2073 	      if (cmp == -1)
2074 		{
2075 		  /* When vr0.max < 0, vr1.min != 0 and value
2076 		     ranges for dividend and divisor are available.  */
2077 		  if (vr1.type == VR_RANGE
2078 		      && !symbolic_range_p (&vr0)
2079 		      && !symbolic_range_p (&vr1)
2080 		      && compare_values (vr1.min, zero) != 0)
2081 		    max = int_const_binop (code, vr0.max, vr1.min);
2082 		  else
2083 		    max = zero;
2084 		}
2085 	      else if (cmp == 0 || cmp == 1)
2086 		max = vr0.max;
2087 	      else
2088 		type = VR_VARYING;
2089 	      cmp = compare_values (vr0.min, zero);
2090 	      if (cmp == 1)
2091 		{
2092 		  /* When vr0.min > 0, vr1.max != 0 and value ranges for
2093 		     dividend and divisor are available.  */
2094 		  if (vr1.type == VR_RANGE
2095 		      && !symbolic_range_p (&vr0)
2096 		      && !symbolic_range_p (&vr1)
2097 		      && compare_values (vr1.max, zero) != 0)
2098 		    min = int_const_binop (code, vr0.min, vr1.max);
2099 		  else
2100 		    min = zero;
2101 		}
2102 	      else if (cmp == 0 || cmp == -1)
2103 		min = vr0.min;
2104 	      else
2105 		type = VR_VARYING;
2106 	    }
2107 	  else
2108 	    {
2109 	      /* Otherwise the range is -max .. max or min .. -min
2110 		 depending on which bound is bigger in absolute value,
2111 		 as the division can change the sign.  */
2112 	      abs_extent_range (vr, vr0.min, vr0.max);
2113 	      return;
2114 	    }
2115 	  if (type == VR_VARYING)
2116 	    {
2117 	      set_value_range_to_varying (vr);
2118 	      return;
2119 	    }
2120 	}
2121       else if (range_int_cst_p (&vr0) && range_int_cst_p (&vr1))
2122 	{
2123 	  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2124 	  return;
2125 	}
2126     }
2127   else if (code == TRUNC_MOD_EXPR)
2128     {
2129       if (range_is_null (&vr1))
2130 	{
2131 	  set_value_range_to_undefined (vr);
2132 	  return;
2133 	}
2134       /* ABS (A % B) < ABS (B) and either
2135 	 0 <= A % B <= A or A <= A % B <= 0.  */
2136       type = VR_RANGE;
2137       signop sgn = TYPE_SIGN (expr_type);
2138       unsigned int prec = TYPE_PRECISION (expr_type);
2139       wide_int wmin, wmax, tmp;
2140       if (vr1.type == VR_RANGE && !symbolic_range_p (&vr1))
2141 	{
2142 	  wmax = wi::to_wide (vr1.max) - 1;
2143 	  if (sgn == SIGNED)
2144 	    {
2145 	      tmp = -1 - wi::to_wide (vr1.min);
2146 	      wmax = wi::smax (wmax, tmp);
2147 	    }
2148 	}
2149       else
2150 	{
2151 	  wmax = wi::max_value (prec, sgn);
2152 	  /* X % INT_MIN may be INT_MAX.  */
2153 	  if (sgn == UNSIGNED)
2154 	    wmax = wmax - 1;
2155 	}
2156 
2157       if (sgn == UNSIGNED)
2158 	wmin = wi::zero (prec);
2159       else
2160 	{
2161 	  wmin = -wmax;
2162 	  if (vr0.type == VR_RANGE && TREE_CODE (vr0.min) == INTEGER_CST)
2163 	    {
2164 	      tmp = wi::to_wide (vr0.min);
2165 	      if (wi::gts_p (tmp, 0))
2166 		tmp = wi::zero (prec);
2167 	      wmin = wi::smax (wmin, tmp);
2168 	    }
2169 	}
2170 
2171       if (vr0.type == VR_RANGE && TREE_CODE (vr0.max) == INTEGER_CST)
2172 	{
2173 	  tmp = wi::to_wide (vr0.max);
2174 	  if (sgn == SIGNED && wi::neg_p (tmp))
2175 	    tmp = wi::zero (prec);
2176 	  wmax = wi::min (wmax, tmp, sgn);
2177 	}
2178 
2179       min = wide_int_to_tree (expr_type, wmin);
2180       max = wide_int_to_tree (expr_type, wmax);
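      /* Illustrative example, signed: for x in [0, 100] and y in [5, 10],
	 x % y starts from wmax = 9 (and wmin = -9); the non-negative vr0
	 bounds then tighten this to the final range [0, 9].  */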
2181     }
2182   else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
2183     {
2184       bool int_cst_range0, int_cst_range1;
2185       wide_int may_be_nonzero0, may_be_nonzero1;
2186       wide_int must_be_nonzero0, must_be_nonzero1;
2187 
2188       int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0,
2189 						  &may_be_nonzero0,
2190 						  &must_be_nonzero0);
2191       int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1,
2192 						  &may_be_nonzero1,
2193 						  &must_be_nonzero1);
2194 
2195       if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR)
2196 	{
2197 	  value_range *vr0p = NULL, *vr1p = NULL;
2198 	  if (range_int_cst_singleton_p (&vr1))
2199 	    {
2200 	      vr0p = &vr0;
2201 	      vr1p = &vr1;
2202 	    }
2203 	  else if (range_int_cst_singleton_p (&vr0))
2204 	    {
2205 	      vr0p = &vr1;
2206 	      vr1p = &vr0;
2207 	    }
2208 	  /* For & or | attempt to optimize:
2209 	     [x, y] op z into [x op z, y op z]
2210 	     if z is a constant which (for |, its bitwise not) has n
2211 	     consecutive least significant bits cleared followed by m
2212 	     consecutive set bits immediately above them, and either
2213 	     m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
2214 	     The least significant n bits of all the values in the range are
2215 	     cleared or set, the m bits above them are preserved, and any
2216 	     bits above these are required to be the same for all values in
2217 	     the range.  */
2218 	  if (vr0p && range_int_cst_p (vr0p))
2219 	    {
2220 	      wide_int w = wi::to_wide (vr1p->min);
2221 	      int m = 0, n = 0;
2222 	      if (code == BIT_IOR_EXPR)
2223 		w = ~w;
2224 	      if (wi::eq_p (w, 0))
2225 		n = TYPE_PRECISION (expr_type);
2226 	      else
2227 		{
2228 		  n = wi::ctz (w);
2229 		  w = ~(w | wi::mask (n, false, w.get_precision ()));
2230 		  if (wi::eq_p (w, 0))
2231 		    m = TYPE_PRECISION (expr_type) - n;
2232 		  else
2233 		    m = wi::ctz (w) - n;
2234 		}
2235 	      wide_int mask = wi::mask (m + n, true, w.get_precision ());
2236 	      if ((mask & wi::to_wide (vr0p->min))
2237 		  == (mask & wi::to_wide (vr0p->max)))
2238 		{
2239 		  min = int_const_binop (code, vr0p->min, vr1p->min);
2240 		  max = int_const_binop (code, vr0p->max, vr1p->min);
2241 		}
2242 	    }
2243 	}
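      /* Illustrative example of the optimization above, 8-bit unsigned:
	 z == 0xf0 for BIT_AND_EXPR gives n == 4 and m == 4, so
	 m + n == precision and the high-bits check is trivially
	 satisfied; [0x12, 0x34] & 0xf0 thus becomes [0x10, 0x30].  */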
2244 
2245       type = VR_RANGE;
2246       if (min && max)
2247 	/* Optimized above already.  */;
2248       else if (code == BIT_AND_EXPR)
2249 	{
2250 	  min = wide_int_to_tree (expr_type,
2251 				  must_be_nonzero0 & must_be_nonzero1);
2252 	  wide_int wmax = may_be_nonzero0 & may_be_nonzero1;
2253 	  /* If both input ranges contain only negative values we can
2254 	     truncate the result range maximum to the minimum of the
2255 	     input range maxima.  */
2256 	  if (int_cst_range0 && int_cst_range1
2257 	      && tree_int_cst_sgn (vr0.max) < 0
2258 	      && tree_int_cst_sgn (vr1.max) < 0)
2259 	    {
2260 	      wmax = wi::min (wmax, wi::to_wide (vr0.max),
2261 			      TYPE_SIGN (expr_type));
2262 	      wmax = wi::min (wmax, wi::to_wide (vr1.max),
2263 			      TYPE_SIGN (expr_type));
2264 	    }
2265 	  /* If either input range contains only non-negative values
2266 	     we can truncate the result range maximum to the respective
2267 	     maximum of the input range.  */
2268 	  if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
2269 	    wmax = wi::min (wmax, wi::to_wide (vr0.max),
2270 			    TYPE_SIGN (expr_type));
2271 	  if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
2272 	    wmax = wi::min (wmax, wi::to_wide (vr1.max),
2273 			    TYPE_SIGN (expr_type));
2274 	  max = wide_int_to_tree (expr_type, wmax);
2275 	  cmp = compare_values (min, max);
2276 	  /* PR68217: In the signed case, AND with a sign-bit constant
2277 	     should result in [-INF, 0] instead of [-INF, +INF].  */
2278 	  if (cmp == -2 || cmp == 1)
2279 	    {
2280 	      wide_int sign_bit
2281 		= wi::set_bit_in_zero (TYPE_PRECISION (expr_type) - 1,
2282 				       TYPE_PRECISION (expr_type));
2283 	      if (!TYPE_UNSIGNED (expr_type)
2284 		  && ((int_cst_range0
2285 		       && value_range_constant_singleton (&vr0)
2286 		       && !wi::cmps (wi::to_wide (vr0.min), sign_bit))
2287 		      || (int_cst_range1
2288 			  && value_range_constant_singleton (&vr1)
2289 			  && !wi::cmps (wi::to_wide (vr1.min), sign_bit))))
2290 		{
2291 		  min = TYPE_MIN_VALUE (expr_type);
2292 		  max = build_int_cst (expr_type, 0);
2293 		}
2294 	    }
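	  /* Illustrative example of the PR68217 fixup: for a signed
	     8-bit x with no known range, x & 0x80 (the sign-bit
	     constant) yields [-128, 0] instead of dropping to
	     [-INF, +INF].  */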
2295 	}
2296       else if (code == BIT_IOR_EXPR)
2297 	{
2298 	  max = wide_int_to_tree (expr_type,
2299 				  may_be_nonzero0 | may_be_nonzero1);
2300 	  wide_int wmin = must_be_nonzero0 | must_be_nonzero1;
2301 	  /* If both input ranges contain only non-negative values we can
2302 	     truncate the minimum of the result range to the maximum
2303 	     of the input range minima.  */
2304 	  if (int_cst_range0 && int_cst_range1
2305 	      && tree_int_cst_sgn (vr0.min) >= 0
2306 	      && tree_int_cst_sgn (vr1.min) >= 0)
2307 	    {
2308 	      wmin = wi::max (wmin, wi::to_wide (vr0.min),
2309 			      TYPE_SIGN (expr_type));
2310 	      wmin = wi::max (wmin, wi::to_wide (vr1.min),
2311 			      TYPE_SIGN (expr_type));
2312 	    }
2313 	  /* If either input range contains only negative values
2314 	     we can truncate the minimum of the result range to the
2315 	     respective minimum of that input range.  */
2316 	  if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
2317 	    wmin = wi::max (wmin, wi::to_wide (vr0.min),
2318 			    TYPE_SIGN (expr_type));
2319 	  if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
2320 	    wmin = wi::max (wmin, wi::to_wide (vr1.min),
2321 			    TYPE_SIGN (expr_type));
2322 	  min = wide_int_to_tree (expr_type, wmin);
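	  /* Illustrative example: [4, 7] | [8, 15] gives
	     max == 7 | 15 == 15 from the may-be-nonzero bits and
	     wmin == 4 | 8 == 12 from the must-be-nonzero bits, so the
	     result is [12, 15].  */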
2323 	}
2324       else if (code == BIT_XOR_EXPR)
2325 	{
2326 	  wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1)
2327 				       | ~(may_be_nonzero0 | may_be_nonzero1));
2328 	  wide_int result_one_bits
2329 	    = (wi::bit_and_not (must_be_nonzero0, may_be_nonzero1)
2330 	       | wi::bit_and_not (must_be_nonzero1, may_be_nonzero0));
2331 	  max = wide_int_to_tree (expr_type, ~result_zero_bits);
2332 	  min = wide_int_to_tree (expr_type, result_one_bits);
2333 	  /* If the resulting range has all negative or all non-negative
2334 	     values the result is better than VARYING.  */
2335 	  if (tree_int_cst_sgn (min) < 0
2336 	      || tree_int_cst_sgn (max) >= 0)
2337 	    ;
2338 	  else
2339 	    max = min = NULL_TREE;
2340 	}
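	  /* Illustrative example: for x and y both in [8, 11] (binary
	     10xx), the common high bits cancel, so bit 2 and above are
	     known zero in x ^ y and the result range is [0, 3].  */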
2341     }
2342   else
2343     gcc_unreachable ();
2344 
2345   /* If either MIN or MAX overflowed, then set the resulting range to
2346      VARYING.  */
2347   if (min == NULL_TREE
2348       || TREE_OVERFLOW_P (min)
2349       || max == NULL_TREE
2350       || TREE_OVERFLOW_P (max))
2351     {
2352       set_value_range_to_varying (vr);
2353       return;
2354     }
2355 
2356   /* We punt for [-INF, +INF].
2357      We learn nothing when we have INF on both sides.
2358      Note that we do accept [-INF, -INF] and [+INF, +INF].  */
2359   if (vrp_val_is_min (min) && vrp_val_is_max (max))
2360     {
2361       set_value_range_to_varying (vr);
2362       return;
2363     }
2364 
2365   cmp = compare_values (min, max);
2366   if (cmp == -2 || cmp == 1)
2367     {
2368       /* If the new range has its limits swapped around (MIN > MAX),
2369 	 then the operation caused one of them to wrap around, mark
2370 	 the new range VARYING.  */
2371       set_value_range_to_varying (vr);
2372     }
2373   else
2374     set_value_range (vr, type, min, max, NULL);
2375 }
2376 
2377 /* Extract range information from a unary operation CODE based on
2378    the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
2379    The resulting range is stored in *VR.  */
2380 
2381 void
2382 extract_range_from_unary_expr (value_range *vr,
2383 			       enum tree_code code, tree type,
2384 			       value_range *vr0_, tree op0_type)
2385 {
2386   value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
2387 
2388   /* VRP only operates on integral and pointer types.  */
2389   if (!(INTEGRAL_TYPE_P (op0_type)
2390 	|| POINTER_TYPE_P (op0_type))
2391       || !(INTEGRAL_TYPE_P (type)
2392 	   || POINTER_TYPE_P (type)))
2393     {
2394       set_value_range_to_varying (vr);
2395       return;
2396     }
2397 
2398   /* If VR0 is UNDEFINED, so is the result.  */
2399   if (vr0.type == VR_UNDEFINED)
2400     {
2401       set_value_range_to_undefined (vr);
2402       return;
2403     }
2404 
2405   /* Handle operations that we express in terms of others.  */
2406   if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
2407     {
2408       /* PAREN_EXPR and OBJ_TYPE_REF are simple copies.  */
2409       copy_value_range (vr, &vr0);
2410       return;
2411     }
2412   else if (code == NEGATE_EXPR)
2413     {
2414       /* -X is simply 0 - X, so re-use existing code that also handles
2415          anti-ranges fine.  */
2416       value_range zero = VR_INITIALIZER;
2417       set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
2418       extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
2419       return;
2420     }
2421   else if (code == BIT_NOT_EXPR)
2422     {
2423       /* ~X is simply -1 - X, so re-use existing code that also handles
2424          anti-ranges fine.  */
2425       value_range minusone = VR_INITIALIZER;
2426       set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
2427       extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
2428 					type, &minusone, &vr0);
2429       return;
2430     }
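  /* Illustrative examples of the two re-expressions above: with
     vr0 == [1, 5], NEGATE_EXPR computes [0, 0] - [1, 5] == [-5, -1]
     and BIT_NOT_EXPR computes [-1, -1] - [1, 5] == [-6, -2].  */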
2431 
2432   /* Now canonicalize anti-ranges to ranges when they are not symbolic
2433      and express op ~[]  as (op []') U (op []'').  */
2434   if (vr0.type == VR_ANTI_RANGE
2435       && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
2436     {
2437       extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type);
2438       if (vrtem1.type != VR_UNDEFINED)
2439 	{
2440 	  value_range vrres = VR_INITIALIZER;
2441 	  extract_range_from_unary_expr (&vrres, code, type,
2442 					 &vrtem1, op0_type);
2443 	  vrp_meet (vr, &vrres);
2444 	}
2445       return;
2446     }
2447 
2448   if (CONVERT_EXPR_CODE_P (code))
2449     {
2450       tree inner_type = op0_type;
2451       tree outer_type = type;
2452 
2453       /* If the expression evaluates to a pointer, we are only interested in
2454 	 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]).  */
2455       if (POINTER_TYPE_P (type))
2456 	{
2457 	  if (range_is_nonnull (&vr0))
2458 	    set_value_range_to_nonnull (vr, type);
2459 	  else if (range_is_null (&vr0))
2460 	    set_value_range_to_null (vr, type);
2461 	  else
2462 	    set_value_range_to_varying (vr);
2463 	  return;
2464 	}
2465 
2466       /* If VR0 is varying and we increase the type precision, assume
2467 	 a full range for the following transformation.  */
2468       if (vr0.type == VR_VARYING
2469 	  && INTEGRAL_TYPE_P (inner_type)
2470 	  && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
2471 	{
2472 	  vr0.type = VR_RANGE;
2473 	  vr0.min = TYPE_MIN_VALUE (inner_type);
2474 	  vr0.max = TYPE_MAX_VALUE (inner_type);
2475 	}
2476 
2477       /* If VR0 is a constant range or anti-range and the conversion is
2478 	 not truncating we can convert the min and max values and
2479 	 canonicalize the resulting range.  Otherwise we can do the
2480 	 conversion if the size of the range is less than what the
2481 	 precision of the target type can represent and the range is
2482 	 not an anti-range.  */
2483       if ((vr0.type == VR_RANGE
2484 	   || vr0.type == VR_ANTI_RANGE)
2485 	  && TREE_CODE (vr0.min) == INTEGER_CST
2486 	  && TREE_CODE (vr0.max) == INTEGER_CST
2487 	  && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
2488 	      || (vr0.type == VR_RANGE
2489 		  && integer_zerop (int_const_binop (RSHIFT_EXPR,
2490 		       int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
2491 		         size_int (TYPE_PRECISION (outer_type)))))))
2492 	{
2493 	  tree new_min, new_max;
2494 	  new_min = force_fit_type (outer_type, wi::to_widest (vr0.min),
2495 				    0, false);
2496 	  new_max = force_fit_type (outer_type, wi::to_widest (vr0.max),
2497 				    0, false);
2498 	  set_and_canonicalize_value_range (vr, vr0.type,
2499 					    new_min, new_max, NULL);
2500 	  return;
2501 	}
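      /* Illustrative example of a truncating conversion that is still
	 handled: converting the int range [1000, 1010] to unsigned char
	 spans only 11 values, so the bounds are wrapped to [232, 242]
	 (1000 and 1010 modulo 256).  */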
2502 
2503       set_value_range_to_varying (vr);
2504       return;
2505     }
2506   else if (code == ABS_EXPR)
2507     {
2508       tree min, max;
2509       int cmp;
2510 
2511       /* Pass through vr0 in the easy cases.  */
2512       if (TYPE_UNSIGNED (type)
2513 	  || value_range_nonnegative_p (&vr0))
2514 	{
2515 	  copy_value_range (vr, &vr0);
2516 	  return;
2517 	}
2518 
2519       /* For the remaining varying or symbolic ranges we can't do anything
2520 	 useful.  */
2521       if (vr0.type == VR_VARYING
2522 	  || symbolic_range_p (&vr0))
2523 	{
2524 	  set_value_range_to_varying (vr);
2525 	  return;
2526 	}
2527 
2528       /* With flag_wrapv, -TYPE_MIN_VALUE == TYPE_MIN_VALUE, so we can't
2529          get a useful range.  */
2530       if (!TYPE_OVERFLOW_UNDEFINED (type)
2531 	  && ((vr0.type == VR_RANGE
2532 	       && vrp_val_is_min (vr0.min))
2533 	      || (vr0.type == VR_ANTI_RANGE
2534 		  && !vrp_val_is_min (vr0.min))))
2535 	{
2536 	  set_value_range_to_varying (vr);
2537 	  return;
2538 	}
2539 
2540       /* ABS_EXPR may flip the range around, if the original range
2541 	 included negative values.  */
2542       if (!vrp_val_is_min (vr0.min))
2543 	min = fold_unary_to_constant (code, type, vr0.min);
2544       else
2545 	min = TYPE_MAX_VALUE (type);
2546 
2547       if (!vrp_val_is_min (vr0.max))
2548 	max = fold_unary_to_constant (code, type, vr0.max);
2549       else
2550 	max = TYPE_MAX_VALUE (type);
2551 
2552       cmp = compare_values (min, max);
2553 
2554       /* If a VR_ANTI_RANGE contains zero, then we have
2555 	 ~[-INF, min(MIN, MAX)].  */
2556       if (vr0.type == VR_ANTI_RANGE)
2557 	{
2558 	  if (range_includes_zero_p (vr0.min, vr0.max) == 1)
2559 	    {
2560 	      /* Take the lower of the two values.  */
2561 	      if (cmp != 1)
2562 		max = min;
2563 
2564 	      /* Create ~[-INF, min (abs(MIN), abs(MAX))]
2565 	         or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
2566 		 flag_wrapv is set and the original anti-range doesn't include
2567 	         TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE.  */
2568 	      if (TYPE_OVERFLOW_WRAPS (type))
2569 		{
2570 		  tree type_min_value = TYPE_MIN_VALUE (type);
2571 
2572 		  min = (vr0.min != type_min_value
2573 			 ? int_const_binop (PLUS_EXPR, type_min_value,
2574 					    build_int_cst (TREE_TYPE (type_min_value), 1))
2575 			 : type_min_value);
2576 		}
2577 	      else
2578 		min = TYPE_MIN_VALUE (type);
2579 	    }
2580 	  else
2581 	    {
2582 	      /* All else has failed, so create the range [0, INF], even for
2583 	         flag_wrapv since TYPE_MIN_VALUE is in the original
2584 	         anti-range.  */
2585 	      vr0.type = VR_RANGE;
2586 	      min = build_int_cst (type, 0);
2587 	      max = TYPE_MAX_VALUE (type);
2588 	    }
2589 	}
2590 
2591       /* If the range contains zero then we know that the minimum value in the
2592          range will be zero.  */
2593       else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
2594 	{
2595 	  if (cmp == 1)
2596 	    max = min;
2597 	  min = build_int_cst (type, 0);
2598 	}
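      /* Illustrative example: ABS of [-5, 3] first computes min == 5
	 and max == 3; since the range contains zero and cmp == 1, the
	 branch above fixes this up to the final range [0, 5].  */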
2599       else
2600 	{
2601           /* If the range was reversed, swap MIN and MAX.  */
2602 	  if (cmp == 1)
2603 	    std::swap (min, max);
2604 	}
2605 
2606       cmp = compare_values (min, max);
2607       if (cmp == -2 || cmp == 1)
2608 	{
2609 	  /* If the new range has its limits swapped around (MIN > MAX),
2610 	     then the operation caused one of them to wrap around, mark
2611 	     the new range VARYING.  */
2612 	  set_value_range_to_varying (vr);
2613 	}
2614       else
2615 	set_value_range (vr, vr0.type, min, max, NULL);
2616       return;
2617     }
2618 
2619   /* For unhandled operations fall back to varying.  */
2620   set_value_range_to_varying (vr);
2621   return;
2622 }
2623 
2624 /* Debugging dumps.  */
2625 
2626 void dump_value_range (FILE *, const value_range *);
2627 void debug_value_range (value_range *);
2628 void dump_all_value_ranges (FILE *);
2629 void dump_vr_equiv (FILE *, bitmap);
2630 void debug_vr_equiv (bitmap);
2631 
2632 
2633 /* Dump value range VR to FILE.  */
2634 
2635 void
2636 dump_value_range (FILE *file, const value_range *vr)
2637 {
2638   if (vr == NULL)
2639     fprintf (file, "[]");
2640   else if (vr->type == VR_UNDEFINED)
2641     fprintf (file, "UNDEFINED");
2642   else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
2643     {
2644       tree type = TREE_TYPE (vr->min);
2645 
2646       fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
2647 
2648       if (INTEGRAL_TYPE_P (type)
2649 	  && !TYPE_UNSIGNED (type)
2650 	  && vrp_val_is_min (vr->min))
2651 	fprintf (file, "-INF");
2652       else
2653 	print_generic_expr (file, vr->min);
2654 
2655       fprintf (file, ", ");
2656 
2657       if (INTEGRAL_TYPE_P (type)
2658 	  && vrp_val_is_max (vr->max))
2659 	fprintf (file, "+INF");
2660       else
2661 	print_generic_expr (file, vr->max);
2662 
2663       fprintf (file, "]");
2664 
2665       if (vr->equiv)
2666 	{
2667 	  bitmap_iterator bi;
2668 	  unsigned i, c = 0;
2669 
2670 	  fprintf (file, "  EQUIVALENCES: { ");
2671 
2672 	  EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
2673 	    {
2674 	      print_generic_expr (file, ssa_name (i));
2675 	      fprintf (file, " ");
2676 	      c++;
2677 	    }
2678 
2679 	  fprintf (file, "} (%u elements)", c);
2680 	}
2681     }
2682   else if (vr->type == VR_VARYING)
2683     fprintf (file, "VARYING");
2684   else
2685     fprintf (file, "INVALID RANGE");
2686 }
2687 
2688 
2689 /* Dump value range VR to stderr.  */
2690 
2691 DEBUG_FUNCTION void
2692 debug_value_range (value_range *vr)
2693 {
2694   dump_value_range (stderr, vr);
2695   fprintf (stderr, "\n");
2696 }
2697 
2698 
2699 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2700    create a new SSA name N and return the assertion assignment
2701    'N = ASSERT_EXPR <V, V OP W>'.  */
2702 
2703 static gimple *
2704 build_assert_expr_for (tree cond, tree v)
2705 {
2706   tree a;
2707   gassign *assertion;
2708 
2709   gcc_assert (TREE_CODE (v) == SSA_NAME
2710 	      && COMPARISON_CLASS_P (cond));
2711 
2712   a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
2713   assertion = gimple_build_assign (NULL_TREE, a);
2714 
2715   /* The new ASSERT_EXPR creates a new SSA name that replaces the
2716      operand of the ASSERT_EXPR.  Create it so the new name and the old one
2717      are registered in the replacement table so that we can fix the SSA web
2718      after adding all the ASSERT_EXPRs.  */
2719   tree new_def = create_new_def_for (v, assertion, NULL);
2720   /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
2721      given we have to be able to fully propagate those out to re-create
2722      valid SSA when removing the asserts.  */
2723   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
2724     SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
2725 
2726   return assertion;
2727 }
2728 
2729 
2730 /* Return true if STMT is a conditional whose predicate involves
2731    floating point values.  */
2732 
2733 static inline bool
2734 fp_predicate (gimple *stmt)
2735 {
2736   GIMPLE_CHECK (stmt, GIMPLE_COND);
2737 
2738   return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
2739 }
2740 
2741 /* If the range of values taken by OP can be inferred after STMT executes,
2742    return the comparison code (COMP_CODE_P) and value (VAL_P) that
2743    describe the inferred range.  Return true if a range could be
2744    inferred.  */
2745 
2746 bool
2747 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
2748 {
2749   *val_p = NULL_TREE;
2750   *comp_code_p = ERROR_MARK;
2751 
2752   /* Do not attempt to infer anything in names that flow through
2753      abnormal edges.  */
2754   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
2755     return false;
2756 
2757   /* If STMT is the last statement of a basic block with no normal
2758      successors, there is no point inferring anything about any of its
2759      operands.  We would not be able to find a proper insertion point
2760      for the assertion, anyway.  */
2761   if (stmt_ends_bb_p (stmt))
2762     {
2763       edge_iterator ei;
2764       edge e;
2765 
2766       FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
2767 	if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
2768 	  break;
2769       if (e == NULL)
2770 	return false;
2771     }
2772 
2773   if (infer_nonnull_range (stmt, op))
2774     {
2775       *val_p = build_int_cst (TREE_TYPE (op), 0);
2776       *comp_code_p = NE_EXPR;
2777       return true;
2778     }
2779 
2780   return false;
2781 }
2782 
2783 
2784 void dump_asserts_for (FILE *, tree);
2785 void debug_asserts_for (tree);
2786 void dump_all_asserts (FILE *);
2787 void debug_all_asserts (void);
2788 
2789 /* Dump all the registered assertions for NAME to FILE.  */
2790 
2791 void
2792 dump_asserts_for (FILE *file, tree name)
2793 {
2794   assert_locus *loc;
2795 
2796   fprintf (file, "Assertions to be inserted for ");
2797   print_generic_expr (file, name);
2798   fprintf (file, "\n");
2799 
2800   loc = asserts_for[SSA_NAME_VERSION (name)];
2801   while (loc)
2802     {
2803       fprintf (file, "\t");
2804       print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2805       fprintf (file, "\n\tBB #%d", loc->bb->index);
2806       if (loc->e)
2807 	{
2808 	  fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2809 	           loc->e->dest->index);
2810 	  dump_edge_info (file, loc->e, dump_flags, 0);
2811 	}
2812       fprintf (file, "\n\tPREDICATE: ");
2813       print_generic_expr (file, loc->expr);
2814       fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2815       print_generic_expr (file, loc->val);
2816       fprintf (file, "\n\n");
2817       loc = loc->next;
2818     }
2819 
2820   fprintf (file, "\n");
2821 }
2822 
2823 
2824 /* Dump all the registered assertions for NAME to stderr.  */
2825 
2826 DEBUG_FUNCTION void
2827 debug_asserts_for (tree name)
2828 {
2829   dump_asserts_for (stderr, name);
2830 }
2831 
2832 
2833 /* Dump all the registered assertions for all the names to FILE.  */
2834 
2835 void
2836 dump_all_asserts (FILE *file)
2837 {
2838   unsigned i;
2839   bitmap_iterator bi;
2840 
2841   fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2842   EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2843     dump_asserts_for (file, ssa_name (i));
2844   fprintf (file, "\n");
2845 }
2846 
2847 
2848 /* Dump all the registered assertions for all the names to stderr.  */
2849 
2850 DEBUG_FUNCTION void
2851 debug_all_asserts (void)
2852 {
2853   dump_all_asserts (stderr);
2854 }
2855 
2856 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS.  */
2857 
2858 static void
2859 add_assert_info (vec<assert_info> &asserts,
2860 		 tree name, tree expr, enum tree_code comp_code, tree val)
2861 {
2862   assert_info info;
2863   info.comp_code = comp_code;
2864   info.name = name;
2865   if (TREE_OVERFLOW_P (val))
2866     val = drop_tree_overflow (val);
2867   info.val = val;
2868   info.expr = expr;
2869   asserts.safe_push (info);
2870 }
2871 
2872 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2873    'EXPR COMP_CODE VAL' at a location that dominates block BB or
2874    E->DEST, then register this location as a possible insertion point
2875    for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2876 
2877    BB, E and SI provide the exact insertion point for the new
2878    ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
2879    on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2880    BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2881    must not be NULL.  */
2882 
2883 static void
2884 register_new_assert_for (tree name, tree expr,
2885 			 enum tree_code comp_code,
2886 			 tree val,
2887 			 basic_block bb,
2888 			 edge e,
2889 			 gimple_stmt_iterator si)
2890 {
2891   assert_locus *n, *loc, *last_loc;
2892   basic_block dest_bb;
2893 
2894   gcc_checking_assert (bb == NULL || e == NULL);
2895 
2896   if (e == NULL)
2897     gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2898 			 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2899 
2900   /* Never build an assert comparing against an integer constant with
2901      TREE_OVERFLOW set.  This confuses our undefined overflow warning
2902      machinery.  */
2903   if (TREE_OVERFLOW_P (val))
2904     val = drop_tree_overflow (val);
2905 
2906   /* The new assertion A will be inserted at BB or E.  We need to
2907      determine if the new location is dominated by a previously
2908      registered location for A.  If we are doing an edge insertion,
2909      assume that A will be inserted at E->DEST.  Note that this is not
2910      necessarily true.
2911 
2912      If E is a critical edge, it will be split.  But even if E is
2913      split, the new block will dominate the same set of blocks that
2914      E->DEST dominates.
2915 
2916      The reverse, however, is not true: blocks dominated by E->DEST
2917      will not be dominated by the new block created to split E.  So,
2918      if the insertion location is on a critical edge, we will not use
2919      the new location to move another assertion previously registered
2920      at a block dominated by E->DEST.  */
2921   dest_bb = (bb) ? bb : e->dest;
2922 
2923   /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2924      VAL at a block dominating DEST_BB, then we don't need to insert a new
2925      one.  Similarly, if the same assertion already exists at a block
2926      dominated by DEST_BB and the new location is not on a critical
2927      edge, then update the existing location for the assertion (i.e.,
2928      move the assertion up in the dominance tree).
2929 
2930      Note, this is implemented as a simple linked list because there
2931      should not be more than a handful of assertions registered per
2932      name.  If this becomes a performance problem, a table hashed by
2933      COMP_CODE and VAL could be implemented.  */
2934   loc = asserts_for[SSA_NAME_VERSION (name)];
2935   last_loc = loc;
2936   while (loc)
2937     {
2938       if (loc->comp_code == comp_code
2939 	  && (loc->val == val
2940 	      || operand_equal_p (loc->val, val, 0))
2941 	  && (loc->expr == expr
2942 	      || operand_equal_p (loc->expr, expr, 0)))
2943 	{
2944 	  /* If E is not a critical edge and DEST_BB
2945 	     dominates the existing location for the assertion, move
2946 	     the assertion up in the dominance tree by updating its
2947 	     location information.  */
2948 	  if ((e == NULL || !EDGE_CRITICAL_P (e))
2949 	      && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2950 	    {
2951 	      loc->bb = dest_bb;
2952 	      loc->e = e;
2953 	      loc->si = si;
2954 	      return;
2955 	    }
2956 	}
2957 
2958       /* Update the last node of the list and move to the next one.  */
2959       last_loc = loc;
2960       loc = loc->next;
2961     }
2962 
2963   /* If we didn't find an assertion already registered for
2964      NAME COMP_CODE VAL, add a new one at the end of the list of
2965      assertions associated with NAME.  */
2966   n = XNEW (struct assert_locus);
2967   n->bb = dest_bb;
2968   n->e = e;
2969   n->si = si;
2970   n->comp_code = comp_code;
2971   n->val = val;
2972   n->expr = expr;
2973   n->next = NULL;
2974 
2975   if (last_loc)
2976     last_loc->next = n;
2977   else
2978     asserts_for[SSA_NAME_VERSION (name)] = n;
2979 
2980   bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2981 }
2982 
2983 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
2984    Extract a suitable test code and value and store them into *CODE_P and
2985    *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
2986 
2987    If no extraction was possible, return FALSE, otherwise return TRUE.
2988 
2989    If INVERT is true, then we invert the result stored into *CODE_P.  */
2990 
2991 static bool
2992 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
2993 					 tree cond_op0, tree cond_op1,
2994 					 bool invert, enum tree_code *code_p,
2995 					 tree *val_p)
2996 {
2997   enum tree_code comp_code;
2998   tree val;
2999 
3000   /* We have a comparison of the form NAME COMP VAL
3001      or VAL COMP NAME.  */
3002   if (name == cond_op1)
3003     {
3004       /* If the predicate is of the form VAL COMP NAME, flip
3005 	 COMP around because we need to register NAME as the
3006 	 first operand in the predicate.  */
3007       comp_code = swap_tree_comparison (cond_code);
3008       val = cond_op0;
3009     }
3010   else if (name == cond_op0)
3011     {
3012       /* The comparison is of the form NAME COMP VAL, so the
3013 	 comparison code remains unchanged.  */
3014       comp_code = cond_code;
3015       val = cond_op1;
3016     }
3017   else
3018     gcc_unreachable ();
3019 
3020   /* Invert the comparison code as necessary.  */
3021   if (invert)
3022     comp_code = invert_tree_comparison (comp_code, 0);
3023 
3024   /* VRP only handles integral and pointer types.  */
3025   if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
3026       && ! POINTER_TYPE_P (TREE_TYPE (val)))
3027     return false;
3028 
3029   /* Do not register always-false predicates.
3030      FIXME:  this works around a limitation in fold() when dealing with
3031      enumerations.  Given 'enum { N1, N2 } x;', fold will not
3032      fold 'if (x > N2)' to 'if (0)'.  */
3033   if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
3034       && INTEGRAL_TYPE_P (TREE_TYPE (val)))
3035     {
3036       tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
3037       tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
3038 
3039       if (comp_code == GT_EXPR
3040 	  && (!max
3041 	      || compare_values (val, max) == 0))
3042 	return false;
3043 
3044       if (comp_code == LT_EXPR
3045 	  && (!min
3046 	      || compare_values (val, min) == 0))
3047 	return false;
3048     }
3049   *code_p = comp_code;
3050   *val_p = val;
3051   return true;
3052 }
3053 
3054 /* Find the smallest RES where RES > VAL && (RES & MASK) == RES, if any
3055    (otherwise return VAL).  VAL and MASK must be zero-extended for
3056    precision PREC.  If SGNBIT is non-zero, first xor VAL with SGNBIT
3057    (to transform signed values into unsigned) and at the end xor
3058    SGNBIT back.  */
3059 
3060 static wide_int
3061 masked_increment (const wide_int &val_in, const wide_int &mask,
3062 		  const wide_int &sgnbit, unsigned int prec)
3063 {
3064   wide_int bit = wi::one (prec), res;
3065   unsigned int i;
3066 
3067   wide_int val = val_in ^ sgnbit;
3068   for (i = 0; i < prec; i++, bit += bit)
3069     {
3070       res = mask;
3071       if ((res & bit) == 0)
3072 	continue;
3073       res = bit - 1;
3074       res = wi::bit_and_not (val + bit, res);
3075       res &= mask;
3076       if (wi::gtu_p (res, val))
3077 	return res ^ sgnbit;
3078     }
3079   return val ^ sgnbit;
3080 }
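/* Illustrative example: masked_increment with VAL == 2, MASK == 0b1010
   and SGNBIT == 0 returns 8, the smallest subset of MASK that is
   greater than VAL.  */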
3081 
3082 /* Helper for overflow_comparison_p
3083 
3084    OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
3085    OP1's defining statement to see if it ultimately has the form
3086    OP0 CODE (OP0 PLUS INTEGER_CST)
3087 
3088    If so, return TRUE indicating this is an overflow test and store into
3089    *NEW_CST an updated constant that can be used in a narrowed range test.
3090 
3091    REVERSED indicates if the comparison was originally:
3092 
3093    OP1 CODE' OP0.
3094 
3095    This affects how we build the updated constant.  */
3096 
3097 static bool
3098 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
3099 		         bool follow_assert_exprs, bool reversed, tree *new_cst)
3100 {
3101   /* See if this is a relational operation between two SSA_NAMES with
3102      unsigned, overflow wrapping values.  If so, check it more deeply.  */
3103   if ((code == LT_EXPR || code == LE_EXPR
3104        || code == GE_EXPR || code == GT_EXPR)
3105       && TREE_CODE (op0) == SSA_NAME
3106       && TREE_CODE (op1) == SSA_NAME
3107       && INTEGRAL_TYPE_P (TREE_TYPE (op0))
3108       && TYPE_UNSIGNED (TREE_TYPE (op0))
3109       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
3110     {
3111       gimple *op1_def = SSA_NAME_DEF_STMT (op1);
3112 
3113       /* If requested, follow any ASSERT_EXPRs backwards for OP1.  */
3114       if (follow_assert_exprs)
3115 	{
3116 	  while (gimple_assign_single_p (op1_def)
3117 		 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
3118 	    {
3119 	      op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
3120 	      if (TREE_CODE (op1) != SSA_NAME)
3121 		break;
3122 	      op1_def = SSA_NAME_DEF_STMT (op1);
3123 	    }
3124 	}
3125 
3126       /* Now look at the defining statement of OP1 to see if it adds
3127 	 or subtracts a nonzero constant from another operand.  */
3128       if (op1_def
3129 	  && is_gimple_assign (op1_def)
3130 	  && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
3131 	  && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
3132 	  && !integer_zerop (gimple_assign_rhs2 (op1_def)))
3133 	{
3134 	  tree target = gimple_assign_rhs1 (op1_def);
3135 
3136 	  /* If requested, follow ASSERT_EXPRs backwards for op0 looking
3137 	     for one where TARGET appears on the RHS.  */
3138 	  if (follow_assert_exprs)
3139 	    {
3140 	      /* Now see if that "other operand" is op0, following the chain
3141 		 of ASSERT_EXPRs if necessary.  */
3142 	      gimple *op0_def = SSA_NAME_DEF_STMT (op0);
3143 	      while (op0 != target
3144 		     && gimple_assign_single_p (op0_def)
3145 		     && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
3146 		{
3147 		  op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
3148 		  if (TREE_CODE (op0) != SSA_NAME)
3149 		    break;
3150 		  op0_def = SSA_NAME_DEF_STMT (op0);
3151 		}
3152 	    }
3153 
3154 	  /* If we did not find our target SSA_NAME, then this is not
3155 	     an overflow test.  */
3156 	  if (op0 != target)
3157 	    return false;
3158 
3159 	  tree type = TREE_TYPE (op0);
3160 	  wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
3161 	  tree inc = gimple_assign_rhs2 (op1_def);
3162 	  if (reversed)
3163 	    *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
3164 	  else
3165 	    *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
3166 	  return true;
3167 	}
3168     }
3169   return false;
3170 }
3171 
3172 /* OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
3173    OP1's defining statement to see if it ultimately has the form
3174    OP0 CODE (OP0 PLUS INTEGER_CST)
3175 
3176    If so, return TRUE indicating this is an overflow test and store into
3177    *NEW_CST an updated constant that can be used in a narrowed range test.
3178 
3179    These statements are left as-is in the IL to facilitate discovery of
3180    {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline.  But
3181    the alternate range representation is often useful within VRP.  */
3182 
3183 bool
3184 overflow_comparison_p (tree_code code, tree name, tree val,
3185 		       bool use_equiv_p, tree *new_cst)
3186 {
3187   if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
3188     return true;
3189   return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
3190 				  use_equiv_p, true, new_cst);
3191 }
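/* Illustrative example: for unsigned char x and y = x + 10, the test
   x > y can only hold when x + 10 wrapped, so it is recognized as an
   overflow test with *NEW_CST == 245 (255 - 10), i.e. x > 245.  */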
3192 
3193 
3194 /* Try to register an edge assertion for SSA name NAME on edge E for
3195    the condition COND_OP0 COND_CODE COND_OP1 contributing to the
3196    conditional jump guarding E.  Invert the condition if INVERT is true.  */
3197 
3198 static void
3199 register_edge_assert_for_2 (tree name, edge e,
3200 			    enum tree_code cond_code,
3201 			    tree cond_op0, tree cond_op1, bool invert,
3202 			    vec<assert_info> &asserts)
3203 {
3204   tree val;
3205   enum tree_code comp_code;
3206 
3207   if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
3208 						cond_op0,
3209 						cond_op1,
3210 						invert, &comp_code, &val))
3211     return;
3212 
3213   /* Queue the assert.  */
3214   tree x;
3215   if (overflow_comparison_p (comp_code, name, val, false, &x))
3216     {
3217       enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
3218 				 ? GT_EXPR : LE_EXPR);
3219       add_assert_info (asserts, name, name, new_code, x);
3220     }
3221   add_assert_info (asserts, name, name, comp_code, val);
3222 
3223   /* In the case of NAME <= CST and NAME being defined as
3224      NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
3225      and NAME2 <= CST - CST2.  We can do the same for NAME > CST.
3226      This catches range and anti-range tests.  */
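  /* Illustrative example: with unsigned char values and n defined as
     n = m + 20, the test n <= 100 registers an assert for m based on
     the expression m + 20, which effectively encodes the anti-range
     test m <= 80 or m >= 236, i.e. m in ~[81, 235].  */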
3227   if ((comp_code == LE_EXPR
3228        || comp_code == GT_EXPR)
3229       && TREE_CODE (val) == INTEGER_CST
3230       && TYPE_UNSIGNED (TREE_TYPE (val)))
3231     {
3232       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3233       tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
3234 
3235       /* Extract CST2 from the (optional) addition.  */
3236       if (is_gimple_assign (def_stmt)
3237 	  && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
3238 	{
3239 	  name2 = gimple_assign_rhs1 (def_stmt);
3240 	  cst2 = gimple_assign_rhs2 (def_stmt);
3241 	  if (TREE_CODE (name2) == SSA_NAME
3242 	      && TREE_CODE (cst2) == INTEGER_CST)
3243 	    def_stmt = SSA_NAME_DEF_STMT (name2);
3244 	}
3245 
3246       /* Extract NAME2 from the (optional) sign-changing cast.  */
3247       if (gimple_assign_cast_p (def_stmt))
3248 	{
3249 	  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
3250 	      && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
3251 	      && (TYPE_PRECISION (gimple_expr_type (def_stmt))
3252 		  == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
3253 	    name3 = gimple_assign_rhs1 (def_stmt);
3254 	}
3255 
3256       /* If name3 is used later, create an ASSERT_EXPR for it.  */
3257       if (name3 != NULL_TREE
3258       	  && TREE_CODE (name3) == SSA_NAME
3259 	  && (cst2 == NULL_TREE
3260 	      || TREE_CODE (cst2) == INTEGER_CST)
3261 	  && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
3262 	{
3263 	  tree tmp;
3264 
3265 	  /* Build an expression for the range test.  */
3266 	  tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
3267 	  if (cst2 != NULL_TREE)
3268 	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
3269 
3270 	  if (dump_file)
3271 	    {
3272 	      fprintf (dump_file, "Adding assert for ");
3273 	      print_generic_expr (dump_file, name3);
3274 	      fprintf (dump_file, " from ");
3275 	      print_generic_expr (dump_file, tmp);
3276 	      fprintf (dump_file, "\n");
3277 	    }
3278 
3279 	  add_assert_info (asserts, name3, tmp, comp_code, val);
3280 	}
3281 
3282       /* If name2 is used later, create an ASSERT_EXPR for it.  */
3283       if (name2 != NULL_TREE
3284       	  && TREE_CODE (name2) == SSA_NAME
3285 	  && TREE_CODE (cst2) == INTEGER_CST
3286 	  && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
3287 	{
3288 	  tree tmp;
3289 
3290 	  /* Build an expression for the range test.  */
3291 	  tmp = name2;
3292 	  if (TREE_TYPE (name) != TREE_TYPE (name2))
3293 	    tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
3294 	  if (cst2 != NULL_TREE)
3295 	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
3296 
3297 	  if (dump_file)
3298 	    {
3299 	      fprintf (dump_file, "Adding assert for ");
3300 	      print_generic_expr (dump_file, name2);
3301 	      fprintf (dump_file, " from ");
3302 	      print_generic_expr (dump_file, tmp);
3303 	      fprintf (dump_file, "\n");
3304 	    }
3305 
3306 	  add_assert_info (asserts, name2, tmp, comp_code, val);
3307 	}
3308     }
3309 
3310   /* In the case of post-increment/decrement tests like if (i++) ... and
3311      uses of the in/decremented value on the edge, the extra name we want
3312      to assert for is not on the def chain of the name compared.  Instead
3313      it is in the set of use stmts.
3314      Similar cases happen for conversions that were simplified through
3315      fold_{sign_changed,widened}_comparison.  */
3316   if ((comp_code == NE_EXPR
3317        || comp_code == EQ_EXPR)
3318       && TREE_CODE (val) == INTEGER_CST)
3319     {
3320       imm_use_iterator ui;
3321       gimple *use_stmt;
3322       FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
3323 	{
3324 	  if (!is_gimple_assign (use_stmt))
3325 	    continue;
3326 
3327 	  /* Only consider use-stmts whose block dominates the predecessor.  */
3328 	  if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
3329 	    continue;
3330 
3331 	  tree name2 = gimple_assign_lhs (use_stmt);
3332 	  if (TREE_CODE (name2) != SSA_NAME)
3333 	    continue;
3334 
3335 	  enum tree_code code = gimple_assign_rhs_code (use_stmt);
3336 	  tree cst;
3337 	  if (code == PLUS_EXPR
3338 	      || code == MINUS_EXPR)
3339 	    {
3340 	      cst = gimple_assign_rhs2 (use_stmt);
3341 	      if (TREE_CODE (cst) != INTEGER_CST)
3342 		continue;
3343 	      cst = int_const_binop (code, val, cst);
3344 	    }
3345 	  else if (CONVERT_EXPR_CODE_P (code))
3346 	    {
3347 	      /* For truncating conversions we cannot record
3348 		 an inequality.  */
3349 	      if (comp_code == NE_EXPR
3350 		  && (TYPE_PRECISION (TREE_TYPE (name2))
3351 		      < TYPE_PRECISION (TREE_TYPE (name))))
3352 		continue;
3353 	      cst = fold_convert (TREE_TYPE (name2), val);
3354 	    }
3355 	  else
3356 	    continue;
3357 
3358 	  if (TREE_OVERFLOW_P (cst))
3359 	    cst = drop_tree_overflow (cst);
3360 	  add_assert_info (asserts, name2, name2, comp_code, cst);
3361 	}
3362     }
3363 
3364   if (TREE_CODE_CLASS (comp_code) == tcc_comparison
3365       && TREE_CODE (val) == INTEGER_CST)
3366     {
3367       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3368       tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
3369       tree val2 = NULL_TREE;
3370       unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
3371       wide_int mask = wi::zero (prec);
3372       unsigned int nprec = prec;
3373       enum tree_code rhs_code = ERROR_MARK;
3374 
3375       if (is_gimple_assign (def_stmt))
3376 	rhs_code = gimple_assign_rhs_code (def_stmt);
3377 
3378       /* In the case of NAME != CST1 where NAME = A +- CST2 we can
3379          assert that A != CST1 -+ CST2.  */
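      /* E.g. given NAME = A + 2 and the test NAME == 10, we record the
	 assertion A == 8; for NAME != 10 we record A != 8.  */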
3380       if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
3381 	  && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
3382 	{
3383 	  tree op0 = gimple_assign_rhs1 (def_stmt);
3384 	  tree op1 = gimple_assign_rhs2 (def_stmt);
3385 	  if (TREE_CODE (op0) == SSA_NAME
3386 	      && TREE_CODE (op1) == INTEGER_CST)
3387 	    {
3388 	      enum tree_code reverse_op = (rhs_code == PLUS_EXPR
3389 					   ? MINUS_EXPR : PLUS_EXPR);
3390 	      op1 = int_const_binop (reverse_op, val, op1);
3391 	      if (TREE_OVERFLOW (op1))
3392 		op1 = drop_tree_overflow (op1);
3393 	      add_assert_info (asserts, op0, op0, comp_code, op1);
3394 	    }
3395 	}
3396 
3397       /* Add asserts for NAME cmp CST and NAME being defined
3398 	 as NAME = (int) NAME2.  */
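      /* An illustrative sketch, assuming 32-bit int and unsigned: for
	 NAME = (int) NAME2 and the test NAME < 0, the code below records
	 the unsigned range test NAME2 + 0x80000000 <= 0x7fffffff.  */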
3399       if (!TYPE_UNSIGNED (TREE_TYPE (val))
3400 	  && (comp_code == LE_EXPR || comp_code == LT_EXPR
3401 	      || comp_code == GT_EXPR || comp_code == GE_EXPR)
3402 	  && gimple_assign_cast_p (def_stmt))
3403 	{
3404 	  name2 = gimple_assign_rhs1 (def_stmt);
3405 	  if (CONVERT_EXPR_CODE_P (rhs_code)
3406 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
3407 	      && TYPE_UNSIGNED (TREE_TYPE (name2))
3408 	      && prec == TYPE_PRECISION (TREE_TYPE (name2))
3409 	      && (comp_code == LE_EXPR || comp_code == GT_EXPR
3410 		  || !tree_int_cst_equal (val,
3411 					  TYPE_MIN_VALUE (TREE_TYPE (val)))))
3412 	    {
3413 	      tree tmp, cst;
3414 	      enum tree_code new_comp_code = comp_code;
3415 
3416 	      cst = fold_convert (TREE_TYPE (name2),
3417 				  TYPE_MIN_VALUE (TREE_TYPE (val)));
3418 	      /* Build an expression for the range test.  */
3419 	      tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
3420 	      cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
3421 				 fold_convert (TREE_TYPE (name2), val));
3422 	      if (comp_code == LT_EXPR || comp_code == GE_EXPR)
3423 		{
3424 		  new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
3425 		  cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
3426 				     build_int_cst (TREE_TYPE (name2), 1));
3427 		}
3428 
3429 	      if (dump_file)
3430 		{
3431 		  fprintf (dump_file, "Adding assert for ");
3432 		  print_generic_expr (dump_file, name2);
3433 		  fprintf (dump_file, " from ");
3434 		  print_generic_expr (dump_file, tmp);
3435 		  fprintf (dump_file, "\n");
3436 		}
3437 
3438 	      add_assert_info (asserts, name2, tmp, new_comp_code, cst);
3439 	    }
3440 	}
3441 
3442       /* Add asserts for NAME cmp CST and NAME being defined as
3443 	 NAME = NAME2 >> CST2.
3444 
3445 	 Extract CST2 from the right shift.  */
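      /* E.g. given NAME = NAME2 >> 2 and the test NAME == 3, NAME2 must
	 be in [12, 15]; the code below records this as the unsigned test
	 NAME2 - 12 <= 3.  */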
3446       if (rhs_code == RSHIFT_EXPR)
3447 	{
3448 	  name2 = gimple_assign_rhs1 (def_stmt);
3449 	  cst2 = gimple_assign_rhs2 (def_stmt);
3450 	  if (TREE_CODE (name2) == SSA_NAME
3451 	      && tree_fits_uhwi_p (cst2)
3452 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
3453 	      && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
3454 	      && type_has_mode_precision_p (TREE_TYPE (val)))
3455 	    {
3456 	      mask = wi::mask (tree_to_uhwi (cst2), false, prec);
3457 	      val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
3458 	    }
3459 	}
3460       if (val2 != NULL_TREE
3461 	  && TREE_CODE (val2) == INTEGER_CST
3462 	  && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
3463 					    TREE_TYPE (val),
3464 					    val2, cst2), val))
3465 	{
3466 	  enum tree_code new_comp_code = comp_code;
3467 	  tree tmp, new_val;
3468 
3469 	  tmp = name2;
3470 	  if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
3471 	    {
3472 	      if (!TYPE_UNSIGNED (TREE_TYPE (val)))
3473 		{
3474 		  tree type = build_nonstandard_integer_type (prec, 1);
3475 		  tmp = build1 (NOP_EXPR, type, name2);
3476 		  val2 = fold_convert (type, val2);
3477 		}
3478 	      tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
3479 	      new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
3480 	      new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
3481 	    }
3482 	  else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
3483 	    {
3484 	      wide_int minval
3485 		= wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
3486 	      new_val = val2;
3487 	      if (minval == wi::to_wide (new_val))
3488 		new_val = NULL_TREE;
3489 	    }
3490 	  else
3491 	    {
3492 	      wide_int maxval
3493 		= wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
3494 	      mask |= wi::to_wide (val2);
3495 	      if (wi::eq_p (mask, maxval))
3496 		new_val = NULL_TREE;
3497 	      else
3498 		new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
3499 	    }
3500 
3501 	  if (new_val)
3502 	    {
3503 	      if (dump_file)
3504 		{
3505 		  fprintf (dump_file, "Adding assert for ");
3506 		  print_generic_expr (dump_file, name2);
3507 		  fprintf (dump_file, " from ");
3508 		  print_generic_expr (dump_file, tmp);
3509 		  fprintf (dump_file, "\n");
3510 		}
3511 
3512 	      add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
3513 	    }
3514 	}
3515 
3516       /* Add asserts for NAME cmp CST and NAME being defined as
3517 	 NAME = NAME2 & CST2.
3518 
3519 	 Extract CST2 from the and.
3520 
3521 	 Also handle
3522 	 NAME = (unsigned) NAME2;
3523 	 casts where NAME's type is unsigned and has smaller precision
3524 	 than NAME2's type as if it was NAME = NAME2 & MASK.  */
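      /* E.g. given NAME = NAME2 & 0xfc and the test NAME == 0x40, NAME2
	 must be in [0x40, 0x43], which is recorded as the unsigned test
	 NAME2 - 0x40 <= 3.  Likewise a cast like
	 NAME = (unsigned char) NAME2 is handled as NAME = NAME2 & 0xff.  */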
3525       names[0] = NULL_TREE;
3526       names[1] = NULL_TREE;
3527       cst2 = NULL_TREE;
3528       if (rhs_code == BIT_AND_EXPR
3529 	  || (CONVERT_EXPR_CODE_P (rhs_code)
3530 	      && INTEGRAL_TYPE_P (TREE_TYPE (val))
3531 	      && TYPE_UNSIGNED (TREE_TYPE (val))
3532 	      && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
3533 		 > prec))
3534 	{
3535 	  name2 = gimple_assign_rhs1 (def_stmt);
3536 	  if (rhs_code == BIT_AND_EXPR)
3537 	    cst2 = gimple_assign_rhs2 (def_stmt);
3538 	  else
3539 	    {
3540 	      cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
3541 	      nprec = TYPE_PRECISION (TREE_TYPE (name2));
3542 	    }
3543 	  if (TREE_CODE (name2) == SSA_NAME
3544 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
3545 	      && TREE_CODE (cst2) == INTEGER_CST
3546 	      && !integer_zerop (cst2)
3547 	      && (nprec > 1
3548 		  || TYPE_UNSIGNED (TREE_TYPE (val))))
3549 	    {
3550 	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
3551 	      if (gimple_assign_cast_p (def_stmt2))
3552 		{
3553 		  names[1] = gimple_assign_rhs1 (def_stmt2);
3554 		  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
3555 		      || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
3556 		      || (TYPE_PRECISION (TREE_TYPE (name2))
3557 			  != TYPE_PRECISION (TREE_TYPE (names[1]))))
3558 		    names[1] = NULL_TREE;
3559 		}
3560 	      names[0] = name2;
3561 	    }
3562 	}
3563       if (names[0] || names[1])
3564 	{
3565 	  wide_int minv, maxv, valv, cst2v;
3566 	  wide_int tem, sgnbit;
3567 	  bool valid_p = false, valn, cst2n;
3568 	  enum tree_code ccode = comp_code;
3569 
3570 	  valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
3571 	  cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
3572 	  valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
3573 	  cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
3574 	  /* If CST2 doesn't have the most significant bit set,
3575 	     but VAL is negative, we have a comparison like
3576 	     if ((x & 0x123) > -4), which is always true.  Just give up.  */
3577 	  if (!cst2n && valn)
3578 	    ccode = ERROR_MARK;
3579 	  if (cst2n)
3580 	    sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3581 	  else
3582 	    sgnbit = wi::zero (nprec);
3583 	  minv = valv & cst2v;
3584 	  switch (ccode)
3585 	    {
3586 	    case EQ_EXPR:
3587 	      /* Minimum unsigned value for equality is VAL & CST2
3588 		 (should be equal to VAL, otherwise we probably should
3589 		 have folded the comparison into false) and
3590 		 maximum unsigned value is VAL | ~CST2.  */
3591 	      maxv = valv | ~cst2v;
3592 	      valid_p = true;
3593 	      break;
3594 
3595 	    case NE_EXPR:
3596 	      tem = valv | ~cst2v;
3597 	      /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U.  */
3598 	      if (valv == 0)
3599 		{
3600 		  cst2n = false;
3601 		  sgnbit = wi::zero (nprec);
3602 		  goto gt_expr;
3603 		}
3604 	      /* If (VAL | ~CST2) is all ones, handle it as
3605 		 (X & CST2) < VAL.  */
3606 	      if (tem == -1)
3607 		{
3608 		  cst2n = false;
3609 		  valn = false;
3610 		  sgnbit = wi::zero (nprec);
3611 		  goto lt_expr;
3612 		}
3613 	      if (!cst2n && wi::neg_p (cst2v))
3614 		sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3615 	      if (sgnbit != 0)
3616 		{
3617 		  if (valv == sgnbit)
3618 		    {
3619 		      cst2n = true;
3620 		      valn = true;
3621 		      goto gt_expr;
3622 		    }
3623 		  if (tem == wi::mask (nprec - 1, false, nprec))
3624 		    {
3625 		      cst2n = true;
3626 		      goto lt_expr;
3627 		    }
3628 		  if (!cst2n)
3629 		    sgnbit = wi::zero (nprec);
3630 		}
3631 	      break;
3632 
3633 	    case GE_EXPR:
3634 	      /* Minimum unsigned value for >= if (VAL & CST2) == VAL
3635 		 is VAL and maximum unsigned value is ~0.  For signed
3636 		 comparison, if CST2 doesn't have the most significant bit
3637 		 set, handle it similarly.  If CST2 has the MSB set,
3638 		 the minimum is the same, and the maximum is ~0U/2.  */
3639 	      if (minv != valv)
3640 		{
3641 		  /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
3642 		     VAL.  */
3643 		  minv = masked_increment (valv, cst2v, sgnbit, nprec);
3644 		  if (minv == valv)
3645 		    break;
3646 		}
3647 	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3648 	      valid_p = true;
3649 	      break;
3650 
3651 	    case GT_EXPR:
3652 	    gt_expr:
3653 	      /* Find out smallest MINV where MINV > VAL
3654 		 && (MINV & CST2) == MINV, if any.  If VAL is signed and
3655 		 CST2 has MSB set, compute it biased by 1 << (nprec - 1).  */
3656 	      minv = masked_increment (valv, cst2v, sgnbit, nprec);
3657 	      if (minv == valv)
3658 		break;
3659 	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3660 	      valid_p = true;
3661 	      break;
3662 
3663 	    case LE_EXPR:
3664 	      /* Minimum unsigned value for <= is 0 and maximum
3665 		 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
3666 		 Otherwise, find smallest VAL2 where VAL2 > VAL
3667 		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3668 		 as maximum.
3669 		 For signed comparison, if CST2 doesn't have the most
3670 		 significant bit set, handle it similarly.  If CST2 has the
3671 		 MSB set, the maximum is the same and the minimum is INT_MIN.  */
3672 	      if (minv == valv)
3673 		maxv = valv;
3674 	      else
3675 		{
3676 		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3677 		  if (maxv == valv)
3678 		    break;
3679 		  maxv -= 1;
3680 		}
3681 	      maxv |= ~cst2v;
3682 	      minv = sgnbit;
3683 	      valid_p = true;
3684 	      break;
3685 
3686 	    case LT_EXPR:
3687 	    lt_expr:
3688 	      /* Minimum unsigned value for < is 0 and maximum
3689 		 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
3690 		 Otherwise, find smallest VAL2 where VAL2 > VAL
3691 		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3692 		 as maximum.
3693 		 For signed comparison, if CST2 doesn't have the most
3694 		 significant bit set, handle it similarly.  If CST2 has the
3695 		 MSB set, the maximum is the same and the minimum is INT_MIN.  */
3696 	      if (minv == valv)
3697 		{
3698 		  if (valv == sgnbit)
3699 		    break;
3700 		  maxv = valv;
3701 		}
3702 	      else
3703 		{
3704 		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3705 		  if (maxv == valv)
3706 		    break;
3707 		}
3708 	      maxv -= 1;
3709 	      maxv |= ~cst2v;
3710 	      minv = sgnbit;
3711 	      valid_p = true;
3712 	      break;
3713 
3714 	    default:
3715 	      break;
3716 	    }
3717 	  if (valid_p
3718 	      && (maxv - minv) != -1)
3719 	    {
3720 	      tree tmp, new_val, type;
3721 	      int i;
3722 
3723 	      for (i = 0; i < 2; i++)
3724 		if (names[i])
3725 		  {
3726 		    wide_int maxv2 = maxv;
3727 		    tmp = names[i];
3728 		    type = TREE_TYPE (names[i]);
3729 		    if (!TYPE_UNSIGNED (type))
3730 		      {
3731 			type = build_nonstandard_integer_type (nprec, 1);
3732 			tmp = build1 (NOP_EXPR, type, names[i]);
3733 		      }
3734 		    if (minv != 0)
3735 		      {
3736 			tmp = build2 (PLUS_EXPR, type, tmp,
3737 				      wide_int_to_tree (type, -minv));
3738 			maxv2 = maxv - minv;
3739 		      }
3740 		    new_val = wide_int_to_tree (type, maxv2);
3741 
3742 		    if (dump_file)
3743 		      {
3744 			fprintf (dump_file, "Adding assert for ");
3745 			print_generic_expr (dump_file, names[i]);
3746 			fprintf (dump_file, " from ");
3747 			print_generic_expr (dump_file, tmp);
3748 			fprintf (dump_file, "\n");
3749 		      }
3750 
3751 		    add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
3752 		  }
3753 	    }
3754 	}
3755     }
3756 }
3757 
3758 /* OP is an operand of a truth value expression which is known to have
3759    a particular value.  Register any asserts for OP and for any
3760    operands in OP's defining statement.
3761 
3762    If CODE is EQ_EXPR, then we want to register that OP is zero (false);
3763    if CODE is NE_EXPR, then we want to register that OP is nonzero (true).  */
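/* For example, if OP is defined as OP = a < b and we learn that OP is
   true (CODE is NE_EXPR), we register OP != 0 and then recurse to
   register the assertions implied by a < b for both a and b on E.  */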
3764 
3765 static void
3766 register_edge_assert_for_1 (tree op, enum tree_code code,
3767 			    edge e, vec<assert_info> &asserts)
3768 {
3769   gimple *op_def;
3770   tree val;
3771   enum tree_code rhs_code;
3772 
3773   /* We only care about SSA_NAMEs.  */
3774   if (TREE_CODE (op) != SSA_NAME)
3775     return;
3776 
3777   /* We know that OP will have a zero or nonzero value.  */
3778   val = build_int_cst (TREE_TYPE (op), 0);
3779   add_assert_info (asserts, op, op, code, val);
3780 
3781   /* Now look at how OP is set.  If it's set from a comparison,
3782      a truth operation or some bit operations, then we may be able
3783      to register information about the operands of that assignment.  */
3784   op_def = SSA_NAME_DEF_STMT (op);
3785   if (gimple_code (op_def) != GIMPLE_ASSIGN)
3786     return;
3787 
3788   rhs_code = gimple_assign_rhs_code (op_def);
3789 
3790   if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
3791     {
3792       bool invert = (code == EQ_EXPR);
3793       tree op0 = gimple_assign_rhs1 (op_def);
3794       tree op1 = gimple_assign_rhs2 (op_def);
3795 
3796       if (TREE_CODE (op0) == SSA_NAME)
3797         register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
3798       if (TREE_CODE (op1) == SSA_NAME)
3799         register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
3800     }
3801   else if ((code == NE_EXPR
3802 	    && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
3803 	   || (code == EQ_EXPR
3804 	       && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
3805     {
3806       /* Recurse on each operand.  */
3807       tree op0 = gimple_assign_rhs1 (op_def);
3808       tree op1 = gimple_assign_rhs2 (op_def);
3809       if (TREE_CODE (op0) == SSA_NAME
3810 	  && has_single_use (op0))
3811 	register_edge_assert_for_1 (op0, code, e, asserts);
3812       if (TREE_CODE (op1) == SSA_NAME
3813 	  && has_single_use (op1))
3814 	register_edge_assert_for_1 (op1, code, e, asserts);
3815     }
3816   else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
3817 	   && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
3818     {
3819       /* Recurse, flipping CODE.  */
3820       code = invert_tree_comparison (code, false);
3821       register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3822     }
3823   else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
3824     {
3825       /* Recurse through the copy.  */
3826       register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3827     }
3828   else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
3829     {
3830       /* Recurse through the type conversion, unless it is a narrowing
3831 	 conversion or conversion from non-integral type.  */
3832       tree rhs = gimple_assign_rhs1 (op_def);
3833       if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
3834 	  && (TYPE_PRECISION (TREE_TYPE (rhs))
3835 	      <= TYPE_PRECISION (TREE_TYPE (op))))
3836 	register_edge_assert_for_1 (rhs, code, e, asserts);
3837     }
3838 }
3839 
3840 /* Check if the comparison
3841      NAME COND_OP INTEGER_CST
3842    has the form
3843      (X & 11...100..0) COND_OP XX...X00...0
3844    Such a comparison can yield assertions like
3845      X >= XX...X00...0
3846      X <= XX...X11...1
3847    in case of COND_OP being EQ_EXPR or
3848      X < XX...X00...0
3849      X > XX...X11...1
3850    in case of NE_EXPR.  */
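/* For example, with X of type unsigned char, (X & 0xf0) == 0x50 yields
   X >= 0x50 and X <= 0x5f.  For NE_EXPR an assertion is only produced
   when one of the alternatives is impossible, e.g. (X & 0xf0) != 0
   yields just X > 0x0f.  */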
3851 
3852 static bool
3853 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
3854 		      tree *new_name, tree *low, enum tree_code *low_code,
3855 		      tree *high, enum tree_code *high_code)
3856 {
3857   gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3858 
3859   if (!is_gimple_assign (def_stmt)
3860       || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
3861     return false;
3862 
3863   tree t = gimple_assign_rhs1 (def_stmt);
3864   tree maskt = gimple_assign_rhs2 (def_stmt);
3865   if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
3866     return false;
3867 
3868   wi::tree_to_wide_ref mask = wi::to_wide (maskt);
3869   wide_int inv_mask = ~mask;
3870   /* Assume VALT is INTEGER_CST.  */
3871   wi::tree_to_wide_ref val = wi::to_wide (valt);
3872 
3873   if ((inv_mask & (inv_mask + 1)) != 0
3874       || (val & mask) != val)
3875     return false;
3876 
3877   bool is_range = cond_code == EQ_EXPR;
3878 
3879   tree type = TREE_TYPE (t);
3880   wide_int min = wi::min_value (type),
3881     max = wi::max_value (type);
3882 
3883   if (is_range)
3884     {
3885       *low_code = val == min ? ERROR_MARK : GE_EXPR;
3886       *high_code = val == max ? ERROR_MARK : LE_EXPR;
3887     }
3888   else
3889     {
3890       /* We can still generate an assertion if one of the
3891 	 alternatives is known to always be false.  */
3892       if (val == min)
3893 	{
3894 	  *low_code = (enum tree_code) 0;
3895 	  *high_code = GT_EXPR;
3896 	}
3897       else if ((val | inv_mask) == max)
3898 	{
3899 	  *low_code = LT_EXPR;
3900 	  *high_code = (enum tree_code) 0;
3901 	}
3902       else
3903 	return false;
3904     }
3905 
3906   *new_name = t;
3907   *low = wide_int_to_tree (type, val);
3908   *high = wide_int_to_tree (type, val | inv_mask);
3909 
3910   if (wi::neg_p (val, TYPE_SIGN (type)))
3911     std::swap (*low, *high);
3912 
3913   return true;
3914 }
3915 
3916 /* Try to register an edge assertion for SSA name NAME on edge E for
3917    the condition COND_OP0 COND_CODE COND_OP1 contributing to the
3918    conditional jump, recording the assertions in ASSERTS.  */
3919 
3920 void
3921 register_edge_assert_for (tree name, edge e,
3922 			  enum tree_code cond_code, tree cond_op0,
3923 			  tree cond_op1, vec<assert_info> &asserts)
3924 {
3925   tree val;
3926   enum tree_code comp_code;
3927   bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
3928 
3929   /* Do not attempt to infer anything for names that flow through
3930      abnormal edges.  */
3931   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
3932     return;
3933 
3934   if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
3935 						cond_op0, cond_op1,
3936 						is_else_edge,
3937 						&comp_code, &val))
3938     return;
3939 
3940   /* Register ASSERT_EXPRs for NAME.  */
3941   register_edge_assert_for_2 (name, e, cond_code, cond_op0,
3942 			      cond_op1, is_else_edge, asserts);
3943 
3945   /* If COND is effectively an equality test of an SSA_NAME against
3946      the value zero or one, then we may be able to assert values
3947      for SSA_NAMEs which flow into COND.  */
3948 
3949   /* In the case of NAME == 1 or NAME != 0, for a BIT_AND_EXPR defining
3950      statement of NAME we can assert that both operands of the
3951      BIT_AND_EXPR have nonzero values.  */
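  /* E.g. with boolean a and b, NAME = a & b together with NAME != 0
     lets us assert a != 0 and b != 0 on the edge.  */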
3952   if (((comp_code == EQ_EXPR && integer_onep (val))
3953        || (comp_code == NE_EXPR && integer_zerop (val))))
3954     {
3955       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3956 
3957       if (is_gimple_assign (def_stmt)
3958 	  && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
3959 	{
3960 	  tree op0 = gimple_assign_rhs1 (def_stmt);
3961 	  tree op1 = gimple_assign_rhs2 (def_stmt);
3962 	  register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
3963 	  register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
3964 	}
3965     }
3966 
3967   /* In the case of NAME == 0 or NAME != 1, for a BIT_IOR_EXPR defining
3968      statement of NAME we can assert that both operands of the
3969      BIT_IOR_EXPR have the value zero.  */
3970   if (((comp_code == EQ_EXPR && integer_zerop (val))
3971        || (comp_code == NE_EXPR && integer_onep (val))))
3972     {
3973       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3974 
3975       /* For a BIT_IOR_EXPR both operands necessarily have zero
3976 	 value only if NAME == 0, or if the type precision is one.  */
3977       if (is_gimple_assign (def_stmt)
3978 	  && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
3979 	      && (TYPE_PRECISION (TREE_TYPE (name)) == 1
3980 	          || comp_code == EQ_EXPR)))
3981 	{
3982 	  tree op0 = gimple_assign_rhs1 (def_stmt);
3983 	  tree op1 = gimple_assign_rhs2 (def_stmt);
3984 	  register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
3985 	  register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
3986 	}
3987     }
3988 
3989   /* Sometimes we can infer ranges from (NAME & MASK) == VALUE.  */
3990   if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
3991       && TREE_CODE (val) == INTEGER_CST)
3992     {
3993       enum tree_code low_code, high_code;
3994       tree low, high;
3995       if (is_masked_range_test (name, val, comp_code, &name, &low,
3996 				&low_code, &high, &high_code))
3997 	{
3998 	  if (low_code != ERROR_MARK)
3999 	    register_edge_assert_for_2 (name, e, low_code, name,
4000 					low, /*invert*/false, asserts);
4001 	  if (high_code != ERROR_MARK)
4002 	    register_edge_assert_for_2 (name, e, high_code, name,
4003 					high, /*invert*/false, asserts);
4004 	}
4005     }
4006 }
4007 
4008 /* Finalize the ASSERTS found for edge E and register them at GSI.  */
4009 
4010 static void
4011 finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
4012 				 vec<assert_info> &asserts)
4013 {
4014   for (unsigned i = 0; i < asserts.length (); ++i)
4015     /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4016        reachable from E.  */
4017     if (live_on_edge (e, asserts[i].name))
4018       register_new_assert_for (asserts[i].name, asserts[i].expr,
4019 			       asserts[i].comp_code, asserts[i].val,
4020 			       NULL, e, gsi);
4021 }
4022 
4023 
4024 
4025 /* Determine whether the outgoing edges of BB should receive an
4026    ASSERT_EXPR for each of the operands of BB's LAST statement.
4027    The last statement of BB must be a COND_EXPR.
4028 
4029    If any of the sub-graphs rooted at BB have an interesting use of
4030    the predicate operands, an assert location node is added to the
4031    list of assertions for the corresponding operands.  */
4032 
4033 static void
4034 find_conditional_asserts (basic_block bb, gcond *last)
4035 {
4036   gimple_stmt_iterator bsi;
4037   tree op;
4038   edge_iterator ei;
4039   edge e;
4040   ssa_op_iter iter;
4041 
4042   bsi = gsi_for_stmt (last);
4043 
4044   /* Look for uses of the operands in each of the sub-graphs
4045      rooted at BB.  We need to check each of the outgoing edges
4046      separately, so that we know what kind of ASSERT_EXPR to
4047      insert.  */
4048   FOR_EACH_EDGE (e, ei, bb->succs)
4049     {
4050       if (e->dest == bb)
4051 	continue;
4052 
4053       /* Register the necessary assertions for each operand in the
4054 	 conditional predicate.  */
4055       auto_vec<assert_info, 8> asserts;
4056       FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
4057 	register_edge_assert_for (op, e,
4058 				  gimple_cond_code (last),
4059 				  gimple_cond_lhs (last),
4060 				  gimple_cond_rhs (last), asserts);
4061       finish_register_edge_assert_for (e, bsi, asserts);
4062     }
4063 }
4064 
4065 struct case_info
4066 {
4067   tree expr;
4068   basic_block bb;
4069 };
4070 
4071 /* Compare two case labels sorting first by the destination bb index
4072    and then by the case value.  */
4073 
4074 static int
4075 compare_case_labels (const void *p1, const void *p2)
4076 {
4077   const struct case_info *ci1 = (const struct case_info *) p1;
4078   const struct case_info *ci2 = (const struct case_info *) p2;
4079   int idx1 = ci1->bb->index;
4080   int idx2 = ci2->bb->index;
4081 
4082   if (idx1 < idx2)
4083     return -1;
4084   else if (idx1 == idx2)
4085     {
4086       /* Make sure the default label is first in a group.  */
4087       if (!CASE_LOW (ci1->expr))
4088 	return -1;
4089       else if (!CASE_LOW (ci2->expr))
4090 	return 1;
4091       else
4092 	return tree_int_cst_compare (CASE_LOW (ci1->expr),
4093 				     CASE_LOW (ci2->expr));
4094     }
4095   else
4096     return 1;
4097 }
4098 
4099 /* Determine whether the outgoing edges of BB should receive an
4100    ASSERT_EXPR for each of the operands of BB's LAST statement.
4101    The last statement of BB must be a SWITCH_EXPR.
4102 
4103    If any of the sub-graphs rooted at BB have an interesting use of
4104    the predicate operands, an assert location node is added to the
4105    list of assertions for the corresponding operands.  */
4106 
4107 static void
4108 find_switch_asserts (basic_block bb, gswitch *last)
4109 {
4110   gimple_stmt_iterator bsi;
4111   tree op;
4112   edge e;
4113   struct case_info *ci;
4114   size_t n = gimple_switch_num_labels (last);
4115 #if GCC_VERSION >= 4000
4116   unsigned int idx;
4117 #else
4118   /* Work around GCC 3.4 bug (PR 37086).  */
4119   volatile unsigned int idx;
4120 #endif
4121 
4122   bsi = gsi_for_stmt (last);
4123   op = gimple_switch_index (last);
4124   if (TREE_CODE (op) != SSA_NAME)
4125     return;
4126 
4127   /* Build a vector of case labels sorted by destination label.  */
4128   ci = XNEWVEC (struct case_info, n);
4129   for (idx = 0; idx < n; ++idx)
4130     {
4131       ci[idx].expr = gimple_switch_label (last, idx);
4132       ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
4133     }
4134   edge default_edge = find_edge (bb, ci[0].bb);
4135   qsort (ci, n, sizeof (struct case_info), compare_case_labels);
4136 
4137   for (idx = 0; idx < n; ++idx)
4138     {
4139       tree min, max;
4140       tree cl = ci[idx].expr;
4141       basic_block cbb = ci[idx].bb;
4142 
4143       min = CASE_LOW (cl);
4144       max = CASE_HIGH (cl);
4145 
4146       /* If there are multiple case labels with the same destination
4147 	 we need to combine them into a single value range for the edge.  */
4148       if (idx + 1 < n && cbb == ci[idx + 1].bb)
4149 	{
4150 	  /* Skip labels until the last of the group.  */
4151 	  do {
4152 	    ++idx;
4153 	  } while (idx < n && cbb == ci[idx].bb);
4154 	  --idx;
4155 
4156 	  /* Pick up the maximum of the case label range.  */
4157 	  if (CASE_HIGH (ci[idx].expr))
4158 	    max = CASE_HIGH (ci[idx].expr);
4159 	  else
4160 	    max = CASE_LOW (ci[idx].expr);
4161 	}
4162 
4163       /* Can't extract a useful assertion out of a range that includes the
4164 	 default label.  */
4165       if (min == NULL_TREE)
4166 	continue;
4167 
4168       /* Find the edge to register the assert expr on.  */
4169       e = find_edge (bb, cbb);
4170 
4171       /* Register the necessary assertions for the operand in the
4172 	 SWITCH_EXPR.  */
4173       auto_vec<assert_info, 8> asserts;
4174       register_edge_assert_for (op, e,
4175 				max ? GE_EXPR : EQ_EXPR,
4176 				op, fold_convert (TREE_TYPE (op), min),
4177 				asserts);
4178       if (max)
4179 	register_edge_assert_for (op, e, LE_EXPR, op,
4180 				  fold_convert (TREE_TYPE (op), max),
4181 				  asserts);
4182       finish_register_edge_assert_for (e, bsi, asserts);
4183     }
4184 
4185   XDELETEVEC (ci);
4186 
4187   if (!live_on_edge (default_edge, op))
4188     return;
4189 
4190   /* Now register, along the default edge, assertions that correspond
4191      to the anti-range of each case label.  */
4192   int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
4193   if (insertion_limit == 0)
4194     return;
4195 
4196   /* We can't do this if the default case shares a label with another case.  */
4197   tree default_cl = gimple_switch_default_label (last);
4198   for (idx = 1; idx < n; idx++)
4199     {
4200       tree min, max;
4201       tree cl = gimple_switch_label (last, idx);
4202       if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
4203 	continue;
4204 
4205       min = CASE_LOW (cl);
4206       max = CASE_HIGH (cl);
4207 
4208       /* Combine contiguous case ranges to reduce the number of assertions
4209 	 to insert.  */
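      /* E.g. adjacent labels case 3: case 4: case 5: are combined into
	 the single range [3, 5], needing only one assertion on the
	 default edge.  */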
4210       for (idx = idx + 1; idx < n; idx++)
4211 	{
4212 	  tree next_min, next_max;
4213 	  tree next_cl = gimple_switch_label (last, idx);
4214 	  if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
4215 	    break;
4216 
4217 	  next_min = CASE_LOW (next_cl);
4218 	  next_max = CASE_HIGH (next_cl);
4219 
4220 	  wide_int difference = (wi::to_wide (next_min)
4221 				 - wi::to_wide (max ? max : min));
4222 	  if (wi::eq_p (difference, 1))
4223 	    max = next_max ? next_max : next_min;
4224 	  else
4225 	    break;
4226 	}
4227       idx--;
4228 
4229       if (max == NULL_TREE)
4230 	{
4231 	  /* Register the assertion OP != MIN.  */
4232 	  auto_vec<assert_info, 8> asserts;
4233 	  min = fold_convert (TREE_TYPE (op), min);
4234 	  register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
4235 				    asserts);
4236 	  finish_register_edge_assert_for (default_edge, bsi, asserts);
4237 	}
4238       else
4239 	{
4240 	  /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
4241 	     which will give OP the anti-range ~[MIN,MAX].  */
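	  /* E.g. for MIN == 3 and MAX == 7 this registers
	     (unsigned) OP - 3 > 4, which holds exactly when OP lies
	     outside [3, 7].  */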
4242 	  tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
4243 	  min = fold_convert (TREE_TYPE (uop), min);
4244 	  max = fold_convert (TREE_TYPE (uop), max);
4245 
4246 	  tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
4247 	  tree rhs = int_const_binop (MINUS_EXPR, max, min);
4248 	  register_new_assert_for (op, lhs, GT_EXPR, rhs,
4249 				   NULL, default_edge, bsi);
4250 	}
4251 
4252       if (--insertion_limit == 0)
4253 	break;
4254     }
4255 }
4256 
4257 
4258 /* Traverse all the statements in block BB looking for statements that
4259    may generate useful assertions for the SSA names in their operands.
4260    If a statement produces a useful assertion A for name N_i, then the
4261    list of assertions already generated for N_i is scanned to
4262    determine if A is actually needed.
4263 
4264    If N_i already had the assertion A at a location dominating the
4265    current location, then nothing needs to be done.  Otherwise, the
4266    new location for A is recorded instead.
4267 
4268    1- For every statement S in BB, all the variables used by S are
4269       added to bitmap FOUND_IN_SUBGRAPH.
4270 
4271    2- If statement S uses an operand N in a way that exposes a known
4272       value range for N, then if N was not already generated by an
4273       ASSERT_EXPR, create a new assert location for N.  For instance,
4274       if N is a pointer and the statement dereferences it, we can
4275       assume that N is not NULL.
4276 
4277    3- COND_EXPRs are a special case of #2.  We can derive range
4278       information from the predicate but need to insert different
4279       ASSERT_EXPRs for each of the sub-graphs rooted at the
4280       conditional block.  If the last statement of BB is a conditional
4281       expression of the form 'X op Y', then
4282 
4283       a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
4284 
4285       b) If the conditional is the only entry point to the sub-graph
4286 	 corresponding to the THEN_CLAUSE, recurse into it.  On
4287 	 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
4288 	 an ASSERT_EXPR is added for the corresponding variable.
4289 
4290       c) Repeat step (b) on the ELSE_CLAUSE.
4291 
4292       d) Mark X and Y in FOUND_IN_SUBGRAPH.
4293 
4294       For instance,
4295 
4296 	    if (a == 9)
4297 	      b = a;
4298 	    else
4299 	      b = c + 1;
4300 
4301       In this case, an assertion on the THEN clause is useful to
4302       determine that 'a' is always 9 on that edge.  However, an assertion
4303       on the ELSE clause would be unnecessary.
4304 
4305    4- If BB does not end in a conditional expression, then we recurse
4306       into BB's dominator children.
4307 
4308    At the end of the recursive traversal, every SSA name will have a
4309    list of locations where ASSERT_EXPRs should be added.  When a new
4310    location for name N is found, it is registered by calling
4311    register_new_assert_for.  That function keeps track of all the
4312    registered assertions to prevent adding unnecessary assertions.
4313    For instance, if a pointer P_4 is dereferenced more than once in a
4314    dominator tree, only the location dominating all the dereferences of
4315    P_4 will receive an ASSERT_EXPR.  */
4316 
4317 static void
4318 find_assert_locations_1 (basic_block bb, sbitmap live)
4319 {
4320   gimple *last;
4321 
4322   last = last_stmt (bb);
4323 
4324   /* If BB's last statement is a conditional statement involving integer
4325      operands, determine if we need to add ASSERT_EXPRs.  */
4326   if (last
4327       && gimple_code (last) == GIMPLE_COND
4328       && !fp_predicate (last)
4329       && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4330     find_conditional_asserts (bb, as_a <gcond *> (last));
4331 
4332   /* If BB's last statement is a switch statement involving integer
4333      operands, determine if we need to add ASSERT_EXPRs.  */
4334   if (last
4335       && gimple_code (last) == GIMPLE_SWITCH
4336       && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4337     find_switch_asserts (bb, as_a <gswitch *> (last));
4338 
4339   /* Traverse all the statements in BB marking used names and looking
4340      for statements that may infer assertions for their used operands.  */
4341   for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
4342        gsi_prev (&si))
4343     {
4344       gimple *stmt;
4345       tree op;
4346       ssa_op_iter i;
4347 
4348       stmt = gsi_stmt (si);
4349 
4350       if (is_gimple_debug (stmt))
4351 	continue;
4352 
4353       /* See if we can derive an assertion for any of STMT's operands.  */
4354       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
4355 	{
4356 	  tree value;
4357 	  enum tree_code comp_code;
4358 
4359 	  /* If op is not live beyond this stmt, do not bother to insert
4360 	     asserts for it.  */
4361 	  if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
4362 	    continue;
4363 
4364 	  /* If OP is used in such a way that we can infer a value
4365 	     range for it, and we don't find a previous assertion for
4366 	     it, create a new assertion location node for OP.  */
4367 	  if (infer_value_range (stmt, op, &comp_code, &value))
4368 	    {
4369 	      /* If we are able to infer a nonzero value range for OP,
4370 		 then walk backwards through the use-def chain to see if OP
4371 		 was set via a typecast.
4372 
4373 		 If so, then we can also infer a nonzero value range
4374 		 for the operand of the NOP_EXPR.  */
4375 	      if (comp_code == NE_EXPR && integer_zerop (value))
4376 		{
4377 		  tree t = op;
4378 		  gimple *def_stmt = SSA_NAME_DEF_STMT (t);
4379 
4380 		  while (is_gimple_assign (def_stmt)
4381 			 && CONVERT_EXPR_CODE_P
4382 			     (gimple_assign_rhs_code (def_stmt))
4383 			 && TREE_CODE
4384 			     (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
4385 			 && POINTER_TYPE_P
4386 			     (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
4387 		    {
4388 		      t = gimple_assign_rhs1 (def_stmt);
4389 		      def_stmt = SSA_NAME_DEF_STMT (t);
4390 
4391 		      /* Note we want to register the assert for the
4392 			 operand of the NOP_EXPR after SI, not after the
4393 			 conversion.  */
4394 		      if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
4395 			register_new_assert_for (t, t, comp_code, value,
4396 						 bb, NULL, si);
4397 		    }
4398 		}
4399 
4400 	      register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
4401 	    }
4402 	}
4403 
4404       /* Update live.  */
4405       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
4406 	bitmap_set_bit (live, SSA_NAME_VERSION (op));
4407       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
4408 	bitmap_clear_bit (live, SSA_NAME_VERSION (op));
4409     }
4410 
4411   /* Traverse all PHI nodes in BB, updating live.  */
4412   for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
4413        gsi_next (&si))
4414     {
4415       use_operand_p arg_p;
4416       ssa_op_iter i;
4417       gphi *phi = si.phi ();
4418       tree res = gimple_phi_result (phi);
4419 
4420       if (virtual_operand_p (res))
4421 	continue;
4422 
4423       FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
4424 	{
4425 	  tree arg = USE_FROM_PTR (arg_p);
4426 	  if (TREE_CODE (arg) == SSA_NAME)
4427 	    bitmap_set_bit (live, SSA_NAME_VERSION (arg));
4428 	}
4429 
4430       bitmap_clear_bit (live, SSA_NAME_VERSION (res));
4431     }
4432 }
4433 
4434 /* Do an RPO walk over the function computing SSA name liveness
4435    on-the-fly and deciding on assert expressions to insert.  */
4436 
4437 static void
4438 find_assert_locations (void)
4439 {
4440   int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
4441   int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
4442   int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
4443   int rpo_cnt, i;
4444 
4445   live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
4446   rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
4447   for (i = 0; i < rpo_cnt; ++i)
4448     bb_rpo[rpo[i]] = i;
4449 
4450   /* Pre-seed loop latch liveness from loop header PHI nodes.  Due to
4451      the order in which we compute liveness and insert asserts, we
4452      would otherwise fail to insert asserts into the loop latch.  */
4453   loop_p loop;
4454   FOR_EACH_LOOP (loop, 0)
4455     {
4456       i = loop->latch->index;
4457       unsigned int j = single_succ_edge (loop->latch)->dest_idx;
4458       for (gphi_iterator gsi = gsi_start_phis (loop->header);
4459 	   !gsi_end_p (gsi); gsi_next (&gsi))
4460 	{
4461 	  gphi *phi = gsi.phi ();
4462 	  if (virtual_operand_p (gimple_phi_result (phi)))
4463 	    continue;
4464 	  tree arg = gimple_phi_arg_def (phi, j);
4465 	  if (TREE_CODE (arg) == SSA_NAME)
4466 	    {
4467 	      if (live[i] == NULL)
4468 		{
4469 		  live[i] = sbitmap_alloc (num_ssa_names);
4470 		  bitmap_clear (live[i]);
4471 		}
4472 	      bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
4473 	    }
4474 	}
4475     }
4476 
4477   for (i = rpo_cnt - 1; i >= 0; --i)
4478     {
4479       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
4480       edge e;
4481       edge_iterator ei;
4482 
4483       if (!live[rpo[i]])
4484 	{
4485 	  live[rpo[i]] = sbitmap_alloc (num_ssa_names);
4486 	  bitmap_clear (live[rpo[i]]);
4487 	}
4488 
4489       /* Process BB and update the live information with uses in
4490          this block.  */
4491       find_assert_locations_1 (bb, live[rpo[i]]);
4492 
4493       /* Merge liveness into the predecessor blocks and free it.  */
4494       if (!bitmap_empty_p (live[rpo[i]]))
4495 	{
4496 	  int pred_rpo = i;
4497 	  FOR_EACH_EDGE (e, ei, bb->preds)
4498 	    {
4499 	      int pred = e->src->index;
4500 	      if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
4501 		continue;
4502 
4503 	      if (!live[pred])
4504 		{
4505 		  live[pred] = sbitmap_alloc (num_ssa_names);
4506 		  bitmap_clear (live[pred]);
4507 		}
4508 	      bitmap_ior (live[pred], live[pred], live[rpo[i]]);
4509 
4510 	      if (bb_rpo[pred] < pred_rpo)
4511 		pred_rpo = bb_rpo[pred];
4512 	    }
4513 
4514 	  /* Record the RPO number of the last visited block that needs
4515 	     live information from this block.  */
4516 	  last_rpo[rpo[i]] = pred_rpo;
4517 	}
4518       else
4519 	{
4520 	  sbitmap_free (live[rpo[i]]);
4521 	  live[rpo[i]] = NULL;
4522 	}
4523 
4524       /* We can free all successors' live bitmaps if all their
4525          predecessors have been visited already.  */
4526       FOR_EACH_EDGE (e, ei, bb->succs)
4527 	if (last_rpo[e->dest->index] == i
4528 	    && live[e->dest->index])
4529 	  {
4530 	    sbitmap_free (live[e->dest->index]);
4531 	    live[e->dest->index] = NULL;
4532 	  }
4533     }
4534 
4535   XDELETEVEC (rpo);
4536   XDELETEVEC (bb_rpo);
4537   XDELETEVEC (last_rpo);
4538   for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
4539     if (live[i])
4540       sbitmap_free (live[i]);
4541   XDELETEVEC (live);
4542 }
4543 
4544 /* Create an ASSERT_EXPR for NAME and insert it in the location
4545    indicated by LOC.  Return true if we made any edge insertions.  */
4546 
4547 static bool
4548 process_assert_insertions_for (tree name, assert_locus *loc)
4549 {
4550   /* Build the comparison expression NAME_i COMP_CODE VAL.  */
4551   gimple *stmt;
4552   tree cond;
4553   gimple *assert_stmt;
4554   edge_iterator ei;
4555   edge e;
4556 
4557   /* If we have X <=> X do not insert an assert expr for that.  */
4558   if (loc->expr == loc->val)
4559     return false;
4560 
4561   cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
4562   assert_stmt = build_assert_expr_for (cond, name);
4563   if (loc->e)
4564     {
4565       /* We have been asked to insert the assertion on an edge.  This
4566 	 is used only by COND_EXPR and SWITCH_EXPR assertions.  */
4567       gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
4568 			   || (gimple_code (gsi_stmt (loc->si))
4569 			       == GIMPLE_SWITCH));
4570 
4571       gsi_insert_on_edge (loc->e, assert_stmt);
4572       return true;
4573     }
4574 
4575   /* If the stmt iterator points at the end then this is an insertion
4576      at the beginning of a block.  */
4577   if (gsi_end_p (loc->si))
4578     {
4579       gimple_stmt_iterator si = gsi_after_labels (loc->bb);
4580       gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
4581       return false;
4582     }
4584   /* Otherwise, we can insert right after LOC->SI, as long as the
4585      statement is not the last statement in the block.  */
4586   stmt = gsi_stmt (loc->si);
4587   if (!stmt_ends_bb_p (stmt))
4588     {
4589       gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
4590       return false;
4591     }
4592 
4593   /* If STMT must be the last statement in BB, we can only insert new
4594      assertions on the non-abnormal edge out of BB.  Note that since
4595      STMT is not control flow, there can be only one non-abnormal/eh edge
4596      out of BB.  */
4597   FOR_EACH_EDGE (e, ei, loc->bb->succs)
4598     if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
4599       {
4600 	gsi_insert_on_edge (e, assert_stmt);
4601 	return true;
4602       }
4603 
4604   gcc_unreachable ();
4605 }
4606 
4607 /* Qsort helper for sorting assert locations.  If STABLE is true, don't
4608    use iterative_hash_expr because it can be unstable for -fcompare-debug;
4609    on the other hand, some pointers might then be NULL.  */
4610 
4611 template <bool stable>
4612 static int
4613 compare_assert_loc (const void *pa, const void *pb)
4614 {
4615   assert_locus * const a = *(assert_locus * const *)pa;
4616   assert_locus * const b = *(assert_locus * const *)pb;
4617 
4618   /* If stable, some asserts might be optimized away already, sort
4619      them last.  */
4620   if (stable)
4621     {
4622       if (a == NULL)
4623 	return b != NULL;
4624       else if (b == NULL)
4625 	return -1;
4626     }
4627 
4628   if (a->e == NULL && b->e != NULL)
4629     return 1;
4630   else if (a->e != NULL && b->e == NULL)
4631     return -1;
4632 
4633   /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
4634      no need to test both a->e and b->e.  */
4635 
4636   /* Sort by destination index.  */
4637   if (a->e == NULL)
4638     ;
4639   else if (a->e->dest->index > b->e->dest->index)
4640     return 1;
4641   else if (a->e->dest->index < b->e->dest->index)
4642     return -1;
4643 
4644   /* Sort by comp_code.  */
4645   if (a->comp_code > b->comp_code)
4646     return 1;
4647   else if (a->comp_code < b->comp_code)
4648     return -1;
4649 
4650   hashval_t ha, hb;
4651 
4652   /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
4653      uses DECL_UID of the VAR_DECL, so sorting might differ between
4654      -g and -g0.  When doing the removal of redundant assert exprs
4655      and commonization to successors, this does not matter, but
4656      the final sort needs to be stable.  */
4657   if (stable)
4658     {
4659       ha = 0;
4660       hb = 0;
4661     }
4662   else
4663     {
4664       ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
4665       hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
4666     }
4667 
4668   /* Break the tie using hashing and source/bb index.  */
4669   if (ha == hb)
4670     return (a->e != NULL
4671 	    ? a->e->src->index - b->e->src->index
4672 	    : a->bb->index - b->bb->index);
4673   return ha > hb ? 1 : -1;
4674 }
4675 
4676 /* Process all the insertions registered for every name N_i
4677    in NEED_ASSERT_FOR.  The list of assertions to be inserted is
4678    found in ASSERTS_FOR[i].  */
4679 
4680 static void
4681 process_assert_insertions (void)
4682 {
4683   unsigned i;
4684   bitmap_iterator bi;
4685   bool update_edges_p = false;
4686   int num_asserts = 0;
4687 
4688   if (dump_file && (dump_flags & TDF_DETAILS))
4689     dump_all_asserts (dump_file);
4690 
4691   EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4692     {
4693       assert_locus *loc = asserts_for[i];
4694       gcc_assert (loc);
4695 
4696       auto_vec<assert_locus *, 16> asserts;
4697       for (; loc; loc = loc->next)
4698 	asserts.safe_push (loc);
4699       asserts.qsort (compare_assert_loc<false>);
4700 
4701       /* Push down common asserts to successors and remove redundant ones.  */
4702       unsigned ecnt = 0;
4703       assert_locus *common = NULL;
4704       unsigned commonj = 0;
4705       for (unsigned j = 0; j < asserts.length (); ++j)
4706 	{
4707 	  loc = asserts[j];
4708 	  if (! loc->e)
4709 	    common = NULL;
4710 	  else if (! common
4711 		   || loc->e->dest != common->e->dest
4712 		   || loc->comp_code != common->comp_code
4713 		   || ! operand_equal_p (loc->val, common->val, 0)
4714 		   || ! operand_equal_p (loc->expr, common->expr, 0))
4715 	    {
4716 	      commonj = j;
4717 	      common = loc;
4718 	      ecnt = 1;
4719 	    }
4720 	  else if (loc->e == asserts[j-1]->e)
4721 	    {
4722 	      /* Remove duplicate asserts.  */
4723 	      if (commonj == j - 1)
4724 		{
4725 		  commonj = j;
4726 		  common = loc;
4727 		}
4728 	      free (asserts[j-1]);
4729 	      asserts[j-1] = NULL;
4730 	    }
4731 	  else
4732 	    {
4733 	      ecnt++;
4734 	      if (EDGE_COUNT (common->e->dest->preds) == ecnt)
4735 		{
4736 		  /* We have the same assertion on all incoming edges of a BB.
4737 		     Insert it at the beginning of that block.  */
4738 		  loc->bb = loc->e->dest;
4739 		  loc->e = NULL;
4740 		  loc->si = gsi_none ();
4741 		  common = NULL;
4742 		  /* Clear asserts commoned.  */
4743 		  for (; commonj != j; ++commonj)
4744 		    if (asserts[commonj])
4745 		      {
4746 			free (asserts[commonj]);
4747 			asserts[commonj] = NULL;
4748 		      }
4749 		}
4750 	    }
4751 	}
4752 
4753       /* The asserts vector sorting above might be unstable for
4754 	 -fcompare-debug, sort again to ensure a stable sort.  */
4755       asserts.qsort (compare_assert_loc<true>);
4756       for (unsigned j = 0; j < asserts.length (); ++j)
4757 	{
4758 	  loc = asserts[j];
4759 	  if (! loc)
4760 	    break;
4761 	  update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
4762 	  num_asserts++;
4763 	  free (loc);
4764 	}
4765     }
4766 
4767   if (update_edges_p)
4768     gsi_commit_edge_inserts ();
4769 
4770   statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
4771 			    num_asserts);
4772 }
4773 
4774 
4775 /* Traverse the flowgraph looking for conditional jumps to insert range
4776    expressions.  These range expressions are meant to provide information
4777    to optimizations that need to reason in terms of value ranges.  They
4778    will not be expanded into RTL.  For instance, given:
4779 
4780    x = ...
4781    y = ...
4782    if (x < y)
4783      y = x - 2;
4784    else
4785      x = y + 3;
4786 
4787    this pass will transform the code into:
4788 
4789    x = ...
4790    y = ...
4791    if (x < y)
4792     {
4793       x = ASSERT_EXPR <x, x < y>
4794       y = x - 2
4795     }
4796    else
4797     {
4798       y = ASSERT_EXPR <y, x >= y>
4799       x = y + 3
4800     }
4801 
4802    The idea is that once copy and constant propagation have run, other
4803    optimizations will be able to determine what ranges of values can 'x'
4804    take in different paths of the code, simply by checking the reaching
4805    definition of 'x'.  */
4806 
4807 static void
4808 insert_range_assertions (void)
4809 {
4810   need_assert_for = BITMAP_ALLOC (NULL);
4811   asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
4812 
4813   calculate_dominance_info (CDI_DOMINATORS);
4814 
4815   find_assert_locations ();
4816   if (!bitmap_empty_p (need_assert_for))
4817     {
4818       process_assert_insertions ();
4819       update_ssa (TODO_update_ssa_no_phi);
4820     }
4821 
4822   if (dump_file && (dump_flags & TDF_DETAILS))
4823     {
4824       fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
4825       dump_function_to_file (current_function_decl, dump_file, dump_flags);
4826     }
4827 
4828   free (asserts_for);
4829   BITMAP_FREE (need_assert_for);
4830 }
4831 
4832 class vrp_prop : public ssa_propagation_engine
4833 {
4834  public:
4835   enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
4836   enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
4837 
4838   void vrp_initialize (void);
4839   void vrp_finalize (bool);
4840   void check_all_array_refs (void);
4841   void check_array_ref (location_t, tree, bool);
4842   void search_for_addr_array (tree, location_t);
4843 
4844   class vr_values vr_values;
4845   /* Temporary delegator to minimize code churn.  */
4846   value_range *get_value_range (const_tree op)
4847     { return vr_values.get_value_range (op); }
4848   void set_defs_to_varying (gimple *stmt)
4849     { return vr_values.set_defs_to_varying (stmt); }
4850   void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
4851 				tree *output_p, value_range *vr)
4852     { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
4853   bool update_value_range (const_tree op, value_range *vr)
4854     { return vr_values.update_value_range (op, vr); }
4855   void extract_range_basic (value_range *vr, gimple *stmt)
4856     { vr_values.extract_range_basic (vr, stmt); }
4857   void extract_range_from_phi_node (gphi *phi, value_range *vr)
4858     { vr_values.extract_range_from_phi_node (phi, vr); }
4859 };
4860 /* Checks one ARRAY_REF in REF, located at LOCUS.  Ignores flexible
4861    arrays and "struct" hacks.  If VRP can determine that the array
4862    subscript is a constant, check if it is outside the valid range.
4863    If the array subscript is a RANGE, warn if it is non-overlapping
4864    with the valid range.
4865    IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR.  */
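/* For instance, given int a[10], a subscript like a[10] is diagnosed as
   above the bounds, while &a[10] is accepted because IGNORE_OFF_BY_ONE
   allows the one-past-the-end position.  */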
4866 
4867 void
4868 vrp_prop::check_array_ref (location_t location, tree ref,
4869 			   bool ignore_off_by_one)
4870 {
4871   value_range *vr = NULL;
4872   tree low_sub, up_sub;
4873   tree low_bound, up_bound, up_bound_p1;
4874 
4875   if (TREE_NO_WARNING (ref))
4876     return;
4877 
4878   low_sub = up_sub = TREE_OPERAND (ref, 1);
4879   up_bound = array_ref_up_bound (ref);
4880 
4881   if (!up_bound
4882       || TREE_CODE (up_bound) != INTEGER_CST
4883       || (warn_array_bounds < 2
4884 	  && array_at_struct_end_p (ref)))
4885     {
4886       /* Accesses to trailing arrays via pointers may access storage
4887 	 beyond the type's array bounds.  For such arrays, or for flexible
4888 	 array members, as well as for other arrays of an unknown size,
4889 	 replace the upper bound with a more permissive one that assumes
4890 	 the size of the largest object is PTRDIFF_MAX.  */
4891       tree eltsize = array_ref_element_size (ref);
4892 
4893       if (TREE_CODE (eltsize) != INTEGER_CST
4894 	  || integer_zerop (eltsize))
4895 	{
4896 	  up_bound = NULL_TREE;
4897 	  up_bound_p1 = NULL_TREE;
4898 	}
4899       else
4900 	{
4901 	  tree maxbound = TYPE_MAX_VALUE (ptrdiff_type_node);
4902 	  tree arg = TREE_OPERAND (ref, 0);
4903 	  poly_int64 off;
4904 
4905 	  if (get_addr_base_and_unit_offset (arg, &off) && known_gt (off, 0))
4906 	    maxbound = wide_int_to_tree (sizetype,
4907 					 wi::sub (wi::to_wide (maxbound),
4908 						  off));
4909 	  else
4910 	    maxbound = fold_convert (sizetype, maxbound);
4911 
4912 	  up_bound_p1 = int_const_binop (TRUNC_DIV_EXPR, maxbound, eltsize);
4913 
4914 	  up_bound = int_const_binop (MINUS_EXPR, up_bound_p1,
4915 				      build_int_cst (ptrdiff_type_node, 1));
4916 	}
4917     }
4918   else
4919     up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
4920 				   build_int_cst (TREE_TYPE (up_bound), 1));
4921 
4922   low_bound = array_ref_low_bound (ref);
4923 
4924   tree artype = TREE_TYPE (TREE_OPERAND (ref, 0));
4925 
4926   /* Empty array.  */
4927   if (up_bound && tree_int_cst_equal (low_bound, up_bound_p1))
4928     {
4929       warning_at (location, OPT_Warray_bounds,
4930 		  "array subscript %E is above array bounds of %qT",
4931 		  low_bound, artype);
4932       TREE_NO_WARNING (ref) = 1;
4933     }
4934 
4935   if (TREE_CODE (low_sub) == SSA_NAME)
4936     {
4937       vr = get_value_range (low_sub);
4938       if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4939         {
4940           low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
4941           up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
4942         }
4943     }
4944 
4945   if (vr && vr->type == VR_ANTI_RANGE)
4946     {
4947       if (up_bound
4948 	  && TREE_CODE (up_sub) == INTEGER_CST
4949           && (ignore_off_by_one
4950 	      ? tree_int_cst_lt (up_bound, up_sub)
4951 	      : tree_int_cst_le (up_bound, up_sub))
4952           && TREE_CODE (low_sub) == INTEGER_CST
4953           && tree_int_cst_le (low_sub, low_bound))
4954         {
4955           warning_at (location, OPT_Warray_bounds,
4956 		      "array subscript [%E, %E] is outside array bounds of %qT",
4957 		      low_sub, up_sub, artype);
4958           TREE_NO_WARNING (ref) = 1;
4959         }
4960     }
4961   else if (up_bound
4962 	   && TREE_CODE (up_sub) == INTEGER_CST
4963 	   && (ignore_off_by_one
4964 	       ? !tree_int_cst_le (up_sub, up_bound_p1)
4965 	       : !tree_int_cst_le (up_sub, up_bound)))
4966     {
4967       if (dump_file && (dump_flags & TDF_DETAILS))
4968 	{
4969 	  fprintf (dump_file, "Array bound warning for ");
4970 	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4971 	  fprintf (dump_file, "\n");
4972 	}
4973       warning_at (location, OPT_Warray_bounds,
4974 		  "array subscript %E is above array bounds of %qT",
4975 		  up_sub, artype);
4976       TREE_NO_WARNING (ref) = 1;
4977     }
4978   else if (TREE_CODE (low_sub) == INTEGER_CST
4979            && tree_int_cst_lt (low_sub, low_bound))
4980     {
4981       if (dump_file && (dump_flags & TDF_DETAILS))
4982 	{
4983 	  fprintf (dump_file, "Array bound warning for ");
4984 	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4985 	  fprintf (dump_file, "\n");
4986 	}
4987       warning_at (location, OPT_Warray_bounds,
4988 		  "array subscript %E is below array bounds of %qT",
4989 		  low_sub, artype);
4990       TREE_NO_WARNING (ref) = 1;
4991     }
4992 }
4993 
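/* An illustrative (not compiled) C example of what check_array_ref
   diagnoses; the declarations are hypothetical:

     int a[4];
     int f (void) { return a[4]; }    // subscript 4 is above the
                                      // bounds of 'int[4]'
     int *g (void) { return &a[4]; }  // no warning: one past the end
                                      // is allowed because
                                      // IGNORE_OFF_BY_ONE is true for
                                      // an ARRAY_REF inside ADDR_EXPR

   For trailing arrays and flexible array members (at -Warray-bounds=1)
   the upper bound is instead derived from PTRDIFF_MAX divided by the
   element size, as computed above.  */
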
4994 /* Search the expression T, located at LOCATION, for computations of the
4995    address of an ARRAY_REF, and call check_array_ref on each one found.  */
4996 
4997 void
4998 vrp_prop::search_for_addr_array (tree t, location_t location)
4999 {
5000   /* Check each ARRAY_REF in the reference chain.  */
5001   do
5002     {
5003       if (TREE_CODE (t) == ARRAY_REF)
5004 	check_array_ref (location, t, true /*ignore_off_by_one*/);
5005 
5006       t = TREE_OPERAND (t, 0);
5007     }
5008   while (handled_component_p (t));
5009 
5010   if (TREE_CODE (t) == MEM_REF
5011       && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
5012       && !TREE_NO_WARNING (t))
5013     {
5014       tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
5015       tree low_bound, up_bound, el_sz;
5016       offset_int idx;
5017       if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
5018 	  || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
5019 	  || !TYPE_DOMAIN (TREE_TYPE (tem)))
5020 	return;
5021 
5022       low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5023       up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5024       el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
5025       if (!low_bound
5026 	  || TREE_CODE (low_bound) != INTEGER_CST
5027 	  || !up_bound
5028 	  || TREE_CODE (up_bound) != INTEGER_CST
5029 	  || !el_sz
5030 	  || TREE_CODE (el_sz) != INTEGER_CST)
5031 	return;
5032 
5033       if (!mem_ref_offset (t).is_constant (&idx))
5034 	return;
5035 
5036       idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
5037       if (idx < 0)
5038 	{
5039 	  if (dump_file && (dump_flags & TDF_DETAILS))
5040 	    {
5041 	      fprintf (dump_file, "Array bound warning for ");
5042 	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
5043 	      fprintf (dump_file, "\n");
5044 	    }
5045 	  warning_at (location, OPT_Warray_bounds,
5046 		      "array subscript %wi is below array bounds of %qT",
5047 		      idx.to_shwi (), TREE_TYPE (tem));
5048 	  TREE_NO_WARNING (t) = 1;
5049 	}
5050       else if (idx > (wi::to_offset (up_bound)
5051 		      - wi::to_offset (low_bound) + 1))
5052 	{
5053 	  if (dump_file && (dump_flags & TDF_DETAILS))
5054 	    {
5055 	      fprintf (dump_file, "Array bound warning for ");
5056 	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
5057 	      fprintf (dump_file, "\n");
5058 	    }
5059 	  warning_at (location, OPT_Warray_bounds,
5060 		      "array subscript %wu is above array bounds of %qT",
5061 		      idx.to_uhwi (), TREE_TYPE (tem));
5062 	  TREE_NO_WARNING (t) = 1;
5063 	}
5064     }
5065 }
5066 
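/* A hypothetical numeric sketch of the MEM_REF case above: for
   "int a[4];" the folded access MEM_REF[&a, 20] carries a byte offset
   of 20, so the recovered index is 20 / sizeof (int) == 5.  That
   exceeds up_bound - low_bound + 1 == 4 (one past the end is still
   accepted), so it is reported as above the bounds of 'int[4]'; a
   negative offset would be reported as below the bounds.  */
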
5067 /* walk_tree() callback that checks whether *TP is an ARRAY_REF
5068    inside an ADDR_EXPR (in which case an array subscript one past
5069    the end of the valid range is allowed).  Call check_array_ref
5070    for each ARRAY_REF found.  The location is passed in
5071    DATA.  */
5072 
5073 static tree
5074 check_array_bounds (tree *tp, int *walk_subtree, void *data)
5075 {
5076   tree t = *tp;
5077   struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5078   location_t location;
5079 
5080   if (EXPR_HAS_LOCATION (t))
5081     location = EXPR_LOCATION (t);
5082   else
5083     location = gimple_location (wi->stmt);
5084 
5085   *walk_subtree = TRUE;
5086 
5087   vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
5088   if (TREE_CODE (t) == ARRAY_REF)
5089     vrp_prop->check_array_ref (location, t, false /*ignore_off_by_one*/);
5090 
5091   else if (TREE_CODE (t) == ADDR_EXPR)
5092     {
5093       vrp_prop->search_for_addr_array (t, location);
5094       *walk_subtree = FALSE;
5095     }
5096 
5097   return NULL_TREE;
5098 }
5099 
5100 /* A dom_walker subclass for use by vrp_prop::check_all_array_refs,
5101    to walk over all statements of all reachable BBs and call
5102    check_array_bounds on them.  */
5103 
5104 class check_array_bounds_dom_walker : public dom_walker
5105 {
5106  public:
5107   check_array_bounds_dom_walker (vrp_prop *prop)
5108     : dom_walker (CDI_DOMINATORS,
5109 		  /* Discover non-executable edges, preserving EDGE_EXECUTABLE
5110 		     flags, so that we can merge in information on
5111 		     non-executable edges from vrp_folder.  */
5112 		  REACHABLE_BLOCKS_PRESERVING_FLAGS),
5113       m_prop (prop) {}
5114   ~check_array_bounds_dom_walker () {}
5115 
5116   edge before_dom_children (basic_block) FINAL OVERRIDE;
5117 
5118  private:
5119   vrp_prop *m_prop;
5120 };
5121 
5122 /* Implementation of dom_walker::before_dom_children.
5123 
5124    Walk over all statements of BB and call check_array_bounds on them,
5125    and determine if there's a unique successor edge.  */
5126 
5127 edge
5128 check_array_bounds_dom_walker::before_dom_children (basic_block bb)
5129 {
5130   gimple_stmt_iterator si;
5131   for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5132     {
5133       gimple *stmt = gsi_stmt (si);
5134       struct walk_stmt_info wi;
5135       if (!gimple_has_location (stmt)
5136 	  || is_gimple_debug (stmt))
5137 	continue;
5138 
5139       memset (&wi, 0, sizeof (wi));
5140 
5141       wi.info = m_prop;
5142 
5143       walk_gimple_op (stmt, check_array_bounds, &wi);
5144     }
5145 
5146   /* Determine if there's a unique successor edge, and if so, return
5147      that back to dom_walker, ensuring that we don't visit blocks that
5148      became unreachable during the VRP propagation
5149      (PR tree-optimization/83312).  */
5150   return find_taken_edge (bb, NULL_TREE);
5151 }
5152 
5153 /* Walk over all statements of all reachable BBs and call check_array_bounds
5154    on them.  */
5155 
5156 void
5157 vrp_prop::check_all_array_refs ()
5158 {
5159   check_array_bounds_dom_walker w (this);
5160   w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
5161 }
5162 
5163 /* Return true if all imm uses of VAR are either in STMT, or
5164    feed (optionally through a chain of single imm uses) GIMPLE_COND
5165    in basic block COND_BB.  */
5166 
5167 static bool
5168 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
5169 {
5170   use_operand_p use_p, use2_p;
5171   imm_use_iterator iter;
5172 
5173   FOR_EACH_IMM_USE_FAST (use_p, iter, var)
5174     if (USE_STMT (use_p) != stmt)
5175       {
5176 	gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
5177 	if (is_gimple_debug (use_stmt))
5178 	  continue;
5179 	while (is_gimple_assign (use_stmt)
5180 	       && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
5181 	       && single_imm_use (gimple_assign_lhs (use_stmt),
5182 				  &use2_p, &use_stmt2))
5183 	  use_stmt = use_stmt2;
5184 	if (gimple_code (use_stmt) != GIMPLE_COND
5185 	    || gimple_bb (use_stmt) != cond_bb)
5186 	  return false;
5187       }
5188   return true;
5189 }
5190 
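/* For illustration, the shape of uses this predicate accepts, in
   GIMPLE form with hypothetical names:

     _1 = (unsigned) x_3;     // single-use chains are followed
     _2 = _1 & 31;
     if (_2 != 0) ...         // GIMPLE_COND in COND_BB

   Apart from STMT itself and debug statements, every use of x_3 must
   reach such a GIMPLE_COND in COND_BB through single-use assignments,
   otherwise false is returned.  */
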
5191 /* Handle
5192    _4 = x_3 & 31;
5193    if (_4 != 0)
5194      goto <bb 6>;
5195    else
5196      goto <bb 7>;
5197    <bb 6>:
5198    __builtin_unreachable ();
5199    <bb 7>:
5200    x_5 = ASSERT_EXPR <x_3, ...>;
5201    If x_3 has no other immediate uses (checked by caller) and
5202    VAR is the x_3 var from the ASSERT_EXPR, we can clear the low
5203    5 bits from the non-zero bitmask.  */
5204 
5205 void
5206 maybe_set_nonzero_bits (edge e, tree var)
5207 {
5208   basic_block cond_bb = e->src;
5209   gimple *stmt = last_stmt (cond_bb);
5210   tree cst;
5211 
5212   if (stmt == NULL
5213       || gimple_code (stmt) != GIMPLE_COND
5214       || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
5215 				     ? EQ_EXPR : NE_EXPR)
5216       || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
5217       || !integer_zerop (gimple_cond_rhs (stmt)))
5218     return;
5219 
5220   stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
5221   if (!is_gimple_assign (stmt)
5222       || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
5223       || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
5224     return;
5225   if (gimple_assign_rhs1 (stmt) != var)
5226     {
5227       gimple *stmt2;
5228 
5229       if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5230 	return;
5231       stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
5232       if (!gimple_assign_cast_p (stmt2)
5233 	  || gimple_assign_rhs1 (stmt2) != var
5234 	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
5235 	  || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
5236 			      != TYPE_PRECISION (TREE_TYPE (var))))
5237 	return;
5238     }
5239   cst = gimple_assign_rhs2 (stmt);
5240   set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
5241 					  wi::to_wide (cst)));
5242 }
5243 
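/* For example, with CST == 31 (0x1f) and E being the edge taken when
   x_3 & 31 == 0 holds, bits 0-4 are known to be zero on E, so they
   are cleared from VAR's non-zero-bits mask.  */
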
5244 /* Convert range assertion expressions into the implied copies and
5245    copy propagate away the copies.  Doing the trivial copy propagation
5246    here avoids the need to run the full copy propagation pass after
5247    VRP.
5248 
5249    FIXME, this will eventually lead to copy propagation removing the
5250    names that had useful range information attached to them.  For
5251    instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
5252    then N_i will have the range [3, +INF].
5253 
5254    However, by converting the assertion into the implied copy
5255    operation N_i = N_j, we will then copy-propagate N_j into the uses
5256    of N_i and lose the range information.  We may want to hold on to
5257    ASSERT_EXPRs a little while longer as the ranges could be used in
5258    things like jump threading.
5259 
5260    The problem with keeping ASSERT_EXPRs around is that passes after
5261    VRP need to handle them appropriately.
5262 
5263    Another approach would be to make the range information a first
5264    class property of the SSA_NAME so that it can be queried from
5265    any pass.  This is made somewhat more complex by the need for
5266    multiple ranges to be associated with one SSA_NAME.  */
5267 
5268 static void
5269 remove_range_assertions (void)
5270 {
5271   basic_block bb;
5272   gimple_stmt_iterator si;
5273   /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
5274      a basic block preceded by a GIMPLE_COND branching to it and to
5275      __builtin_unreachable, -1 if not yet checked, 0 otherwise.  */
5276   int is_unreachable;
5277 
5278   /* Note that the GSI iterator bump happens at the bottom of the
5279      loop and no bump is necessary if we're removing the statement
5280      referenced by the current GSI.  */
5281   FOR_EACH_BB_FN (bb, cfun)
5282     for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
5283       {
5284 	gimple *stmt = gsi_stmt (si);
5285 
5286 	if (is_gimple_assign (stmt)
5287 	    && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
5288 	  {
5289 	    tree lhs = gimple_assign_lhs (stmt);
5290 	    tree rhs = gimple_assign_rhs1 (stmt);
5291 	    tree var;
5292 
5293 	    var = ASSERT_EXPR_VAR (rhs);
5294 
5295 	    if (TREE_CODE (var) == SSA_NAME
5296 		&& !POINTER_TYPE_P (TREE_TYPE (lhs))
5297 		&& SSA_NAME_RANGE_INFO (lhs))
5298 	      {
5299 		if (is_unreachable == -1)
5300 		  {
5301 		    is_unreachable = 0;
5302 		    if (single_pred_p (bb)
5303 			&& assert_unreachable_fallthru_edge_p
5304 						    (single_pred_edge (bb)))
5305 		      is_unreachable = 1;
5306 		  }
5307 		/* Handle
5308 		   if (x_7 >= 10 && x_7 < 20)
5309 		     __builtin_unreachable ();
5310 		   x_8 = ASSERT_EXPR <x_7, ...>;
5311 		   if the only uses of x_7 are in the ASSERT_EXPR and
5312 		   in the condition.  In that case, we can copy the
5313 		   range info from x_8 computed in this pass also
5314 		   for x_7.  */
5315 		if (is_unreachable
5316 		    && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
5317 							  single_pred (bb)))
5318 		  {
5319 		    set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
5320 				    SSA_NAME_RANGE_INFO (lhs)->get_min (),
5321 				    SSA_NAME_RANGE_INFO (lhs)->get_max ());
5322 		    maybe_set_nonzero_bits (single_pred_edge (bb), var);
5323 		  }
5324 	      }
5325 
5326 	    /* Propagate the RHS into every use of the LHS.  For SSA names
5327 	       also propagate abnormals as it merely restores the original
5328 	       IL in this case (a replace_uses_by would assert).  */
5329 	    if (TREE_CODE (var) == SSA_NAME)
5330 	      {
5331 		imm_use_iterator iter;
5332 		use_operand_p use_p;
5333 		gimple *use_stmt;
5334 		FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
5335 		  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
5336 		    SET_USE (use_p, var);
5337 	      }
5338 	    else
5339 	      replace_uses_by (lhs, var);
5340 
5341 	    /* And finally, remove the copy, it is not needed.  */
5342 	    gsi_remove (&si, true);
5343 	    release_defs (stmt);
5344 	  }
5345 	else
5346 	  {
5347 	    if (!is_gimple_debug (gsi_stmt (si)))
5348 	      is_unreachable = 0;
5349 	    gsi_next (&si);
5350 	  }
5351       }
5352 }
5353 
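/* For illustration, the transformation performed above on a GIMPLE
   sketch:

     x_8 = ASSERT_EXPR <x_7, x_7 > 3>;
     use (x_8);

   becomes

     use (x_7);

   with the ASSERT_EXPR statement removed.  The range [4, +INF]
   computed for x_8 survives only in its SSA_NAME_RANGE_INFO (and, in
   the __builtin_unreachable case above, is also copied to x_7).  */
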
5354 /* Return true if STMT is interesting for VRP.  */
5355 
5356 bool
5357 stmt_interesting_for_vrp (gimple *stmt)
5358 {
5359   if (gimple_code (stmt) == GIMPLE_PHI)
5360     {
5361       tree res = gimple_phi_result (stmt);
5362       return (!virtual_operand_p (res)
5363 	      && (INTEGRAL_TYPE_P (TREE_TYPE (res))
5364 		  || POINTER_TYPE_P (TREE_TYPE (res))));
5365     }
5366   else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
5367     {
5368       tree lhs = gimple_get_lhs (stmt);
5369 
5370       /* In general, assignments with virtual operands are not useful
5371 	 for deriving ranges, with the obvious exception of calls to
5372 	 builtin functions.  */
5373       if (lhs && TREE_CODE (lhs) == SSA_NAME
5374 	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5375 	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
5376 	  && (is_gimple_call (stmt)
5377 	      || !gimple_vuse (stmt)))
5378 	return true;
5379       else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5380 	switch (gimple_call_internal_fn (stmt))
5381 	  {
5382 	  case IFN_ADD_OVERFLOW:
5383 	  case IFN_SUB_OVERFLOW:
5384 	  case IFN_MUL_OVERFLOW:
5385 	  case IFN_ATOMIC_COMPARE_EXCHANGE:
5386 	    /* These internal calls return _Complex integer type,
5387 	       but are interesting to VRP nevertheless.  */
5388 	    if (lhs && TREE_CODE (lhs) == SSA_NAME)
5389 	      return true;
5390 	    break;
5391 	  default:
5392 	    break;
5393 	  }
5394     }
5395   else if (gimple_code (stmt) == GIMPLE_COND
5396 	   || gimple_code (stmt) == GIMPLE_SWITCH)
5397     return true;
5398 
5399   return false;
5400 }
5401 
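/* For example, "i_2 = i_1 + 1", "if (i_2 > 10)" and a switch on an
   SSA name are interesting; a store "*p_3 = i_2" (no SSA name LHS)
   or "d_4 = d_5 * 2.0" (a floating-point type) is not.  */
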
5402 /* Initialization required by ssa_propagate engine.  */
5403 
5404 void
5405 vrp_prop::vrp_initialize ()
5406 {
5407   basic_block bb;
5408 
5409   FOR_EACH_BB_FN (bb, cfun)
5410     {
5411       for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
5412 	   gsi_next (&si))
5413 	{
5414 	  gphi *phi = si.phi ();
5415 	  if (!stmt_interesting_for_vrp (phi))
5416 	    {
5417 	      tree lhs = PHI_RESULT (phi);
5418 	      set_value_range_to_varying (get_value_range (lhs));
5419 	      prop_set_simulate_again (phi, false);
5420 	    }
5421 	  else
5422 	    prop_set_simulate_again (phi, true);
5423 	}
5424 
5425       for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
5426 	   gsi_next (&si))
5427         {
5428 	  gimple *stmt = gsi_stmt (si);
5429 
5430 	  /* If the statement is a control insn, we must simulate it
5431 	     at least once; failing to do so means its outgoing edges
5432 	     will never get added.  */
5433 	  if (stmt_ends_bb_p (stmt))
5434 	    prop_set_simulate_again (stmt, true);
5435 	  else if (!stmt_interesting_for_vrp (stmt))
5436 	    {
5437 	      set_defs_to_varying (stmt);
5438 	      prop_set_simulate_again (stmt, false);
5439 	    }
5440 	  else
5441 	    prop_set_simulate_again (stmt, true);
5442 	}
5443     }
5444 }
5445 
5446 /* Searches the case label vector of switch statement STMT for the index
5447    *IDX of the CASE_LABEL that includes the value VAL.  The search is
5448    restricted to the range [START_IDX, n - 1], where n is the label count.
5449 
5450    If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
5451    returned.
5452 
5453    If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
5454    it is placed in IDX and false is returned.
5455 
5456    If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
5457    returned.  */
5458 
5459 bool
5460 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
5461 {
5462   size_t n = gimple_switch_num_labels (stmt);
5463   size_t low, high;
5464 
5465   /* Find case label for minimum of the value range or the next one.
5466      At each iteration we are searching in [low, high - 1]. */
5467 
5468   for (low = start_idx, high = n; high != low; )
5469     {
5470       tree t;
5471       int cmp;
5472       /* Note that i != high, so we never ask for n. */
5473       size_t i = (high + low) / 2;
5474       t = gimple_switch_label (stmt, i);
5475 
5476       /* Cache the result of comparing CASE_LOW and val.  */
5477       cmp = tree_int_cst_compare (CASE_LOW (t), val);
5478 
5479       if (cmp == 0)
5480 	{
5481 	  /* Ranges cannot be empty. */
5482 	  *idx = i;
5483 	  return true;
5484 	}
5485       else if (cmp > 0)
5486         high = i;
5487       else
5488 	{
5489 	  low = i + 1;
5490 	  if (CASE_HIGH (t) != NULL
5491 	      && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
5492 	    {
5493 	      *idx = i;
5494 	      return true;
5495 	    }
5496         }
5497     }
5498 
5499   *idx = high;
5500   return false;
5501 }
5502 
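/* An illustrative run of the binary search above with hypothetical
   labels "case 1:", "case 5 ... 8:" and "case 10:" at indices 1-3:
   VAL == 6 yields true with *IDX == 2 (6 lies within 5 ... 8);
   VAL == 9 yields false with *IDX == 3 (the next larger label);
   VAL == 11 yields false with *IDX == n.  */
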
5503 /* Searches the case label vector of switch statement STMT for the range
5504    of CASE_LABELs that is used for values between MIN and MAX.  The first
5505    index is placed in MIN_IDX and the last in MAX_IDX.  If the range of
5506    CASE_LABELs is empty then MAX_IDX < MIN_IDX.
5507    Returns true if the default label is not needed.  */
5508 
5509 bool
5510 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
5511 		       size_t *max_idx)
5512 {
5513   size_t i, j;
5514   bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
5515   bool max_take_default = !find_case_label_index (stmt, i, max, &j);
5516 
5517   if (i == j
5518       && min_take_default
5519       && max_take_default)
5520     {
5521       /* Only the default case label reached.
5522          Return an empty range. */
5523       *min_idx = 1;
5524       *max_idx = 0;
5525       return false;
5526     }
5527   else
5528     {
5529       bool take_default = min_take_default || max_take_default;
5530       tree low, high;
5531       size_t k;
5532 
5533       if (max_take_default)
5534 	j--;
5535 
5536       /* If the case label range is continuous, we do not need
5537 	 the default case label.  Verify that.  */
5538       high = CASE_LOW (gimple_switch_label (stmt, i));
5539       if (CASE_HIGH (gimple_switch_label (stmt, i)))
5540 	high = CASE_HIGH (gimple_switch_label (stmt, i));
5541       for (k = i + 1; k <= j; ++k)
5542 	{
5543 	  low = CASE_LOW (gimple_switch_label (stmt, k));
5544 	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
5545 	    {
5546 	      take_default = true;
5547 	      break;
5548 	    }
5549 	  high = low;
5550 	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
5551 	    high = CASE_HIGH (gimple_switch_label (stmt, k));
5552 	}
5553 
5554       *min_idx = i;
5555       *max_idx = j;
5556       return !take_default;
5557     }
5558 }
5559 
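/* Continuing the hypothetical labels above ("case 1:", "case 5 ... 8:",
   "case 10:"), MIN == 5 and MAX == 10 give *MIN_IDX == 2 and
   *MAX_IDX == 3, but false is returned: the default label is still
   needed because the value 9 falls in the gap between the two labels.  */
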
5560 /* Evaluate statement STMT.  If the statement produces a useful range,
5561    return SSA_PROP_INTERESTING and record the SSA name with the
5562    interesting range into *OUTPUT_P.
5563 
5564    If STMT is a conditional branch and we can determine its truth
5565    value, the taken edge is recorded in *TAKEN_EDGE_P.
5566 
5567    If STMT produces a varying value, return SSA_PROP_VARYING.  */
5568 
5569 enum ssa_prop_result
5570 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
5571 {
5572   value_range vr = VR_INITIALIZER;
5573   tree lhs = gimple_get_lhs (stmt);
5574   extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
5575 
5576   if (*output_p)
5577     {
5578       if (update_value_range (*output_p, &vr))
5579 	{
5580 	  if (dump_file && (dump_flags & TDF_DETAILS))
5581 	    {
5582 	      fprintf (dump_file, "Found new range for ");
5583 	      print_generic_expr (dump_file, *output_p);
5584 	      fprintf (dump_file, ": ");
5585 	      dump_value_range (dump_file, &vr);
5586 	      fprintf (dump_file, "\n");
5587 	    }
5588 
5589 	  if (vr.type == VR_VARYING)
5590 	    return SSA_PROP_VARYING;
5591 
5592 	  return SSA_PROP_INTERESTING;
5593 	}
5594       return SSA_PROP_NOT_INTERESTING;
5595     }
5596 
5597   if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5598     switch (gimple_call_internal_fn (stmt))
5599       {
5600       case IFN_ADD_OVERFLOW:
5601       case IFN_SUB_OVERFLOW:
5602       case IFN_MUL_OVERFLOW:
5603       case IFN_ATOMIC_COMPARE_EXCHANGE:
5604 	/* These internal calls return _Complex integer type,
5605 	   which VRP does not track, but the immediate uses
5606 	   thereof might be interesting.  */
5607 	if (lhs && TREE_CODE (lhs) == SSA_NAME)
5608 	  {
5609 	    imm_use_iterator iter;
5610 	    use_operand_p use_p;
5611 	    enum ssa_prop_result res = SSA_PROP_VARYING;
5612 
5613 	    set_value_range_to_varying (get_value_range (lhs));
5614 
5615 	    FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
5616 	      {
5617 		gimple *use_stmt = USE_STMT (use_p);
5618 		if (!is_gimple_assign (use_stmt))
5619 		  continue;
5620 		enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
5621 		if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
5622 		  continue;
5623 		tree rhs1 = gimple_assign_rhs1 (use_stmt);
5624 		tree use_lhs = gimple_assign_lhs (use_stmt);
5625 		if (TREE_CODE (rhs1) != rhs_code
5626 		    || TREE_OPERAND (rhs1, 0) != lhs
5627 		    || TREE_CODE (use_lhs) != SSA_NAME
5628 		    || !stmt_interesting_for_vrp (use_stmt)
5629 		    || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
5630 			|| !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
5631 			|| !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
5632 		  continue;
5633 
5634 		/* If there is a change in the value range for any of the
5635 		   REALPART_EXPR/IMAGPART_EXPR immediate uses, return
5636 		   SSA_PROP_INTERESTING.  If there are any REALPART_EXPR
5637 		   or IMAGPART_EXPR immediate uses, but none of them have
5638 		   a change in their value ranges, return
5639 		   SSA_PROP_NOT_INTERESTING.  If there are no
5640 		   {REAL,IMAG}PART_EXPR uses at all,
5641 		   return SSA_PROP_VARYING.  */
5642 		value_range new_vr = VR_INITIALIZER;
5643 		extract_range_basic (&new_vr, use_stmt);
5644 		value_range *old_vr = get_value_range (use_lhs);
5645 		if (old_vr->type != new_vr.type
5646 		    || !vrp_operand_equal_p (old_vr->min, new_vr.min)
5647 		    || !vrp_operand_equal_p (old_vr->max, new_vr.max)
5648 		    || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
5649 		  res = SSA_PROP_INTERESTING;
5650 		else
5651 		  res = SSA_PROP_NOT_INTERESTING;
5652 		BITMAP_FREE (new_vr.equiv);
5653 		if (res == SSA_PROP_INTERESTING)
5654 		  {
5655 		    *output_p = lhs;
5656 		    return res;
5657 		  }
5658 	      }
5659 
5660 	    return res;
5661 	  }
5662 	break;
5663       default:
5664 	break;
5665       }
5666 
5667   /* All other statements produce nothing of interest for VRP, so mark
5668      their outputs varying and prevent further simulation.  */
5669   set_defs_to_varying (stmt);
5670 
5671   return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
5672 }
5673 
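/* For illustration, the internal-call case above in GIMPLE form:

     _5 = .ADD_OVERFLOW (x_1, y_2);   // _Complex int, not tracked
     sum_6 = REALPART_EXPR <_5>;      // range may be derivable
     ovf_7 = IMAGPART_EXPR <_5>;      // typically [0, 1]

   _5 itself is set to varying, but if extract_range_basic computes a
   changed range for a use like sum_6 or ovf_7, the call is reported
   as SSA_PROP_INTERESTING so that those uses are simulated.  */
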
5674 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5675    { VR1TYPE, VR1MIN, VR1MAX } and store the result
5676    in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
5677    possible such range.  The resulting range is not canonicalized.  */
5678 
5679 static void
5680 union_ranges (enum value_range_type *vr0type,
5681 	      tree *vr0min, tree *vr0max,
5682 	      enum value_range_type vr1type,
5683 	      tree vr1min, tree vr1max)
5684 {
5685   bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5686   bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5687 
5688   /* [] is vr0, () is vr1 in the following classification comments.  */
5689   if (mineq && maxeq)
5690     {
5691       /* [(  )] */
5692       if (*vr0type == vr1type)
5693 	/* Nothing to do for equal ranges.  */
5694 	;
5695       else if ((*vr0type == VR_RANGE
5696 		&& vr1type == VR_ANTI_RANGE)
5697 	       || (*vr0type == VR_ANTI_RANGE
5698 		   && vr1type == VR_RANGE))
5699 	{
5700 	  /* For anti-range with range union the result is varying.  */
5701 	  goto give_up;
5702 	}
5703       else
5704 	gcc_unreachable ();
5705     }
5706   else if (operand_less_p (*vr0max, vr1min) == 1
5707 	   || operand_less_p (vr1max, *vr0min) == 1)
5708     {
5709       /* [ ] ( ) or ( ) [ ]
5710 	 If the ranges have an empty intersection, the result of the union
5711 	 operation is the anti-range, or, if both are anti-ranges,
5712 	 it covers everything.  */
5713       if (*vr0type == VR_ANTI_RANGE
5714 	  && vr1type == VR_ANTI_RANGE)
5715 	goto give_up;
5716       else if (*vr0type == VR_ANTI_RANGE
5717 	       && vr1type == VR_RANGE)
5718 	;
5719       else if (*vr0type == VR_RANGE
5720 	       && vr1type == VR_ANTI_RANGE)
5721 	{
5722 	  *vr0type = vr1type;
5723 	  *vr0min = vr1min;
5724 	  *vr0max = vr1max;
5725 	}
5726       else if (*vr0type == VR_RANGE
5727 	       && vr1type == VR_RANGE)
5728 	{
5729 	  /* The result is the convex hull of both ranges.  */
5730 	  if (operand_less_p (*vr0max, vr1min) == 1)
5731 	    {
5732 	      /* If the result can be an anti-range, create one.  */
5733 	      if (TREE_CODE (*vr0max) == INTEGER_CST
5734 		  && TREE_CODE (vr1min) == INTEGER_CST
5735 		  && vrp_val_is_min (*vr0min)
5736 		  && vrp_val_is_max (vr1max))
5737 		{
5738 		  tree min = int_const_binop (PLUS_EXPR,
5739 					      *vr0max,
5740 					      build_int_cst (TREE_TYPE (*vr0max), 1));
5741 		  tree max = int_const_binop (MINUS_EXPR,
5742 					      vr1min,
5743 					      build_int_cst (TREE_TYPE (vr1min), 1));
5744 		  if (!operand_less_p (max, min))
5745 		    {
5746 		      *vr0type = VR_ANTI_RANGE;
5747 		      *vr0min = min;
5748 		      *vr0max = max;
5749 		    }
5750 		  else
5751 		    *vr0max = vr1max;
5752 		}
5753 	      else
5754 		*vr0max = vr1max;
5755 	    }
5756 	  else
5757 	    {
5758 	      /* If the result can be an anti-range, create one.  */
5759 	      if (TREE_CODE (vr1max) == INTEGER_CST
5760 		  && TREE_CODE (*vr0min) == INTEGER_CST
5761 		  && vrp_val_is_min (vr1min)
5762 		  && vrp_val_is_max (*vr0max))
5763 		{
5764 		  tree min = int_const_binop (PLUS_EXPR,
5765 					      vr1max,
5766 					      build_int_cst (TREE_TYPE (vr1max), 1));
5767 		  tree max = int_const_binop (MINUS_EXPR,
5768 					      *vr0min,
5769 					      build_int_cst (TREE_TYPE (*vr0min), 1));
5770 		  if (!operand_less_p (max, min))
5771 		    {
5772 		      *vr0type = VR_ANTI_RANGE;
5773 		      *vr0min = min;
5774 		      *vr0max = max;
5775 		    }
5776 		  else
5777 		    *vr0min = vr1min;
5778 		}
5779 	      else
5780 		*vr0min = vr1min;
5781 	    }
5782 	}
5783       else
5784 	gcc_unreachable ();
5785     }
5786   else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5787 	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5788     {
5789       /* [ (  ) ] or [(  ) ] or [ (  )] */
5790       if (*vr0type == VR_RANGE
5791 	  && vr1type == VR_RANGE)
5792 	;
5793       else if (*vr0type == VR_ANTI_RANGE
5794 	       && vr1type == VR_ANTI_RANGE)
5795 	{
5796 	  *vr0type = vr1type;
5797 	  *vr0min = vr1min;
5798 	  *vr0max = vr1max;
5799 	}
5800       else if (*vr0type == VR_ANTI_RANGE
5801 	       && vr1type == VR_RANGE)
5802 	{
5803 	  /* Arbitrarily choose the right or left gap.  */
5804 	  if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
5805 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5806 				       build_int_cst (TREE_TYPE (vr1min), 1));
5807 	  else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
5808 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5809 				       build_int_cst (TREE_TYPE (vr1max), 1));
5810 	  else
5811 	    goto give_up;
5812 	}
5813       else if (*vr0type == VR_RANGE
5814 	       && vr1type == VR_ANTI_RANGE)
5815 	/* The result covers everything.  */
5816 	goto give_up;
5817       else
5818 	gcc_unreachable ();
5819     }
5820   else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5821 	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5822     {
5823       /* ( [  ] ) or ([  ] ) or ( [  ]) */
5824       if (*vr0type == VR_RANGE
5825 	  && vr1type == VR_RANGE)
5826 	{
5827 	  *vr0type = vr1type;
5828 	  *vr0min = vr1min;
5829 	  *vr0max = vr1max;
5830 	}
5831       else if (*vr0type == VR_ANTI_RANGE
5832 	       && vr1type == VR_ANTI_RANGE)
5833 	;
5834       else if (*vr0type == VR_RANGE
5835 	       && vr1type == VR_ANTI_RANGE)
5836 	{
5837 	  *vr0type = VR_ANTI_RANGE;
5838 	  if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
5839 	    {
5840 	      *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5841 					 build_int_cst (TREE_TYPE (*vr0min), 1));
5842 	      *vr0min = vr1min;
5843 	    }
5844 	  else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
5845 	    {
5846 	      *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5847 					 build_int_cst (TREE_TYPE (*vr0max), 1));
5848 	      *vr0max = vr1max;
5849 	    }
5850 	  else
5851 	    goto give_up;
5852 	}
5853       else if (*vr0type == VR_ANTI_RANGE
5854 	       && vr1type == VR_RANGE)
5855 	/* The result covers everything.  */
5856 	goto give_up;
5857       else
5858 	gcc_unreachable ();
5859     }
5860   else if ((operand_less_p (vr1min, *vr0max) == 1
5861 	    || operand_equal_p (vr1min, *vr0max, 0))
5862 	   && operand_less_p (*vr0min, vr1min) == 1
5863 	   && operand_less_p (*vr0max, vr1max) == 1)
5864     {
5865       /* [  (  ]  ) or [   ](   ) */
5866       if (*vr0type == VR_RANGE
5867 	  && vr1type == VR_RANGE)
5868 	*vr0max = vr1max;
5869       else if (*vr0type == VR_ANTI_RANGE
5870 	       && vr1type == VR_ANTI_RANGE)
5871 	*vr0min = vr1min;
5872       else if (*vr0type == VR_ANTI_RANGE
5873 	       && vr1type == VR_RANGE)
5874 	{
5875 	  if (TREE_CODE (vr1min) == INTEGER_CST)
5876 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5877 				       build_int_cst (TREE_TYPE (vr1min), 1));
5878 	  else
5879 	    goto give_up;
5880 	}
5881       else if (*vr0type == VR_RANGE
5882 	       && vr1type == VR_ANTI_RANGE)
5883 	{
5884 	  if (TREE_CODE (*vr0max) == INTEGER_CST)
5885 	    {
5886 	      *vr0type = vr1type;
5887 	      *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5888 					 build_int_cst (TREE_TYPE (*vr0max), 1));
5889 	      *vr0max = vr1max;
5890 	    }
5891 	  else
5892 	    goto give_up;
5893 	}
5894       else
5895 	gcc_unreachable ();
5896     }
5897   else if ((operand_less_p (*vr0min, vr1max) == 1
5898 	    || operand_equal_p (*vr0min, vr1max, 0))
5899 	   && operand_less_p (vr1min, *vr0min) == 1
5900 	   && operand_less_p (vr1max, *vr0max) == 1)
5901     {
5902       /* (  [  )  ] or (   )[   ] */
5903       if (*vr0type == VR_RANGE
5904 	  && vr1type == VR_RANGE)
5905 	*vr0min = vr1min;
5906       else if (*vr0type == VR_ANTI_RANGE
5907 	       && vr1type == VR_ANTI_RANGE)
5908 	*vr0max = vr1max;
5909       else if (*vr0type == VR_ANTI_RANGE
5910 	       && vr1type == VR_RANGE)
5911 	{
5912 	  if (TREE_CODE (vr1max) == INTEGER_CST)
5913 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5914 				       build_int_cst (TREE_TYPE (vr1max), 1));
5915 	  else
5916 	    goto give_up;
5917 	}
5918       else if (*vr0type == VR_RANGE
5919 	       && vr1type == VR_ANTI_RANGE)
5920 	{
5921 	  if (TREE_CODE (*vr0min) == INTEGER_CST)
5922 	    {
5923 	      *vr0type = vr1type;
5924 	      *vr0min = vr1min;
5925 	      *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5926 					 build_int_cst (TREE_TYPE (*vr0min), 1));
5927 	    }
5928 	  else
5929 	    goto give_up;
5930 	}
5931       else
5932 	gcc_unreachable ();
5933     }
5934   else
5935     goto give_up;
5936 
5937   return;
5938 
5939 give_up:
5940   *vr0type = VR_VARYING;
5941   *vr0min = NULL_TREE;
5942   *vr0max = NULL_TREE;
5943 }
5944 
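/* Illustrative cases for union_ranges: [0, 5] union [3, 10] yields
   the convex hull [0, 10]; [INT_MIN, 2] union [7, INT_MAX] yields
   the anti-range ~[3, 6]; and [0, 5] union ~[2, 3] covers all
   values, so the function gives up and the caller sees
   VR_VARYING.  */
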
5945 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5946    { VR1TYPE, VR1MIN, VR1MAX } and store the result
5947    in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
5948    possible such range.  The resulting range is not canonicalized.  */
5949 
5950 static void
5951 intersect_ranges (enum value_range_type *vr0type,
5952 		  tree *vr0min, tree *vr0max,
5953 		  enum value_range_type vr1type,
5954 		  tree vr1min, tree vr1max)
5955 {
5956   bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5957   bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5958 
5959   /* [] is vr0, () is vr1 in the following classification comments.  */
5960   if (mineq && maxeq)
5961     {
5962       /* [(  )] */
5963       if (*vr0type == vr1type)
5964 	/* Nothing to do for equal ranges.  */
5965 	;
5966       else if ((*vr0type == VR_RANGE
5967 		&& vr1type == VR_ANTI_RANGE)
5968 	       || (*vr0type == VR_ANTI_RANGE
5969 		   && vr1type == VR_RANGE))
5970 	{
5971 	  /* For anti-range with range intersection the result is empty.  */
5972 	  *vr0type = VR_UNDEFINED;
5973 	  *vr0min = NULL_TREE;
5974 	  *vr0max = NULL_TREE;
5975 	}
5976       else
5977 	gcc_unreachable ();
5978     }
5979   else if (operand_less_p (*vr0max, vr1min) == 1
5980 	   || operand_less_p (vr1max, *vr0min) == 1)
5981     {
5982       /* [ ] ( ) or ( ) [ ]
5983 	 If the ranges have an empty intersection, the result of the
5984 	 intersect operation is the range when intersecting an
5985 	 anti-range with a range, or empty when intersecting two ranges.  */
5986       if (*vr0type == VR_RANGE
5987 	  && vr1type == VR_ANTI_RANGE)
5988 	;
5989       else if (*vr0type == VR_ANTI_RANGE
5990 	       && vr1type == VR_RANGE)
5991 	{
5992 	  *vr0type = vr1type;
5993 	  *vr0min = vr1min;
5994 	  *vr0max = vr1max;
5995 	}
5996       else if (*vr0type == VR_RANGE
5997 	       && vr1type == VR_RANGE)
5998 	{
5999 	  *vr0type = VR_UNDEFINED;
6000 	  *vr0min = NULL_TREE;
6001 	  *vr0max = NULL_TREE;
6002 	}
6003       else if (*vr0type == VR_ANTI_RANGE
6004 	       && vr1type == VR_ANTI_RANGE)
6005 	{
6006 	  /* If the anti-ranges are adjacent to each other merge them.  */
6007 	  if (TREE_CODE (*vr0max) == INTEGER_CST
6008 	      && TREE_CODE (vr1min) == INTEGER_CST
6009 	      && operand_less_p (*vr0max, vr1min) == 1
6010 	      && integer_onep (int_const_binop (MINUS_EXPR,
6011 						vr1min, *vr0max)))
6012 	    *vr0max = vr1max;
6013 	  else if (TREE_CODE (vr1max) == INTEGER_CST
6014 		   && TREE_CODE (*vr0min) == INTEGER_CST
6015 		   && operand_less_p (vr1max, *vr0min) == 1
6016 		   && integer_onep (int_const_binop (MINUS_EXPR,
6017 						     *vr0min, vr1max)))
6018 	    *vr0min = vr1min;
6019 	  /* Else arbitrarily take VR0.  */
6020 	}
6021     }
6022   else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
6023 	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
6024     {
6025       /* [ (  ) ] or [(  ) ] or [ (  )] */
6026       if (*vr0type == VR_RANGE
6027 	  && vr1type == VR_RANGE)
6028 	{
6029 	  /* If both are ranges the result is the inner one.  */
6030 	  *vr0type = vr1type;
6031 	  *vr0min = vr1min;
6032 	  *vr0max = vr1max;
6033 	}
6034       else if (*vr0type == VR_RANGE
6035 	       && vr1type == VR_ANTI_RANGE)
6036 	{
6037 	  /* Choose the right gap if the left one is empty.  */
6038 	  if (mineq)
6039 	    {
6040 	      if (TREE_CODE (vr1max) != INTEGER_CST)
6041 		*vr0min = vr1max;
6042 	      else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
6043 		       && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
6044 		*vr0min
6045 		  = int_const_binop (MINUS_EXPR, vr1max,
6046 				     build_int_cst (TREE_TYPE (vr1max), -1));
6047 	      else
6048 		*vr0min
6049 		  = int_const_binop (PLUS_EXPR, vr1max,
6050 				     build_int_cst (TREE_TYPE (vr1max), 1));
6051 	    }
6052 	  /* Choose the left gap if the right one is empty.  */
6053 	  else if (maxeq)
6054 	    {
6055 	      if (TREE_CODE (vr1min) != INTEGER_CST)
6056 		*vr0max = vr1min;
6057 	      else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
6058 		       && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
6059 		*vr0max
6060 		  = int_const_binop (PLUS_EXPR, vr1min,
6061 				     build_int_cst (TREE_TYPE (vr1min), -1));
6062 	      else
6063 		*vr0max
6064 		  = int_const_binop (MINUS_EXPR, vr1min,
6065 				     build_int_cst (TREE_TYPE (vr1min), 1));
6066 	    }
6067 	  /* Choose the anti-range if the range is effectively varying.  */
6068 	  else if (vrp_val_is_min (*vr0min)
6069 		   && vrp_val_is_max (*vr0max))
6070 	    {
6071 	      *vr0type = vr1type;
6072 	      *vr0min = vr1min;
6073 	      *vr0max = vr1max;
6074 	    }
6075 	  /* Else choose the range.  */
6076 	}
6077       else if (*vr0type == VR_ANTI_RANGE
6078 	       && vr1type == VR_ANTI_RANGE)
6079 	/* If both are anti-ranges the result is the outer one.  */
6080 	;
6081       else if (*vr0type == VR_ANTI_RANGE
6082 	       && vr1type == VR_RANGE)
6083 	{
6084 	  /* The intersection is empty.  */
6085 	  *vr0type = VR_UNDEFINED;
6086 	  *vr0min = NULL_TREE;
6087 	  *vr0max = NULL_TREE;
6088 	}
6089       else
6090 	gcc_unreachable ();
6091     }
6092   else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
6093 	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
6094     {
6095       /* ( [  ] ) or ([  ] ) or ( [  ]) */
6096       if (*vr0type == VR_RANGE
6097 	  && vr1type == VR_RANGE)
6098 	/* Choose the inner range.  */
6099 	;
6100       else if (*vr0type == VR_ANTI_RANGE
6101 	       && vr1type == VR_RANGE)
6102 	{
6103 	  /* Choose the right gap if the left is empty.  */
6104 	  if (mineq)
6105 	    {
6106 	      *vr0type = VR_RANGE;
6107 	      if (TREE_CODE (*vr0max) != INTEGER_CST)
6108 		*vr0min = *vr0max;
6109 	      else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
6110 		       && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
6111 		*vr0min
6112 		  = int_const_binop (MINUS_EXPR, *vr0max,
6113 				     build_int_cst (TREE_TYPE (*vr0max), -1));
6114 	      else
6115 		*vr0min
6116 		  = int_const_binop (PLUS_EXPR, *vr0max,
6117 				     build_int_cst (TREE_TYPE (*vr0max), 1));
6118 	      *vr0max = vr1max;
6119 	    }
6120 	  /* Choose the left gap if the right is empty.  */
6121 	  else if (maxeq)
6122 	    {
6123 	      *vr0type = VR_RANGE;
6124 	      if (TREE_CODE (*vr0min) != INTEGER_CST)
6125 		*vr0max = *vr0min;
6126 	      else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
6127 		       && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
6128 		*vr0max
6129 		  = int_const_binop (PLUS_EXPR, *vr0min,
6130 				     build_int_cst (TREE_TYPE (*vr0min), -1));
6131 	      else
6132 		*vr0max
6133 		  = int_const_binop (MINUS_EXPR, *vr0min,
6134 				     build_int_cst (TREE_TYPE (*vr0min), 1));
6135 	      *vr0min = vr1min;
6136 	    }
6137 	  /* Choose the anti-range if the range is effectively varying.  */
6138 	  else if (vrp_val_is_min (vr1min)
6139 		   && vrp_val_is_max (vr1max))
6140 	    ;
6141 	  /* Choose the anti-range if it is ~[0,0]; that range is special
6142 	     enough to warrant special-casing when vr1's range is
6143 	     relatively wide.  At least for types as wide as int, this
6144 	     covers pointers and arguments to functions like ctz.  */
6145 	  else if (*vr0min == *vr0max
6146 		   && integer_zerop (*vr0min)
6147 		   && ((TYPE_PRECISION (TREE_TYPE (*vr0min))
6148 			>= TYPE_PRECISION (integer_type_node))
6149 		       || POINTER_TYPE_P (TREE_TYPE (*vr0min)))
6150 		   && TREE_CODE (vr1max) == INTEGER_CST
6151 		   && TREE_CODE (vr1min) == INTEGER_CST
6152 		   && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
6153 		       < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
6154 	    ;
6155 	  /* Else choose the range.  */
6156 	  else
6157 	    {
6158 	      *vr0type = vr1type;
6159 	      *vr0min = vr1min;
6160 	      *vr0max = vr1max;
6161 	    }
6162 	}
6163       else if (*vr0type == VR_ANTI_RANGE
6164 	       && vr1type == VR_ANTI_RANGE)
6165 	{
6166 	  /* If both are anti-ranges the result is the outer one.  */
6167 	  *vr0type = vr1type;
6168 	  *vr0min = vr1min;
6169 	  *vr0max = vr1max;
6170 	}
6171       else if (vr1type == VR_ANTI_RANGE
6172 	       && *vr0type == VR_RANGE)
6173 	{
6174 	  /* The intersection is empty.  */
6175 	  *vr0type = VR_UNDEFINED;
6176 	  *vr0min = NULL_TREE;
6177 	  *vr0max = NULL_TREE;
6178 	}
6179       else
6180 	gcc_unreachable ();
6181     }
6182   else if ((operand_less_p (vr1min, *vr0max) == 1
6183 	    || operand_equal_p (vr1min, *vr0max, 0))
6184 	   && operand_less_p (*vr0min, vr1min) == 1)
6185     {
6186       /* [  (  ]  ) or [  ](  ) */
6187       if (*vr0type == VR_ANTI_RANGE
6188 	  && vr1type == VR_ANTI_RANGE)
6189 	*vr0max = vr1max;
6190       else if (*vr0type == VR_RANGE
6191 	       && vr1type == VR_RANGE)
6192 	*vr0min = vr1min;
6193       else if (*vr0type == VR_RANGE
6194 	       && vr1type == VR_ANTI_RANGE)
6195 	{
6196 	  if (TREE_CODE (vr1min) == INTEGER_CST)
6197 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
6198 				       build_int_cst (TREE_TYPE (vr1min), 1));
6199 	  else
6200 	    *vr0max = vr1min;
6201 	}
6202       else if (*vr0type == VR_ANTI_RANGE
6203 	       && vr1type == VR_RANGE)
6204 	{
6205 	  *vr0type = VR_RANGE;
6206 	  if (TREE_CODE (*vr0max) == INTEGER_CST)
6207 	    *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
6208 				       build_int_cst (TREE_TYPE (*vr0max), 1));
6209 	  else
6210 	    *vr0min = *vr0max;
6211 	  *vr0max = vr1max;
6212 	}
6213       else
6214 	gcc_unreachable ();
6215     }
6216   else if ((operand_less_p (*vr0min, vr1max) == 1
6217 	    || operand_equal_p (*vr0min, vr1max, 0))
6218 	   && operand_less_p (vr1min, *vr0min) == 1)
6219     {
6220       /* (  [  )  ] or (  )[  ] */
6221       if (*vr0type == VR_ANTI_RANGE
6222 	  && vr1type == VR_ANTI_RANGE)
6223 	*vr0min = vr1min;
6224       else if (*vr0type == VR_RANGE
6225 	       && vr1type == VR_RANGE)
6226 	*vr0max = vr1max;
6227       else if (*vr0type == VR_RANGE
6228 	       && vr1type == VR_ANTI_RANGE)
6229 	{
6230 	  if (TREE_CODE (vr1max) == INTEGER_CST)
6231 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
6232 				       build_int_cst (TREE_TYPE (vr1max), 1));
6233 	  else
6234 	    *vr0min = vr1max;
6235 	}
6236       else if (*vr0type == VR_ANTI_RANGE
6237 	       && vr1type == VR_RANGE)
6238 	{
6239 	  *vr0type = VR_RANGE;
6240 	  if (TREE_CODE (*vr0min) == INTEGER_CST)
6241 	    *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
6242 				       build_int_cst (TREE_TYPE (*vr0min), 1));
6243 	  else
6244 	    *vr0max = *vr0min;
6245 	  *vr0min = vr1min;
6246 	}
6247       else
6248 	gcc_unreachable ();
6249     }
6250 
6251   /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as the
6252      result for the intersection.  That's always a conservatively
6253      correct estimate, unless VR1 is a constant singleton range
6254      in which case we choose that.  */
6255   if (vr1type == VR_RANGE
6256       && is_gimple_min_invariant (vr1min)
6257       && vrp_operand_equal_p (vr1min, vr1max))
6258     {
6259       *vr0type = vr1type;
6260       *vr0min = vr1min;
6261       *vr0max = vr1max;
6262     }
6263 
6264   return;
6265 }
6266 
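/* Illustrative cases for intersect_ranges: [0, 10] intersect [5, 20]
   yields [5, 10]; [0, 10] intersect ~[3, 20] keeps the left gap and
   yields [0, 2]; [0, 5] intersect [7, 9] is empty, i.e. VR_UNDEFINED.  */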
6267 
6268 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
6269    in *VR0.  This may not be the smallest possible such range.  */
6270 
6271 static void
6272 vrp_intersect_ranges_1 (value_range *vr0, value_range *vr1)
6273 {
6274   value_range saved;
6275 
6276   /* If either range is VR_VARYING the other one wins.  */
6277   if (vr1->type == VR_VARYING)
6278     return;
6279   if (vr0->type == VR_VARYING)
6280     {
6281       copy_value_range (vr0, vr1);
6282       return;
6283     }
6284 
6285   /* When either range is VR_UNDEFINED the resulting range is
6286      VR_UNDEFINED, too.  */
6287   if (vr0->type == VR_UNDEFINED)
6288     return;
6289   if (vr1->type == VR_UNDEFINED)
6290     {
6291       set_value_range_to_undefined (vr0);
6292       return;
6293     }
6294 
6295   /* Save the original vr0 so we can return it as conservative intersection
6296      result when our worker turns things to varying.  */
6297   saved = *vr0;
6298   intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
6299 		    vr1->type, vr1->min, vr1->max);
6300   /* Make sure to canonicalize the result though as the inversion of a
6301      VR_RANGE can still be a VR_RANGE.  */
6302   set_and_canonicalize_value_range (vr0, vr0->type,
6303 				    vr0->min, vr0->max, vr0->equiv);
6304   /* If that failed, use the saved original VR0.  */
6305   if (vr0->type == VR_VARYING)
6306     {
6307       *vr0 = saved;
6308       return;
6309     }
6310   /* If the result is VR_UNDEFINED there is no need to mess with
6311      the equivalencies.  */
6312   if (vr0->type == VR_UNDEFINED)
6313     return;
6314 
6315   /* The resulting set of equivalences for range intersection is the union of
6316      the two sets.  */
6317   if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6318     bitmap_ior_into (vr0->equiv, vr1->equiv);
6319   else if (vr1->equiv && !vr0->equiv)
6320     {
6321       /* All equivalence bitmaps are allocated from the same obstack.  So
6322 	 we can use the obstack associated with VR to allocate vr0->equiv.  */
6323       vr0->equiv = BITMAP_ALLOC (vr1->equiv->obstack);
6324       bitmap_copy (vr0->equiv, vr1->equiv);
6325     }
6326 }
6327 
6328 void
6329 vrp_intersect_ranges (value_range *vr0, value_range *vr1)
6330 {
6331   if (dump_file && (dump_flags & TDF_DETAILS))
6332     {
6333       fprintf (dump_file, "Intersecting\n  ");
6334       dump_value_range (dump_file, vr0);
6335       fprintf (dump_file, "\nand\n  ");
6336       dump_value_range (dump_file, vr1);
6337       fprintf (dump_file, "\n");
6338     }
6339   vrp_intersect_ranges_1 (vr0, vr1);
6340   if (dump_file && (dump_flags & TDF_DETAILS))
6341     {
6342       fprintf (dump_file, "to\n  ");
6343       dump_value_range (dump_file, vr0);
6344       fprintf (dump_file, "\n");
6345     }
6346 }
6347 
6348 /* Meet operation for value ranges.  Given two value ranges VR0 and
6349    VR1, store in VR0 a range that contains both VR0 and VR1.  This
6350    may not be the smallest possible such range.  */
6351 
6352 static void
6353 vrp_meet_1 (value_range *vr0, const value_range *vr1)
6354 {
6355   value_range saved;
6356 
6357   if (vr0->type == VR_UNDEFINED)
6358     {
6359       set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
6360       return;
6361     }
6362 
6363   if (vr1->type == VR_UNDEFINED)
6364     {
6365       /* VR0 already has the resulting range.  */
6366       return;
6367     }
6368 
6369   if (vr0->type == VR_VARYING)
6370     {
6371       /* Nothing to do.  VR0 already has the resulting range.  */
6372       return;
6373     }
6374 
6375   if (vr1->type == VR_VARYING)
6376     {
6377       set_value_range_to_varying (vr0);
6378       return;
6379     }
6380 
6381   saved = *vr0;
6382   union_ranges (&vr0->type, &vr0->min, &vr0->max,
6383 		vr1->type, vr1->min, vr1->max);
6384   if (vr0->type == VR_VARYING)
6385     {
6386       /* Failed to find an efficient meet.  Before giving up and setting
6387 	 the result to VARYING, see if we can at least derive a useful
6388 	 anti-range.  FIXME, all this nonsense about distinguishing
6389 	 anti-ranges from ranges is necessary because of the odd
6390 	 semantics of range_includes_zero_p and friends.  */
6391       if (((saved.type == VR_RANGE
6392 	    && range_includes_zero_p (saved.min, saved.max) == 0)
6393 	   || (saved.type == VR_ANTI_RANGE
6394 	       && range_includes_zero_p (saved.min, saved.max) == 1))
6395 	  && ((vr1->type == VR_RANGE
6396 	       && range_includes_zero_p (vr1->min, vr1->max) == 0)
6397 	      || (vr1->type == VR_ANTI_RANGE
6398 		  && range_includes_zero_p (vr1->min, vr1->max) == 1)))
6399 	{
6400 	  set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
6401 
6402 	  /* Since this meet operation did not result from the meeting of
6403 	     two equivalent names, VR0 cannot have any equivalences.  */
6404 	  if (vr0->equiv)
6405 	    bitmap_clear (vr0->equiv);
6406 	  return;
6407 	}
6408 
6409       set_value_range_to_varying (vr0);
6410       return;
6411     }
6412   set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
6413 				    vr0->equiv);
6414   if (vr0->type == VR_VARYING)
6415     return;
6416 
6417   /* The resulting set of equivalences is always the intersection of
6418      the two sets.  */
6419   if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6420     bitmap_and_into (vr0->equiv, vr1->equiv);
6421   else if (vr0->equiv && !vr1->equiv)
6422     bitmap_clear (vr0->equiv);
6423 }
6424 
6425 void
6426 vrp_meet (value_range *vr0, const value_range *vr1)
6427 {
6428   if (dump_file && (dump_flags & TDF_DETAILS))
6429     {
6430       fprintf (dump_file, "Meeting\n  ");
6431       dump_value_range (dump_file, vr0);
6432       fprintf (dump_file, "\nand\n  ");
6433       dump_value_range (dump_file, vr1);
6434       fprintf (dump_file, "\n");
6435     }
6436   vrp_meet_1 (vr0, vr1);
6437   if (dump_file && (dump_flags & TDF_DETAILS))
6438     {
6439       fprintf (dump_file, "to\n  ");
6440       dump_value_range (dump_file, vr0);
6441       fprintf (dump_file, "\n");
6442     }
6443 }
6444 
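/* For example, meeting [0, 3] from one incoming edge with [5, 8] from
   another yields the convex hull [0, 8] via union_ranges.  Note that
   equivalences are intersected here, since a meet may only retain
   facts that hold on every path.  */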
6445 
6446 /* Visit all arguments for PHI node PHI that flow through executable
6447    edges.  If a valid value range can be derived from all the incoming
6448    value ranges, set a new range for the LHS of PHI.  */
6449 
6450 enum ssa_prop_result
6451 vrp_prop::visit_phi (gphi *phi)
6452 {
6453   tree lhs = PHI_RESULT (phi);
6454   value_range vr_result = VR_INITIALIZER;
6455   extract_range_from_phi_node (phi, &vr_result);
6456   if (update_value_range (lhs, &vr_result))
6457     {
6458       if (dump_file && (dump_flags & TDF_DETAILS))
6459 	{
6460 	  fprintf (dump_file, "Found new range for ");
6461 	  print_generic_expr (dump_file, lhs);
6462 	  fprintf (dump_file, ": ");
6463 	  dump_value_range (dump_file, &vr_result);
6464 	  fprintf (dump_file, "\n");
6465 	}
6466 
6467       if (vr_result.type == VR_VARYING)
6468 	return SSA_PROP_VARYING;
6469 
6470       return SSA_PROP_INTERESTING;
6471     }
6472 
6473   /* Nothing changed, don't add outgoing edges.  */
6474   return SSA_PROP_NOT_INTERESTING;
6475 }
6476 
6477 class vrp_folder : public substitute_and_fold_engine
6478 {
6479  public:
6480   tree get_value (tree) FINAL OVERRIDE;
6481   bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
6482   bool fold_predicate_in (gimple_stmt_iterator *);
6483 
6484   class vr_values *vr_values;
6485 
6486   /* Delegators.  */
6487   tree vrp_evaluate_conditional (tree_code code, tree op0,
6488 				 tree op1, gimple *stmt)
6489     { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
6490   bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
6491     { return vr_values->simplify_stmt_using_ranges (gsi); }
6492   tree op_with_constant_singleton_value_range (tree op)
6493     { return vr_values->op_with_constant_singleton_value_range (op); }
6494 };
6495 
6496 /* If the statement pointed by SI has a predicate whose value can be
6497    computed using the value range information computed by VRP, compute
6498    its value and return true.  Otherwise, return false.  */
6499 
6500 bool
6501 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
6502 {
6503   bool assignment_p = false;
6504   tree val;
6505   gimple *stmt = gsi_stmt (*si);
6506 
6507   if (is_gimple_assign (stmt)
6508       && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
6509     {
6510       assignment_p = true;
6511       val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
6512 				      gimple_assign_rhs1 (stmt),
6513 				      gimple_assign_rhs2 (stmt),
6514 				      stmt);
6515     }
6516   else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6517     val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6518 				    gimple_cond_lhs (cond_stmt),
6519 				    gimple_cond_rhs (cond_stmt),
6520 				    stmt);
6521   else
6522     return false;
6523 
6524   if (val)
6525     {
6526       if (assignment_p)
6527         val = fold_convert (gimple_expr_type (stmt), val);
6528 
6529       if (dump_file)
6530 	{
6531 	  fprintf (dump_file, "Folding predicate ");
6532 	  print_gimple_expr (dump_file, stmt, 0);
6533 	  fprintf (dump_file, " to ");
6534 	  print_generic_expr (dump_file, val);
6535 	  fprintf (dump_file, "\n");
6536 	}
6537 
6538       if (is_gimple_assign (stmt))
6539 	gimple_assign_set_rhs_from_tree (si, val);
6540       else
6541 	{
6542 	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
6543 	  gcond *cond_stmt = as_a <gcond *> (stmt);
6544 	  if (integer_zerop (val))
6545 	    gimple_cond_make_false (cond_stmt);
6546 	  else if (integer_onep (val))
6547 	    gimple_cond_make_true (cond_stmt);
6548 	  else
6549 	    gcc_unreachable ();
6550 	}
6551 
6552       return true;
6553     }
6554 
6555   return false;
6556 }
6557 
6558 /* Callback for substitute_and_fold folding the stmt at *SI.  */
6559 
6560 bool
6561 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
6562 {
6563   if (fold_predicate_in (si))
6564     return true;
6565 
6566   return simplify_stmt_using_ranges (si);
6567 }
6568 
6569 /* If OP has a value range with a single constant value return that,
6570    otherwise return NULL_TREE.  This returns OP itself if OP is a
6571    constant.
6572 
6573    Implemented as a pure wrapper right now, but this will change.  */
6574 
6575 tree
6576 vrp_folder::get_value (tree op)
6577 {
6578   return op_with_constant_singleton_value_range (op);
6579 }
6580 
6581 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
6582    argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
6583    BB.  If no such ASSERT_EXPR is found, return OP.  */
6584 
6585 static tree
6586 lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
6587 {
6588   imm_use_iterator imm_iter;
6589   gimple *use_stmt;
6590   use_operand_p use_p;
6591 
6592   if (TREE_CODE (op) == SSA_NAME)
6593     {
6594       FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
6595 	{
6596 	  use_stmt = USE_STMT (use_p);
6597 	  if (use_stmt != stmt
6598 	      && gimple_assign_single_p (use_stmt)
6599 	      && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
6600 	      && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
6601 	      && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
6602 	    return gimple_assign_lhs (use_stmt);
6603 	}
6604     }
6605   return op;
6606 }
6607 
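/* For illustration, a GIMPLE sketch of what lhs_of_dominating_assert
   looks for, with hypothetical names:

     <bb 2>:                          // dominates BB
     i_4 = ASSERT_EXPR <i_1, i_1 > 0>;
     ...
     <bb 4>:                          // BB, where STMT uses i_1

   Given OP == i_1, the function returns i_4, whose narrower range
   [1, +INF] is known to hold at BB because the assertion dominates
   it.  */
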
6608 /* A hack: passes the current vr_values to the jump threading callback.  */
6609 static class vr_values *x_vr_values;
6610 
6611 /* A trivial wrapper so that we can present the generic jump threading
6612    code with a simple API for simplifying statements.  STMT is the
6613    statement we want to simplify, WITHIN_STMT provides the location for
6614    any overflow warnings, and BB is the block being threaded from.  */
6615 
6616 static tree
6617 simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
6618     class avail_exprs_stack *avail_exprs_stack,
6619     basic_block bb)
6620 {
6621   /* First see if the conditional is in the hash table.  */
6622   tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
6623   if (cached_lhs && is_gimple_min_invariant (cached_lhs))
6624     return cached_lhs;
6625 
6626   vr_values *vr_values = x_vr_values;
6627   if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6628     {
6629       tree op0 = gimple_cond_lhs (cond_stmt);
6630       op0 = lhs_of_dominating_assert (op0, bb, stmt);
6631 
6632       tree op1 = gimple_cond_rhs (cond_stmt);
6633       op1 = lhs_of_dominating_assert (op1, bb, stmt);
6634 
6635       return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6636 						  op0, op1, within_stmt);
6637     }
6638 
6639   /* We simplify a switch statement by trying to determine which case label
6640      will be taken.  If we are successful then we return the corresponding
6641      CASE_LABEL_EXPR.  */
6642   if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
6643     {
6644       tree op = gimple_switch_index (switch_stmt);
6645       if (TREE_CODE (op) != SSA_NAME)
6646 	return NULL_TREE;
6647 
6648       op = lhs_of_dominating_assert (op, bb, stmt);
6649 
6650       value_range *vr = vr_values->get_value_range (op);
6651       if ((vr->type != VR_RANGE && vr->type != VR_ANTI_RANGE)
6652 	  || symbolic_range_p (vr))
6653 	return NULL_TREE;
6654 
6655       if (vr->type == VR_RANGE)
6656 	{
6657 	  size_t i, j;
6658 	  /* Get the range of labels that contain a part of the operand's
6659 	     value range.  */
6660 	  find_case_label_range (switch_stmt, vr->min, vr->max, &i, &j);
6661 
6662 	  /* Is there only one such label?  */
6663 	  if (i == j)
6664 	    {
6665 	      tree label = gimple_switch_label (switch_stmt, i);
6666 
6667 	      /* The i'th label will be taken only if the value range of the
6668 		 operand is entirely within the bounds of this label.  */
6669 	      if (CASE_HIGH (label) != NULL_TREE
6670 		  ? (tree_int_cst_compare (CASE_LOW (label), vr->min) <= 0
6671 		     && tree_int_cst_compare (CASE_HIGH (label), vr->max) >= 0)
6672 		  : (tree_int_cst_equal (CASE_LOW (label), vr->min)
6673 		     && tree_int_cst_equal (vr->min, vr->max)))
6674 		return label;
6675 	    }
6676 
6677 	  /* If there are no such labels then the default label will be
6678 	     taken.  */
6679 	  if (i > j)
6680 	    return gimple_switch_label (switch_stmt, 0);
6681 	}
6682 
6683       if (vr->type == VR_ANTI_RANGE)
6684 	{
6685 	  unsigned n = gimple_switch_num_labels (switch_stmt);
6686 	  tree min_label = gimple_switch_label (switch_stmt, 1);
6687 	  tree max_label = gimple_switch_label (switch_stmt, n - 1);
6688 
6689 	  /* The default label will be taken only if the anti-range of the
6690 	     operand is entirely outside the bounds of all the (non-default)
6691 	     case labels.  */
6692 	  if (tree_int_cst_compare (vr->min, CASE_LOW (min_label)) <= 0
6693 	      && (CASE_HIGH (max_label) != NULL_TREE
6694 		  ? tree_int_cst_compare (vr->max, CASE_HIGH (max_label)) >= 0
6695 		  : tree_int_cst_compare (vr->max, CASE_LOW (max_label)) >= 0))
6696 	  return gimple_switch_label (switch_stmt, 0);
6697 	}
6698 
6699       return NULL_TREE;
6700     }
6701 
6702   if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
6703     {
6704       tree lhs = gimple_assign_lhs (assign_stmt);
6705       if (TREE_CODE (lhs) == SSA_NAME
6706 	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6707 	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
6708 	  && stmt_interesting_for_vrp (stmt))
6709 	{
6710 	  edge dummy_e;
6711 	  tree dummy_tree;
6712 	  value_range new_vr = VR_INITIALIZER;
6713 	  vr_values->extract_range_from_stmt (stmt, &dummy_e,
6714 					      &dummy_tree, &new_vr);
6715 	  if (range_int_cst_singleton_p (&new_vr))
6716 	    return new_vr.min;
6717 	}
6718     }
6719 
6720   return NULL_TREE;
6721 }
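
/* Sketch of the switch case (illustrative only): if the index i_5 is
   known to be in the range [3, 3] and the statement is

     switch (i_5) { case 1: ...; case 3: ...; default: ... }

   then find_case_label_range selects only the label for 3, the bounds
   check succeeds, and the CASE_LABEL_EXPR for "case 3" is returned so
   the threader knows which outgoing edge will be taken.  */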

class vrp_dom_walker : public dom_walker
{
public:
  vrp_dom_walker (cdi_direction direction,
		  class const_and_copies *const_and_copies,
		  class avail_exprs_stack *avail_exprs_stack)
    : dom_walker (direction, REACHABLE_BLOCKS),
      m_const_and_copies (const_and_copies),
      m_avail_exprs_stack (avail_exprs_stack),
      m_dummy_cond (NULL) {}

  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

  class vr_values *vr_values;

private:
  class const_and_copies *m_const_and_copies;
  class avail_exprs_stack *m_avail_exprs_stack;

  gcond *m_dummy_cond;
};

/* Called before processing dominator children of BB.  We want to look
   at ASSERT_EXPRs and record information from them in the appropriate
   tables.

   We could look at other statements here, but that is not expected to
   significantly increase the number of jump threads we discover.  */

edge
vrp_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  m_avail_exprs_stack->push_marker ();
  m_const_and_copies->push_marker ();
  for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (gimple_assign_single_p (stmt)
	  && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
	{
	  tree rhs1 = gimple_assign_rhs1 (stmt);
	  tree cond = TREE_OPERAND (rhs1, 1);
	  tree inverted = invert_truthvalue (cond);
	  vec<cond_equivalence> p;
	  p.create (3);
	  record_conditions (&p, cond, inverted);
	  for (unsigned int i = 0; i < p.length (); i++)
	    m_avail_exprs_stack->record_cond (&p[i]);

	  tree lhs = gimple_assign_lhs (stmt);
	  m_const_and_copies->record_const_or_copy (lhs,
						    TREE_OPERAND (rhs1, 0));
	  p.release ();
	  continue;
	}
      break;
    }
  return NULL;
}
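
/* For example (illustrative only), when BB starts with

     x_7 = ASSERT_EXPR <x_3, x_3 < 10>;

   the walker records the conditions derived from "x_3 < 10" and its
   inversion in the available-expression table, and notes that x_7 is
   a copy of x_3, so later lookups during threading can simplify
   comparisons against either name.  */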

/* Called after processing dominator children of BB.  This is where we
   actually call into the threader.  */

void
vrp_dom_walker::after_dom_children (basic_block bb)
{
  if (!m_dummy_cond)
    m_dummy_cond = gimple_build_cond (NE_EXPR,
				      integer_zero_node, integer_zero_node,
				      NULL, NULL);

  x_vr_values = vr_values;
  thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
			 m_avail_exprs_stack, NULL,
			 simplify_stmt_for_jump_threading);
  x_vr_values = NULL;

  m_avail_exprs_stack->pop_to_marker ();
  m_const_and_copies->pop_to_marker ();
}
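
/* The dummy GIMPLE_COND built above never appears in the IL; the
   generic threader is expected to reuse it as scratch storage,
   substituting known values into a copy of the real condition before
   asking simplify_stmt_for_jump_threading about the result.  */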

/* Blocks which have more than one predecessor and more than
   one successor present jump threading opportunities, i.e.,
   when the block is reached from a specific predecessor, we
   may be able to determine which of the outgoing edges will
   be traversed.  When this optimization applies, we are able
   to avoid conditionals at runtime and we may expose secondary
   optimization opportunities.

   This routine is effectively a driver for the generic jump
   threading code.  It basically just presents the generic code
   with edges that may be suitable for jump threading.

   Unlike DOM, we do not iterate VRP if jump threading was successful.
   While iterating may expose new opportunities for VRP, it is expected
   those opportunities would be very limited and the compile time cost
   to expose those opportunities would be significant.

   As jump threading opportunities are discovered, they are registered
   for later realization.  */

static void
identify_jump_threads (class vr_values *vr_values)
{
  int i;
  edge e;

  /* Ugh.  When substituting values earlier in this pass we can
     wipe the dominance information.  So rebuild the dominator
     information as we need it within the jump threading code.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* We do not allow VRP information to be used for jump threading
     across a back edge in the CFG.  Otherwise it becomes too
     difficult to avoid eliminating loop exit tests.  Of course
     EDGE_DFS_BACK is not accurate at this time so we have to
     recompute it.  */
  mark_dfs_back_edges ();

  /* Do not thread across edges we are about to remove.  Just marking
     them as EDGE_IGNORE will do.  */
  FOR_EACH_VEC_ELT (to_remove_edges, i, e)
    e->flags |= EDGE_IGNORE;

  /* Allocate our unwinder stack to unwind any temporary equivalences
     that might be recorded.  */
  const_and_copies *equiv_stack = new const_and_copies ();

  hash_table<expr_elt_hasher> *avail_exprs
    = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack *avail_exprs_stack
    = new class avail_exprs_stack (avail_exprs);

  vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
  walker.vr_values = vr_values;
  walker.walk (cfun->cfg->x_entry_block_ptr);

  /* Clear EDGE_IGNORE.  */
  FOR_EACH_VEC_ELT (to_remove_edges, i, e)
    e->flags &= ~EDGE_IGNORE;

  /* We do not actually update the CFG or SSA graphs at this point as
     ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
     handle ASSERT_EXPRs gracefully.  */
  delete equiv_stack;
  delete avail_exprs;
  delete avail_exprs_stack;
}
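
/* A small example of the opportunity being identified (illustrative,
   not from the sources):

     if (x_3 > 4)       <-- block A
       ...
     if (x_3 > 10)      <-- block B, reached from A's false edge

   When B is entered from the false edge of A, VRP knows x_3 <= 4, so
   the second condition must also be false; the threader registers a
   jump from that predecessor edge directly to the false successor of
   B, eliminating the runtime test on that path.  */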

/* Traverse all the blocks folding conditionals with known ranges.  */

void
vrp_prop::vrp_finalize (bool warn_array_bounds_p)
{
  size_t i;

  /* We have completed propagating through the lattice.  */
  vr_values.set_lattice_propagation_complete ();

  if (dump_file)
    {
      fprintf (dump_file, "\nValue ranges after VRP:\n\n");
      vr_values.dump_all_value_ranges (dump_file);
      fprintf (dump_file, "\n");
    }

  /* Record the computed ranges in the SSA names: nonnull-ness for
     qualifying pointers, [min, max] range info for non-pointers.  */
  for (i = 0; i < num_ssa_names; i++)
    {
      tree name = ssa_name (i);
      if (!name)
	continue;

      value_range *vr = get_value_range (name);
      if (vr->type == VR_VARYING
	  || vr->type == VR_UNDEFINED
	  || TREE_CODE (vr->min) != INTEGER_CST
	  || TREE_CODE (vr->max) != INTEGER_CST)
	continue;

      if (POINTER_TYPE_P (TREE_TYPE (name))
	  && ((vr->type == VR_RANGE
	       && range_includes_zero_p (vr->min, vr->max) == 0)
	      || (vr->type == VR_ANTI_RANGE
		  && range_includes_zero_p (vr->min, vr->max) == 1)))
	set_ptr_nonnull (name);
      else if (!POINTER_TYPE_P (TREE_TYPE (name)))
	set_range_info (name, vr->type,
			wi::to_wide (vr->min),
			wi::to_wide (vr->max));
    }

  /* If we're checking array refs, we want to merge information on
     the executability of each edge between vrp_folder and the
     check_array_bounds_dom_walker: each can clear the
     EDGE_EXECUTABLE flag on edges, in different ways.

     Hence, if we're going to call check_all_array_refs, set
     the flag on every edge now, rather than in
     check_array_bounds_dom_walker's ctor; vrp_folder may clear
     it from some edges.  */
  if (warn_array_bounds && warn_array_bounds_p)
    set_all_edges_as_executable (cfun);

  class vrp_folder vrp_folder;
  vrp_folder.vr_values = &vr_values;
  vrp_folder.substitute_and_fold ();

  if (warn_array_bounds && warn_array_bounds_p)
    check_all_array_refs ();
}
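
/* For instance (illustrative only), if VRP proves that a pointer q_2
   has the anti-range ~[0, 0], it is marked nonnull via
   set_ptr_nonnull; if an integer i_4 is proved to lie in [0, 255],
   set_range_info preserves that fact for later passes even though
   VRP's own lattice is discarded at the end of the pass.  */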

/* Main entry point to VRP (Value Range Propagation).  This pass is
   loosely based on J. R. C. Patterson, ``Accurate Static Branch
   Prediction by Value Range Propagation,'' in SIGPLAN Conference on
   Programming Language Design and Implementation, pp. 67-78, 1995.
   Also available at http://citeseer.ist.psu.edu/patterson95accurate.html

   This is essentially an SSA-CCP pass modified to deal with ranges
   instead of constants.

   While propagating ranges, we may find that two or more SSA names
   have equivalent, though distinct, ranges.  For instance,

     1	x_9 = p_3->a;
     2	p_4 = ASSERT_EXPR <p_3, p_3 != 0>
     3	if (p_4 == q_2)
     4	  p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
     5	endif
     6	if (q_2)

   In the code above, pointer p_5 has range [q_2, q_2], but from the
   code we can also determine that p_5 cannot be NULL and, if q_2 had
   a non-varying range, p_5's range should also be compatible with it.

   These equivalences are created by two kinds of expressions:
   ASSERT_EXPRs and copy operations.  Since p_5 is an assertion on
   p_4, and p_4 was the result of another assertion, we can use the
   fact that p_5 and p_4 are equivalent when evaluating p_5's range.

   Together with value ranges, we also propagate these equivalences
   between names so that we can take advantage of information from
   multiple ranges when doing final replacement.  Note that this
   equivalency relation is transitive but not symmetric.

   In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
   cannot assert that q_2 is equivalent to p_5 because q_2 may be used
   in contexts where that assertion does not hold (e.g., in line 6).

   TODO: the main difference between this pass and Patterson's is that
   we do not propagate edge probabilities.  We only compute whether
   edges can be taken or not.  That is, instead of having a spectrum
   of jump probabilities between 0 and 1, we only deal with 0, 1 and
   DON'T KNOW.  In the future, it may be worthwhile to propagate
   probabilities to aid branch prediction.  */
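
/* As a concrete illustration of range propagation (not from the
   sources): given j_2 = i_1 + 1 with i_1 known to be in [0, 9], the
   pass computes j_2 in [1, 10]; a subsequent test such as
   "if (j_2 > 0)" can then be folded to true during the
   substitute_and_fold walk in vrp_finalize.  */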

static unsigned int
execute_vrp (bool warn_array_bounds_p)
{
  int i;
  edge e;
  switch_update *su;

  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();

  /* ???  This ends up using stale EDGE_DFS_BACK for liveness computation.
     Inserting assertions may split edges which will invalidate
     EDGE_DFS_BACK.  */
  insert_range_assertions ();

  to_remove_edges.create (10);
  to_update_switch_stmts.create (5);
  threadedge_initialize_values ();

  /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
  mark_dfs_back_edges ();

  class vrp_prop vrp_prop;
  vrp_prop.vrp_initialize ();
  vrp_prop.ssa_propagate ();
  vrp_prop.vrp_finalize (warn_array_bounds_p);

  /* We must identify jump threading opportunities before we release
     the datastructures built by VRP.  */
  identify_jump_threads (&vrp_prop.vr_values);

  /* A comparison of an SSA_NAME against a constant where the SSA_NAME
     was set by a type conversion can often be rewritten to use the
     RHS of the type conversion.

     However, doing so inhibits jump threading through the comparison.
     So that transformation is not performed until after jump threading
     is complete.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple *last = last_stmt (bb);
      if (last && gimple_code (last) == GIMPLE_COND)
	vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
    }

  free_numbers_of_iterations_estimates (cfun);

  /* ASSERT_EXPRs must be removed before finalizing jump threads
     as finalizing jump threads calls the CFG cleanup code which
     does not properly handle ASSERT_EXPRs.  */
  remove_range_assertions ();

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  /* We identified all the jump threading opportunities earlier, but could
     not transform the CFG at that time.  This routine transforms the
     CFG and arranges for the dominator tree to be rebuilt if necessary.

     Note the SSA graph update will occur during the normal TODO
     processing by the pass manager.  */
  thread_through_all_blocks (false);

  /* Remove dead edges from SWITCH_EXPR optimization.  This leaves the
     CFG in a broken state and requires a cfg_cleanup run.  */
  FOR_EACH_VEC_ELT (to_remove_edges, i, e)
    remove_edge (e);
  /* Update SWITCH_EXPR case label vector.  */
  FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
    {
      size_t j;
      size_t n = TREE_VEC_LENGTH (su->vec);
      tree label;
      gimple_switch_set_num_labels (su->stmt, n);
      for (j = 0; j < n; j++)
	gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
      /* As we may have replaced the default label with a regular one
	 make sure to make it a real default label again.  This ensures
	 optimal expansion.  */
      label = gimple_switch_label (su->stmt, 0);
      CASE_LOW (label) = NULL_TREE;
      CASE_HIGH (label) = NULL_TREE;
    }

  if (to_remove_edges.length () > 0)
    {
      free_dominance_info (CDI_DOMINATORS);
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  to_remove_edges.release ();
  to_update_switch_stmts.release ();
  threadedge_finalize_values ();

  scev_finalize ();
  loop_optimizer_finalize ();
  return 0;
}

namespace {

const pass_data pass_data_vrp =
{
  GIMPLE_PASS, /* type */
  "vrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_vrp : public gimple_opt_pass
{
public:
  pass_vrp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_vrp (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      warn_array_bounds_p = param;
    }
  virtual bool gate (function *) { return flag_tree_vrp != 0; }
  virtual unsigned int execute (function *)
    { return execute_vrp (warn_array_bounds_p); }

 private:
  bool warn_array_bounds_p;
}; // class pass_vrp

} // anon namespace

gimple_opt_pass *
make_pass_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt);
}

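/* Usage note (context, not part of this file): the pass manager
   instantiates pass_vrp from passes.def, and set_pass_param selects
   whether a given instance may emit -Warray-bounds diagnostics; only
   an instance created with the parameter set to true reaches
   check_all_array_refs via vrp_finalize.  */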