1 /* Support routines for Value Range Propagation (VRP).
2    Copyright (C) 2005-2013 Free Software Foundation, Inc.
3    Contributed by Diego Novillo <dnovillo@redhat.com>.
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11 
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 GNU General Public License for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "ggc.h"
26 #include "flags.h"
27 #include "tree.h"
28 #include "basic-block.h"
29 #include "tree-flow.h"
30 #include "tree-pass.h"
31 #include "tree-dump.h"
32 #include "gimple-pretty-print.h"
33 #include "diagnostic-core.h"
34 #include "intl.h"
35 #include "cfgloop.h"
36 #include "tree-scalar-evolution.h"
37 #include "tree-ssa-propagate.h"
38 #include "tree-chrec.h"
39 #include "gimple-fold.h"
40 #include "expr.h"
41 #include "optabs.h"
42 
43 
44 /* Type of value ranges.  See value_range_d for a description of these
45    types.  */
46 enum value_range_type { VR_UNDEFINED, VR_RANGE, VR_ANTI_RANGE, VR_VARYING };
47 
48 /* Range of values that can be associated with an SSA_NAME after VRP
49    has executed.  */
50 struct value_range_d
51 {
52   /* Lattice value represented by this range.  */
53   enum value_range_type type;
54 
55   /* Minimum and maximum values represented by this range.  These
56      values should be interpreted as follows:
57 
58 	- If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
59 	  be NULL.
60 
61 	- If TYPE == VR_RANGE then MIN holds the minimum value and
62 	  MAX holds the maximum value of the range [MIN, MAX].
63 
64 	- If TYPE == VR_ANTI_RANGE the variable is known to NOT
65 	  take any values in the range [MIN, MAX].  */
66   tree min;
67   tree max;
68 
69   /* Set of SSA names whose value ranges are equivalent to this one.
70      This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE.  */
71   bitmap equiv;
72 };
73 
74 typedef struct value_range_d value_range_t;
75 
76 #define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }
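
/* For example, the VR_RANGE [1, 10] describes an SSA name known to take
   only values between 1 and 10 inclusive, while the VR_ANTI_RANGE ~[0, 0]
   describes a name known to be non-zero; the latter is how non-NULL
   pointers are represented (see set_value_range_to_nonnull below).  */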
77 
78 /* Set of SSA names found live during the RPO traversal of the function
79    for still active basic-blocks.  */
80 static sbitmap *live;
81 
82 /* Return true if the SSA name NAME is live on the edge E.  */
83 
84 static bool
85 live_on_edge (edge e, tree name)
86 {
87   return (live[e->dest->index]
88 	  && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
89 }
90 
91 /* Local functions.  */
92 static int compare_values (tree val1, tree val2);
93 static int compare_values_warnv (tree val1, tree val2, bool *);
94 static void vrp_meet (value_range_t *, value_range_t *);
95 static void vrp_intersect_ranges (value_range_t *, value_range_t *);
96 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
97 						     tree, tree, bool, bool *,
98 						     bool *);
99 
100 /* Location information for ASSERT_EXPRs.  Each instance of this
101    structure describes an ASSERT_EXPR for an SSA name.  Since a single
102    SSA name may have more than one assertion associated with it, these
103    locations are kept in a linked list attached to the corresponding
104    SSA name.  */
105 struct assert_locus_d
106 {
107   /* Basic block where the assertion would be inserted.  */
108   basic_block bb;
109 
110   /* Some assertions need to be inserted on an edge (e.g., assertions
111      generated by COND_EXPRs).  In those cases, BB will be NULL.  */
112   edge e;
113 
114   /* Pointer to the statement that generated this assertion.  */
115   gimple_stmt_iterator si;
116 
117   /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
118   enum tree_code comp_code;
119 
120   /* Value being compared against.  */
121   tree val;
122 
123   /* Expression to compare.  */
124   tree expr;
125 
126   /* Next node in the linked list.  */
127   struct assert_locus_d *next;
128 };
129 
130 typedef struct assert_locus_d *assert_locus_t;
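
/* For example, for the GIMPLE conditional "if (x_2 > 10)" the assert
   discovery code records an assert_locus on the outgoing true edge so
   that ASSERT_EXPR <x_2, x_2 > 10> can later be inserted there.  */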
131 
132 /* If bit I is present, it means that SSA name N_i has a list of
133    assertions that should be inserted in the IL.  */
134 static bitmap need_assert_for;
135 
136 /* Array of locations lists where to insert assertions.  ASSERTS_FOR[I]
137    holds a list of ASSERT_LOCUS_T nodes that describe where
138    ASSERT_EXPRs for SSA name N_I should be inserted.  */
139 static assert_locus_t *asserts_for;
140 
141 /* Value range array.  After propagation, VR_VALUE[I] holds the range
142    of values that SSA name N_I may take.  */
143 static unsigned num_vr_values;
144 static value_range_t **vr_value;
145 static bool values_propagated;
146 
147 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
148    number of executable edges we saw the last time we visited the
149    node.  */
150 static int *vr_phi_edge_counts;
151 
152 typedef struct {
153   gimple stmt;
154   tree vec;
155 } switch_update;
156 
157 static vec<edge> to_remove_edges;
158 static vec<switch_update> to_update_switch_stmts;
159 
160 
161 /* Return the maximum value for TYPE.  */
162 
163 static inline tree
164 vrp_val_max (const_tree type)
165 {
166   if (!INTEGRAL_TYPE_P (type))
167     return NULL_TREE;
168 
169   return TYPE_MAX_VALUE (type);
170 }
171 
172 /* Return the minimum value for TYPE.  */
173 
174 static inline tree
175 vrp_val_min (const_tree type)
176 {
177   if (!INTEGRAL_TYPE_P (type))
178     return NULL_TREE;
179 
180   return TYPE_MIN_VALUE (type);
181 }
182 
183 /* Return whether VAL is equal to the maximum value of its type.  This
184    will be true for a positive overflow infinity.  We can't do a
185    simple equality comparison with TYPE_MAX_VALUE because C typedefs
186    and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
187    to the integer constant with the same value in the type.  */
188 
189 static inline bool
190 vrp_val_is_max (const_tree val)
191 {
192   tree type_max = vrp_val_max (TREE_TYPE (val));
193   return (val == type_max
194 	  || (type_max != NULL_TREE
195 	      && operand_equal_p (val, type_max, 0)));
196 }
197 
198 /* Return whether VAL is equal to the minimum value of its type.  This
199    will be true for a negative overflow infinity.  */
200 
201 static inline bool
202 vrp_val_is_min (const_tree val)
203 {
204   tree type_min = vrp_val_min (TREE_TYPE (val));
205   return (val == type_min
206 	  || (type_min != NULL_TREE
207 	      && operand_equal_p (val, type_min, 0)));
208 }
209 
210 
211 /* Return whether TYPE should use an overflow infinity distinct from
212    TYPE_{MIN,MAX}_VALUE.  We use an overflow infinity value to
213    represent a signed overflow during VRP computations.  An infinity
214    is distinct from a half-range, which will go from some number to
215    TYPE_{MIN,MAX}_VALUE.  */
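/* For example, a signed integer type compiled without -fwrapv needs an
   overflow infinity, whereas unsigned types (whose overflow wraps) and
   pointer types do not.  */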
216 
217 static inline bool
218 needs_overflow_infinity (const_tree type)
219 {
220   return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
221 }
222 
223 /* Return whether TYPE can support our overflow infinity
224    representation: we use the TREE_OVERFLOW flag, which only exists
225    for constants.  If TYPE doesn't support this, we don't optimize
226    cases which would require signed overflow--we drop them to
227    VARYING.  */
228 
229 static inline bool
230 supports_overflow_infinity (const_tree type)
231 {
232   tree min = vrp_val_min (type), max = vrp_val_max (type);
233 #ifdef ENABLE_CHECKING
234   gcc_assert (needs_overflow_infinity (type));
235 #endif
236   return (min != NULL_TREE
237 	  && CONSTANT_CLASS_P (min)
238 	  && max != NULL_TREE
239 	  && CONSTANT_CLASS_P (max));
240 }
241 
242 /* VAL is the maximum or minimum value of a type.  Return a
243    corresponding overflow infinity.  */
244 
245 static inline tree
246 make_overflow_infinity (tree val)
247 {
248   gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
249   val = copy_node (val);
250   TREE_OVERFLOW (val) = 1;
251   return val;
252 }
253 
254 /* Return a negative overflow infinity for TYPE.  */
255 
256 static inline tree
257 negative_overflow_infinity (tree type)
258 {
259   gcc_checking_assert (supports_overflow_infinity (type));
260   return make_overflow_infinity (vrp_val_min (type));
261 }
262 
263 /* Return a positive overflow infinity for TYPE.  */
264 
265 static inline tree
266 positive_overflow_infinity (tree type)
267 {
268   gcc_checking_assert (supports_overflow_infinity (type));
269   return make_overflow_infinity (vrp_val_max (type));
270 }
271 
272 /* Return whether VAL is a negative overflow infinity.  */
273 
274 static inline bool
275 is_negative_overflow_infinity (const_tree val)
276 {
277   return (needs_overflow_infinity (TREE_TYPE (val))
278 	  && CONSTANT_CLASS_P (val)
279 	  && TREE_OVERFLOW (val)
280 	  && vrp_val_is_min (val));
281 }
282 
283 /* Return whether VAL is a positive overflow infinity.  */
284 
285 static inline bool
286 is_positive_overflow_infinity (const_tree val)
287 {
288   return (needs_overflow_infinity (TREE_TYPE (val))
289 	  && CONSTANT_CLASS_P (val)
290 	  && TREE_OVERFLOW (val)
291 	  && vrp_val_is_max (val));
292 }
293 
294 /* Return whether VAL is a positive or negative overflow infinity.  */
295 
296 static inline bool
297 is_overflow_infinity (const_tree val)
298 {
299   return (needs_overflow_infinity (TREE_TYPE (val))
300 	  && CONSTANT_CLASS_P (val)
301 	  && TREE_OVERFLOW (val)
302 	  && (vrp_val_is_min (val) || vrp_val_is_max (val)));
303 }
304 
305 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
306 
307 static inline bool
308 stmt_overflow_infinity (gimple stmt)
309 {
310   if (is_gimple_assign (stmt)
311       && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
312       GIMPLE_SINGLE_RHS)
313     return is_overflow_infinity (gimple_assign_rhs1 (stmt));
314   return false;
315 }
316 
317 /* If VAL is now an overflow infinity, return VAL.  Otherwise, return
318    the same value with TREE_OVERFLOW clear.  This can be used to avoid
319    confusing a regular value with an overflow value.  */
320 
321 static inline tree
322 avoid_overflow_infinity (tree val)
323 {
324   if (!is_overflow_infinity (val))
325     return val;
326 
327   if (vrp_val_is_max (val))
328     return vrp_val_max (TREE_TYPE (val));
329   else
330     {
331       gcc_checking_assert (vrp_val_is_min (val));
332       return vrp_val_min (TREE_TYPE (val));
333     }
334 }
335 
336 
337 /* Return true if ARG is marked with the nonnull attribute in the
338    current function signature.  */
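/* For example, when compiling a function declared as

     void f (void *p, void *q) __attribute__ ((nonnull (2)));

   nonnull_arg_p returns true only for Q, while a bare
   __attribute__ ((nonnull)) would mark both pointer parameters.  */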
339 
340 static bool
341 nonnull_arg_p (const_tree arg)
342 {
343   tree t, attrs, fntype;
344   unsigned HOST_WIDE_INT arg_num;
345 
346   gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));
347 
348   /* The static chain decl is always non null.  */
349   if (arg == cfun->static_chain_decl)
350     return true;
351 
352   fntype = TREE_TYPE (current_function_decl);
353   for (attrs = TYPE_ATTRIBUTES (fntype); attrs; attrs = TREE_CHAIN (attrs))
354     {
355       attrs = lookup_attribute ("nonnull", attrs);
356 
357       /* If "nonnull" wasn't specified, we know nothing about the argument.  */
358       if (attrs == NULL_TREE)
359 	return false;
360 
361       /* If "nonnull" applies to all the arguments, then ARG is non-null.  */
362       if (TREE_VALUE (attrs) == NULL_TREE)
363 	return true;
364 
365       /* Get the position number for ARG in the function signature.  */
366       for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
367 	   t;
368 	   t = DECL_CHAIN (t), arg_num++)
369 	{
370 	  if (t == arg)
371 	    break;
372 	}
373 
374       gcc_assert (t == arg);
375 
376       /* Now see if ARG_NUM is mentioned in the nonnull list.  */
377       for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
378 	{
379 	  if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
380 	    return true;
381 	}
382     }
383 
384   return false;
385 }
386 
387 
388 /* Set value range VR to VR_UNDEFINED.  */
389 
390 static inline void
391 set_value_range_to_undefined (value_range_t *vr)
392 {
393   vr->type = VR_UNDEFINED;
394   vr->min = vr->max = NULL_TREE;
395   if (vr->equiv)
396     bitmap_clear (vr->equiv);
397 }
398 
399 
400 /* Set value range VR to VR_VARYING.  */
401 
402 static inline void
403 set_value_range_to_varying (value_range_t *vr)
404 {
405   vr->type = VR_VARYING;
406   vr->min = vr->max = NULL_TREE;
407   if (vr->equiv)
408     bitmap_clear (vr->equiv);
409 }
410 
411 
412 /* Set value range VR to {T, MIN, MAX, EQUIV}.  */
413 
414 static void
415 set_value_range (value_range_t *vr, enum value_range_type t, tree min,
416 		 tree max, bitmap equiv)
417 {
418 #if defined ENABLE_CHECKING
419   /* Check the validity of the range.  */
420   if (t == VR_RANGE || t == VR_ANTI_RANGE)
421     {
422       int cmp;
423 
424       gcc_assert (min && max);
425 
426       if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
427 	gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
428 
429       cmp = compare_values (min, max);
430       gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
431 
432       if (needs_overflow_infinity (TREE_TYPE (min)))
433 	gcc_assert (!is_overflow_infinity (min)
434 		    || !is_overflow_infinity (max));
435     }
436 
437   if (t == VR_UNDEFINED || t == VR_VARYING)
438     gcc_assert (min == NULL_TREE && max == NULL_TREE);
439 
440   if (t == VR_UNDEFINED || t == VR_VARYING)
441     gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
442 #endif
443 
444   vr->type = t;
445   vr->min = min;
446   vr->max = max;
447 
448   /* Since updating the equivalence set involves deep copying the
449      bitmaps, only do it if absolutely necessary.  */
450   if (vr->equiv == NULL
451       && equiv != NULL)
452     vr->equiv = BITMAP_ALLOC (NULL);
453 
454   if (equiv != vr->equiv)
455     {
456       if (equiv && !bitmap_empty_p (equiv))
457 	bitmap_copy (vr->equiv, equiv);
458       else
459 	bitmap_clear (vr->equiv);
460     }
461 }
462 
463 
464 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
465    This means adjusting T, MIN and MAX representing the case of a
466    wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
467    as the anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
468    In corner cases where MAX+1 or MIN-1 wraps this will fall back
469    to varying.
470    This routine exists to ease canonicalization in the case where we
471    extract ranges from var + CST op limit.  */
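/* For example, with an 8-bit unsigned type the wrapping VR_RANGE
   [250, 5], which covers [250, 255] U [0, 5], is canonicalized to the
   anti-range ~[6, 249], and the wrapping VR_ANTI_RANGE ~[250, 5]
   becomes the plain range [6, 249].  */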
472 
473 static void
474 set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
475 				  tree min, tree max, bitmap equiv)
476 {
477   /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
478   if (t == VR_UNDEFINED)
479     {
480       set_value_range_to_undefined (vr);
481       return;
482     }
483   else if (t == VR_VARYING)
484     {
485       set_value_range_to_varying (vr);
486       return;
487     }
488 
489   /* Nothing to canonicalize for symbolic ranges.  */
490   if (TREE_CODE (min) != INTEGER_CST
491       || TREE_CODE (max) != INTEGER_CST)
492     {
493       set_value_range (vr, t, min, max, equiv);
494       return;
495     }
496 
497   /* MIN and MAX are in the wrong order; to swap them and flip the VR
498      type we also need to adjust the bounds.  */
499   if (tree_int_cst_lt (max, min))
500     {
501       tree one, tmp;
502 
503       /* For one bit precision if max < min, then the swapped
504 	 range covers all values, so for VR_RANGE it is varying and
505 	 for VR_ANTI_RANGE empty range, so drop to varying as well.  */
506       if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
507 	{
508 	  set_value_range_to_varying (vr);
509 	  return;
510 	}
511 
512       one = build_int_cst (TREE_TYPE (min), 1);
513       tmp = int_const_binop (PLUS_EXPR, max, one);
514       max = int_const_binop (MINUS_EXPR, min, one);
515       min = tmp;
516 
517       /* There's one corner case: if we had [C+1, C] before, we now have
518 	 that again.  But this represents an empty value range, so drop
519 	 to varying in this case.  */
520       if (tree_int_cst_lt (max, min))
521 	{
522 	  set_value_range_to_varying (vr);
523 	  return;
524 	}
525 
526       t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
527     }
528 
529   /* Anti-ranges that can be represented as ranges should be so.  */
530   if (t == VR_ANTI_RANGE)
531     {
532       bool is_min = vrp_val_is_min (min);
533       bool is_max = vrp_val_is_max (max);
534 
535       if (is_min && is_max)
536 	{
537 	  /* We cannot deal with empty ranges, drop to varying.
538 	     ???  This could be VR_UNDEFINED instead.  */
539 	  set_value_range_to_varying (vr);
540 	  return;
541 	}
542       else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
543 	       && (is_min || is_max))
544 	{
545 	  /* Non-empty boolean ranges can always be represented
546 	     as a singleton range.  */
547 	  if (is_min)
548 	    min = max = vrp_val_max (TREE_TYPE (min));
549 	  else
550 	    min = max = vrp_val_min (TREE_TYPE (min));
551 	  t = VR_RANGE;
552 	}
553       else if (is_min
554 	       /* As a special exception preserve non-null ranges.  */
555 	       && !(TYPE_UNSIGNED (TREE_TYPE (min))
556 		    && integer_zerop (max)))
557         {
558 	  tree one = build_int_cst (TREE_TYPE (max), 1);
559 	  min = int_const_binop (PLUS_EXPR, max, one);
560 	  max = vrp_val_max (TREE_TYPE (max));
561 	  t = VR_RANGE;
562         }
563       else if (is_max)
564         {
565 	  tree one = build_int_cst (TREE_TYPE (min), 1);
566 	  max = int_const_binop (MINUS_EXPR, min, one);
567 	  min = vrp_val_min (TREE_TYPE (min));
568 	  t = VR_RANGE;
569         }
570     }
571 
572   /* Drop [-INF(OVF), +INF(OVF)] to varying.  */
573   if (needs_overflow_infinity (TREE_TYPE (min))
574       && is_overflow_infinity (min)
575       && is_overflow_infinity (max))
576     {
577       set_value_range_to_varying (vr);
578       return;
579     }
580 
581   set_value_range (vr, t, min, max, equiv);
582 }
583 
584 /* Copy value range FROM into value range TO.  */
585 
586 static inline void
587 copy_value_range (value_range_t *to, value_range_t *from)
588 {
589   set_value_range (to, from->type, from->min, from->max, from->equiv);
590 }
591 
592 /* Set value range VR to a single value.  This function is only called
593    with values we get from statements, and exists to clear the
594    TREE_OVERFLOW flag so that we don't think we have an overflow
595    infinity when we shouldn't.  */
596 
597 static inline void
598 set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
599 {
600   gcc_assert (is_gimple_min_invariant (val));
601   val = avoid_overflow_infinity (val);
602   set_value_range (vr, VR_RANGE, val, val, equiv);
603 }
604 
605 /* Set value range VR to a non-negative range of type TYPE.
606    OVERFLOW_INFINITY indicates whether to use an overflow infinity
607    rather than TYPE_MAX_VALUE; this should be true if we determine
608    that the range is nonnegative based on the assumption that signed
609    overflow does not occur.  */
610 
611 static inline void
612 set_value_range_to_nonnegative (value_range_t *vr, tree type,
613 				bool overflow_infinity)
614 {
615   tree zero;
616 
617   if (overflow_infinity && !supports_overflow_infinity (type))
618     {
619       set_value_range_to_varying (vr);
620       return;
621     }
622 
623   zero = build_int_cst (type, 0);
624   set_value_range (vr, VR_RANGE, zero,
625 		   (overflow_infinity
626 		    ? positive_overflow_infinity (type)
627 		    : TYPE_MAX_VALUE (type)),
628 		   vr->equiv);
629 }
630 
631 /* Set value range VR to a non-NULL range of type TYPE.  */
632 
633 static inline void
634 set_value_range_to_nonnull (value_range_t *vr, tree type)
635 {
636   tree zero = build_int_cst (type, 0);
637   set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
638 }
639 
640 
641 /* Set value range VR to a NULL range of type TYPE.  */
642 
643 static inline void
644 set_value_range_to_null (value_range_t *vr, tree type)
645 {
646   set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
647 }
648 
649 
650 /* Set value range VR to a range of a truthvalue of type TYPE.  */
651 
652 static inline void
653 set_value_range_to_truthvalue (value_range_t *vr, tree type)
654 {
655   if (TYPE_PRECISION (type) == 1)
656     set_value_range_to_varying (vr);
657   else
658     set_value_range (vr, VR_RANGE,
659 		     build_int_cst (type, 0), build_int_cst (type, 1),
660 		     vr->equiv);
661 }
662 
663 
664 /* If abs (min) < abs (max), set VR to [-max, max], if
665    abs (min) >= abs (max), set VR to [-min, min].  */
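/* For example, abs_extent_range applied to [-3, 5] produces [-5, 5],
   and applied to [-7, 4] it produces [-7, 7].  */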
666 
667 static void
668 abs_extent_range (value_range_t *vr, tree min, tree max)
669 {
670   int cmp;
671 
672   gcc_assert (TREE_CODE (min) == INTEGER_CST);
673   gcc_assert (TREE_CODE (max) == INTEGER_CST);
674   gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
675   gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
676   min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
677   max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
678   if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
679     {
680       set_value_range_to_varying (vr);
681       return;
682     }
683   cmp = compare_values (min, max);
684   if (cmp == -1)
685     min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
686   else if (cmp == 0 || cmp == 1)
687     {
688       max = min;
689       min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
690     }
691   else
692     {
693       set_value_range_to_varying (vr);
694       return;
695     }
696   set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
697 }
698 
699 
700 /* Return value range information for VAR.
701 
702    If we have no value ranges recorded (i.e., VRP is not running), then
703    return NULL.  Otherwise create an empty range if none existed for VAR.  */
704 
705 static value_range_t *
706 get_value_range (const_tree var)
707 {
708   static const struct value_range_d vr_const_varying
709     = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
710   value_range_t *vr;
711   tree sym;
712   unsigned ver = SSA_NAME_VERSION (var);
713 
714   /* If we have no recorded ranges, then return NULL.  */
715   if (! vr_value)
716     return NULL;
717 
718   /* If we query the range for a new SSA name return an unmodifiable VARYING.
719      We should get here at most from the substitute-and-fold stage which
720      will never try to change values.  */
721   if (ver >= num_vr_values)
722     return CONST_CAST (value_range_t *, &vr_const_varying);
723 
724   vr = vr_value[ver];
725   if (vr)
726     return vr;
727 
728   /* After propagation finished do not allocate new value-ranges.  */
729   if (values_propagated)
730     return CONST_CAST (value_range_t *, &vr_const_varying);
731 
732   /* Create a default value range.  */
733   vr_value[ver] = vr = XCNEW (value_range_t);
734 
735   /* Defer allocating the equivalence set.  */
736   vr->equiv = NULL;
737 
738   /* If VAR is a default definition of a parameter, the variable can
739      take any value in VAR's type.  */
740   if (SSA_NAME_IS_DEFAULT_DEF (var))
741     {
742       sym = SSA_NAME_VAR (var);
743       if (TREE_CODE (sym) == PARM_DECL)
744 	{
745 	  /* Try to use the "nonnull" attribute to create ~[0, 0]
746 	     anti-ranges for pointers.  Note that this is only valid with
747 	     default definitions of PARM_DECLs.  */
748 	  if (POINTER_TYPE_P (TREE_TYPE (sym))
749 	      && nonnull_arg_p (sym))
750 	    set_value_range_to_nonnull (vr, TREE_TYPE (sym));
751 	  else
752 	    set_value_range_to_varying (vr);
753 	}
754       else if (TREE_CODE (sym) == RESULT_DECL
755 	       && DECL_BY_REFERENCE (sym))
756 	set_value_range_to_nonnull (vr, TREE_TYPE (sym));
757     }
758 
759   return vr;
760 }
761 
762 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes.  */
763 
764 static inline bool
765 vrp_operand_equal_p (const_tree val1, const_tree val2)
766 {
767   if (val1 == val2)
768     return true;
769   if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
770     return false;
771   if (is_overflow_infinity (val1))
772     return is_overflow_infinity (val2);
773   return true;
774 }
775 
776 /* Return true, if the bitmaps B1 and B2 are equal.  */
777 
778 static inline bool
779 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
780 {
781   return (b1 == b2
782 	  || ((!b1 || bitmap_empty_p (b1))
783 	      && (!b2 || bitmap_empty_p (b2)))
784 	  || (b1 && b2
785 	      && bitmap_equal_p (b1, b2)));
786 }
787 
788 /* Update the value range and equivalence set for variable VAR to
789    NEW_VR.  Return true if NEW_VR is different from VAR's previous
790    value.
791 
792    NOTE: This function assumes that NEW_VR is a temporary value range
793    object created for the sole purpose of updating VAR's range.  The
794    storage used by the equivalence set from NEW_VR will be freed by
795    this function.  Do not call update_value_range when NEW_VR
796    is the range object associated with another SSA name.  */
797 
798 static inline bool
799 update_value_range (const_tree var, value_range_t *new_vr)
800 {
801   value_range_t *old_vr;
802   bool is_new;
803 
804   /* Update the value range, if necessary.  */
805   old_vr = get_value_range (var);
806   is_new = old_vr->type != new_vr->type
807 	   || !vrp_operand_equal_p (old_vr->min, new_vr->min)
808 	   || !vrp_operand_equal_p (old_vr->max, new_vr->max)
809 	   || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
810 
811   if (is_new)
812     {
813       /* Do not allow transitions up the lattice.  The following
814          is slightly more awkward than just new_vr->type < old_vr->type
815 	 because VR_RANGE and VR_ANTI_RANGE need to be considered
816 	 the same.  We may not have is_new when transitioning to
817 	 UNDEFINED or from VARYING.  */
818       if (new_vr->type == VR_UNDEFINED
819 	  || old_vr->type == VR_VARYING)
820 	set_value_range_to_varying (old_vr);
821       else
822 	set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
823 			 new_vr->equiv);
824     }
825 
826   BITMAP_FREE (new_vr->equiv);
827 
828   return is_new;
829 }
830 
831 
832 /* Add VAR and VAR's equivalence set to EQUIV.  This is the central
833    point where equivalence processing can be turned on/off.  */
834 
835 static void
836 add_equivalence (bitmap *equiv, const_tree var)
837 {
838   unsigned ver = SSA_NAME_VERSION (var);
839   value_range_t *vr = vr_value[ver];
840 
841   if (*equiv == NULL)
842     *equiv = BITMAP_ALLOC (NULL);
843   bitmap_set_bit (*equiv, ver);
844   if (vr && vr->equiv)
845     bitmap_ior_into (*equiv, vr->equiv);
846 }
847 
848 
849 /* Return true if VR is ~[0, 0].  */
850 
851 static inline bool
852 range_is_nonnull (value_range_t *vr)
853 {
854   return vr->type == VR_ANTI_RANGE
855 	 && integer_zerop (vr->min)
856 	 && integer_zerop (vr->max);
857 }
858 
859 
860 /* Return true if VR is [0, 0].  */
861 
862 static inline bool
863 range_is_null (value_range_t *vr)
864 {
865   return vr->type == VR_RANGE
866 	 && integer_zerop (vr->min)
867 	 && integer_zerop (vr->max);
868 }
869 
870 /* Return true if the max and min of VR are INTEGER_CST.  It's not
871    necessarily a singleton.  */
872 
873 static inline bool
874 range_int_cst_p (value_range_t *vr)
875 {
876   return (vr->type == VR_RANGE
877 	  && TREE_CODE (vr->max) == INTEGER_CST
878 	  && TREE_CODE (vr->min) == INTEGER_CST);
879 }
880 
881 /* Return true if VR is an INTEGER_CST singleton.  */
882 
883 static inline bool
884 range_int_cst_singleton_p (value_range_t *vr)
885 {
886   return (range_int_cst_p (vr)
887 	  && !TREE_OVERFLOW (vr->min)
888 	  && !TREE_OVERFLOW (vr->max)
889 	  && tree_int_cst_equal (vr->min, vr->max));
890 }
891 
892 /* Return true if value range VR involves at least one symbol.  */
893 
894 static inline bool
895 symbolic_range_p (value_range_t *vr)
896 {
897   return (!is_gimple_min_invariant (vr->min)
898           || !is_gimple_min_invariant (vr->max));
899 }
900 
901 /* Return true if value range VR uses an overflow infinity.  */
902 
903 static inline bool
904 overflow_infinity_range_p (value_range_t *vr)
905 {
906   return (vr->type == VR_RANGE
907 	  && (is_overflow_infinity (vr->min)
908 	      || is_overflow_infinity (vr->max)));
909 }
910 
911 /* Return false if we cannot make a valid comparison based on VR;
912    this will be the case if it uses an overflow infinity and overflow
913    is not undefined (i.e., -fno-strict-overflow is in effect).
914    Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
915    uses an overflow infinity.  */
916 
917 static bool
918 usable_range_p (value_range_t *vr, bool *strict_overflow_p)
919 {
920   gcc_assert (vr->type == VR_RANGE);
921   if (is_overflow_infinity (vr->min))
922     {
923       *strict_overflow_p = true;
924       if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
925 	return false;
926     }
927   if (is_overflow_infinity (vr->max))
928     {
929       *strict_overflow_p = true;
930       if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
931 	return false;
932     }
933   return true;
934 }
935 
936 
937 /* Return true if the result of assignment STMT is known to be non-negative.
938    If the return value is based on the assumption that signed overflow is
939    undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
940    *STRICT_OVERFLOW_P.*/
941 
942 static bool
943 gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
944 {
945   enum tree_code code = gimple_assign_rhs_code (stmt);
946   switch (get_gimple_rhs_class (code))
947     {
948     case GIMPLE_UNARY_RHS:
949       return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
950 					     gimple_expr_type (stmt),
951 					     gimple_assign_rhs1 (stmt),
952 					     strict_overflow_p);
953     case GIMPLE_BINARY_RHS:
954       return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
955 					      gimple_expr_type (stmt),
956 					      gimple_assign_rhs1 (stmt),
957 					      gimple_assign_rhs2 (stmt),
958 					      strict_overflow_p);
959     case GIMPLE_TERNARY_RHS:
960       return false;
961     case GIMPLE_SINGLE_RHS:
962       return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
963 					      strict_overflow_p);
964     case GIMPLE_INVALID_RHS:
965       gcc_unreachable ();
966     default:
967       gcc_unreachable ();
968     }
969 }
970 
971 /* Return true if the return value of call STMT is known to be non-negative.
972    If the return value is based on the assumption that signed overflow is
973    undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
974    *STRICT_OVERFLOW_P.*/
975 
976 static bool
977 gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
978 {
979   tree arg0 = gimple_call_num_args (stmt) > 0 ?
980     gimple_call_arg (stmt, 0) : NULL_TREE;
981   tree arg1 = gimple_call_num_args (stmt) > 1 ?
982     gimple_call_arg (stmt, 1) : NULL_TREE;
983 
984   return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
985 					gimple_call_fndecl (stmt),
986 					arg0,
987 					arg1,
988 					strict_overflow_p);
989 }
990 
991 /* Return true if STMT is known to compute a non-negative value.
992    If the return value is based on the assumption that signed overflow is
993    undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
994    *STRICT_OVERFLOW_P.*/
995 
996 static bool
997 gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
998 {
999   switch (gimple_code (stmt))
1000     {
1001     case GIMPLE_ASSIGN:
1002       return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
1003     case GIMPLE_CALL:
1004       return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
1005     default:
1006       gcc_unreachable ();
1007     }
1008 }
1009 
1010 /* Return true if the result of assignment STMT is known to be non-zero.
1011    If the return value is based on the assumption that signed overflow is
1012    undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1013    *STRICT_OVERFLOW_P.*/
1014 
1015 static bool
1016 gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
1017 {
1018   enum tree_code code = gimple_assign_rhs_code (stmt);
1019   switch (get_gimple_rhs_class (code))
1020     {
1021     case GIMPLE_UNARY_RHS:
1022       return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
1023 					 gimple_expr_type (stmt),
1024 					 gimple_assign_rhs1 (stmt),
1025 					 strict_overflow_p);
1026     case GIMPLE_BINARY_RHS:
1027       return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
1028 					  gimple_expr_type (stmt),
1029 					  gimple_assign_rhs1 (stmt),
1030 					  gimple_assign_rhs2 (stmt),
1031 					  strict_overflow_p);
1032     case GIMPLE_TERNARY_RHS:
1033       return false;
1034     case GIMPLE_SINGLE_RHS:
1035       return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
1036 					  strict_overflow_p);
1037     case GIMPLE_INVALID_RHS:
1038       gcc_unreachable ();
1039     default:
1040       gcc_unreachable ();
1041     }
1042 }
1043 
1044 /* Return true if STMT is known to compute a non-zero value.
1045    If the return value is based on the assumption that signed overflow is
1046    undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1047    *STRICT_OVERFLOW_P.*/
1048 
1049 static bool
1050 gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
1051 {
1052   switch (gimple_code (stmt))
1053     {
1054     case GIMPLE_ASSIGN:
1055       return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
1056     case GIMPLE_CALL:
1057       return gimple_alloca_call_p (stmt);
1058     default:
1059       gcc_unreachable ();
1060     }
1061 }
1062 
1063 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
1064    obtained so far.  */
1065 
1066 static bool
1067 vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
1068 {
1069   if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
1070     return true;
1071 
1072   /* If we have an expression of the form &X->a, then the expression
1073      is nonnull if X is nonnull.  */
1074   if (is_gimple_assign (stmt)
1075       && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
1076     {
1077       tree expr = gimple_assign_rhs1 (stmt);
1078       tree base = get_base_address (TREE_OPERAND (expr, 0));
1079 
1080       if (base != NULL_TREE
1081 	  && TREE_CODE (base) == MEM_REF
1082 	  && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
1083 	{
1084 	  value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
1085 	  if (range_is_nonnull (vr))
1086 	    return true;
1087 	}
1088     }
1089 
1090   return false;
1091 }
1092 
1093 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1094    a gimple invariant, or SSA_NAME +- CST.  */
1095 
1096 static bool
1097 valid_value_p (tree expr)
1098 {
1099   if (TREE_CODE (expr) == SSA_NAME)
1100     return true;
1101 
1102   if (TREE_CODE (expr) == PLUS_EXPR
1103       || TREE_CODE (expr) == MINUS_EXPR)
1104     return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1105 	    && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1106 
1107   return is_gimple_min_invariant (expr);
1108 }
1109 
1110 /* Return
1111    1 if VAL < VAL2
1112    0 if !(VAL < VAL2)
1113    -2 if those are incomparable.  */
1114 static inline int
1115 operand_less_p (tree val, tree val2)
1116 {
1117   /* LT is folded faster than GE and others.  Inline the common case.  */
1118   if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1119     {
1120       if (TYPE_UNSIGNED (TREE_TYPE (val)))
1121 	return INT_CST_LT_UNSIGNED (val, val2);
1122       else
1123 	{
1124 	  if (INT_CST_LT (val, val2))
1125 	    return 1;
1126 	}
1127     }
1128   else
1129     {
1130       tree tcmp;
1131 
1132       fold_defer_overflow_warnings ();
1133 
1134       tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
1135 
1136       fold_undefer_and_ignore_overflow_warnings ();
1137 
1138       if (!tcmp
1139 	  || TREE_CODE (tcmp) != INTEGER_CST)
1140 	return -2;
1141 
1142       if (!integer_zerop (tcmp))
1143 	return 1;
1144     }
1145 
1146   /* val >= val2, not considering overflow infinity.  */
1147   if (is_negative_overflow_infinity (val))
1148     return is_negative_overflow_infinity (val2) ? 0 : 1;
1149   else if (is_positive_overflow_infinity (val2))
1150     return is_positive_overflow_infinity (val) ? 0 : 1;
1151 
1152   return 0;
1153 }
1154 
1155 /* Compare two values VAL1 and VAL2.  Return
1156 
1157    	-2 if VAL1 and VAL2 cannot be compared at compile-time,
1158    	-1 if VAL1 < VAL2,
1159    	 0 if VAL1 == VAL2,
1160 	+1 if VAL1 > VAL2, and
1161 	+2 if VAL1 != VAL2
1162 
1163    This is similar to tree_int_cst_compare but supports pointer values
1164    and values that cannot be compared at compile time.
1165 
1166    If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1167    true if the return value is only valid if we assume that signed
1168    overflow is undefined.  */
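/* For example, for an SSA name n_1 of signed type with undefined
   overflow, comparing n_1 + 4 against n_1 + 8 returns -1 (and sets
   *STRICT_OVERFLOW_P), while comparing expressions based on two
   different SSA names returns -2.  */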
1169 
1170 static int
1171 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1172 {
1173   if (val1 == val2)
1174     return 0;
1175 
1176   /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1177      both integers.  */
1178   gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1179 	      == POINTER_TYPE_P (TREE_TYPE (val2)));
1180   /* Convert the two values into the same type.  This is needed because
1181      sizetype causes sign extension even for unsigned types.  */
1182   val2 = fold_convert (TREE_TYPE (val1), val2);
1183   STRIP_USELESS_TYPE_CONVERSION (val2);
1184 
1185   if ((TREE_CODE (val1) == SSA_NAME
1186        || TREE_CODE (val1) == PLUS_EXPR
1187        || TREE_CODE (val1) == MINUS_EXPR)
1188       && (TREE_CODE (val2) == SSA_NAME
1189 	  || TREE_CODE (val2) == PLUS_EXPR
1190 	  || TREE_CODE (val2) == MINUS_EXPR))
1191     {
1192       tree n1, c1, n2, c2;
1193       enum tree_code code1, code2;
1194 
1195       /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
1196 	 return -1 or +1 accordingly.  If VAL1 and VAL2 don't use the
1197 	 same name, return -2.  */
1198       if (TREE_CODE (val1) == SSA_NAME)
1199 	{
1200 	  code1 = SSA_NAME;
1201 	  n1 = val1;
1202 	  c1 = NULL_TREE;
1203 	}
1204       else
1205 	{
1206 	  code1 = TREE_CODE (val1);
1207 	  n1 = TREE_OPERAND (val1, 0);
1208 	  c1 = TREE_OPERAND (val1, 1);
1209 	  if (tree_int_cst_sgn (c1) == -1)
1210 	    {
1211 	      if (is_negative_overflow_infinity (c1))
1212 		return -2;
1213 	      c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
1214 	      if (!c1)
1215 		return -2;
1216 	      code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1217 	    }
1218 	}
1219 
1220       if (TREE_CODE (val2) == SSA_NAME)
1221 	{
1222 	  code2 = SSA_NAME;
1223 	  n2 = val2;
1224 	  c2 = NULL_TREE;
1225 	}
1226       else
1227 	{
1228 	  code2 = TREE_CODE (val2);
1229 	  n2 = TREE_OPERAND (val2, 0);
1230 	  c2 = TREE_OPERAND (val2, 1);
1231 	  if (tree_int_cst_sgn (c2) == -1)
1232 	    {
1233 	      if (is_negative_overflow_infinity (c2))
1234 		return -2;
1235 	      c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
1236 	      if (!c2)
1237 		return -2;
1238 	      code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1239 	    }
1240 	}
1241 
1242       /* Both values must use the same name.  */
1243       if (n1 != n2)
1244 	return -2;
1245 
1246       if (code1 == SSA_NAME
1247 	  && code2 == SSA_NAME)
1248 	/* NAME == NAME  */
1249 	return 0;
1250 
1251       /* If overflow is defined we cannot simplify more.  */
1252       if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
1253 	return -2;
1254 
1255       if (strict_overflow_p != NULL
1256 	  && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
1257 	  && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
1258 	*strict_overflow_p = true;
1259 
1260       if (code1 == SSA_NAME)
1261 	{
1262 	  if (code2 == PLUS_EXPR)
1263 	    /* NAME < NAME + CST  */
1264 	    return -1;
1265 	  else if (code2 == MINUS_EXPR)
1266 	    /* NAME > NAME - CST  */
1267 	    return 1;
1268 	}
1269       else if (code1 == PLUS_EXPR)
1270 	{
1271 	  if (code2 == SSA_NAME)
1272 	    /* NAME + CST > NAME  */
1273 	    return 1;
1274 	  else if (code2 == PLUS_EXPR)
1275 	    /* NAME + CST1 > NAME + CST2, if CST1 > CST2  */
1276 	    return compare_values_warnv (c1, c2, strict_overflow_p);
1277 	  else if (code2 == MINUS_EXPR)
1278 	    /* NAME + CST1 > NAME - CST2  */
1279 	    return 1;
1280 	}
1281       else if (code1 == MINUS_EXPR)
1282 	{
1283 	  if (code2 == SSA_NAME)
1284 	    /* NAME - CST < NAME  */
1285 	    return -1;
1286 	  else if (code2 == PLUS_EXPR)
1287 	    /* NAME - CST1 < NAME + CST2  */
1288 	    return -1;
1289 	  else if (code2 == MINUS_EXPR)
1290 	    /* NAME - CST1 > NAME - CST2, if CST1 < CST2.  Notice that
1291 	       C1 and C2 are swapped in the call to compare_values.  */
1292 	    return compare_values_warnv (c2, c1, strict_overflow_p);
1293 	}
1294 
1295       gcc_unreachable ();
1296     }
1297 
1298   /* We cannot compare non-constants.  */
1299   if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
1300     return -2;
1301 
1302   if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1303     {
1304       /* We cannot compare overflowed values, except for overflow
1305 	 infinities.  */
1306       if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1307 	{
1308 	  if (strict_overflow_p != NULL)
1309 	    *strict_overflow_p = true;
1310 	  if (is_negative_overflow_infinity (val1))
1311 	    return is_negative_overflow_infinity (val2) ? 0 : -1;
1312 	  else if (is_negative_overflow_infinity (val2))
1313 	    return 1;
1314 	  else if (is_positive_overflow_infinity (val1))
1315 	    return is_positive_overflow_infinity (val2) ? 0 : 1;
1316 	  else if (is_positive_overflow_infinity (val2))
1317 	    return -1;
1318 	  return -2;
1319 	}
1320 
1321       return tree_int_cst_compare (val1, val2);
1322     }
1323   else
1324     {
1325       tree t;
1326 
1327       /* First see if VAL1 and VAL2 are not the same.  */
1328       if (val1 == val2 || operand_equal_p (val1, val2, 0))
1329 	return 0;
1330 
1331       /* If VAL1 is a lower address than VAL2, return -1.  */
1332       if (operand_less_p (val1, val2) == 1)
1333 	return -1;
1334 
1335       /* If VAL1 is a higher address than VAL2, return +1.  */
1336       if (operand_less_p (val2, val1) == 1)
1337 	return 1;
1338 
1339       /* If VAL1 is different than VAL2, return +2.
1340 	 For integer constants we either have already returned -1 or 1
1341 	 or they are equivalent.  We still might succeed in proving
1342 	 something about non-trivial operands.  */
1343       if (TREE_CODE (val1) != INTEGER_CST
1344 	  || TREE_CODE (val2) != INTEGER_CST)
1345 	{
1346           t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1347 	  if (t && integer_onep (t))
1348 	    return 2;
1349 	}
1350 
1351       return -2;
1352     }
1353 }
1354 
1355 /* Compare values like compare_values_warnv, but treat comparisons of
1356    nonconstants which rely on undefined overflow as incomparable.  */
1357 
1358 static int
1359 compare_values (tree val1, tree val2)
1360 {
1361   bool sop;
1362   int ret;
1363 
1364   sop = false;
1365   ret = compare_values_warnv (val1, val2, &sop);
1366   if (sop
1367       && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1368     ret = -2;
1369   return ret;
1370 }
1371 
1372 
1373 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
1374           0 if VAL is not inside [MIN, MAX],
1375 	 -2 if we cannot tell either way.
1376 
1377    Benchmark compile/20001226-1.c compilation time after changing this
1378    function.  */
1379 
1380 static inline int
1381 value_inside_range (tree val, tree min, tree max)
1382 {
1383   int cmp1, cmp2;
1384 
1385   cmp1 = operand_less_p (val, min);
1386   if (cmp1 == -2)
1387     return -2;
1388   if (cmp1 == 1)
1389     return 0;
1390 
1391   cmp2 = operand_less_p (max, val);
1392   if (cmp2 == -2)
1393     return -2;
1394 
1395   return !cmp2;
1396 }
1397 
1398 
1399 /* Return true if value ranges VR0 and VR1 have a non-empty
1400    intersection.
1401 
1402    Benchmark compile/20001226-1.c compilation time after changing this
1403    function.
1404    */
1405 
1406 static inline bool
1407 value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
1408 {
1409   /* The value ranges do not intersect if the maximum of the first range is
1410      less than the minimum of the second range or vice versa.
1411      When those relations are unknown, we can't do any better.  */
1412   if (operand_less_p (vr0->max, vr1->min) != 0)
1413     return false;
1414   if (operand_less_p (vr1->max, vr0->min) != 0)
1415     return false;
1416   return true;
1417 }
1418 
1419 
1420 /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
1421    include the value zero, -2 if we cannot tell.  */
1422 
1423 static inline int
1424 range_includes_zero_p (tree min, tree max)
1425 {
1426   tree zero = build_int_cst (TREE_TYPE (min), 0);
1427   return value_inside_range (zero, min, max);
1428 }
1429 
1430 /* Return true if *VR is known to contain only nonnegative values.  */
1431 
1432 static inline bool
1433 value_range_nonnegative_p (value_range_t *vr)
1434 {
1435   /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1436      which would return a useful value should be encoded as a
1437      VR_RANGE.  */
1438   if (vr->type == VR_RANGE)
1439     {
1440       int result = compare_values (vr->min, integer_zero_node);
1441       return (result == 0 || result == 1);
1442     }
1443 
1444   return false;
1445 }
1446 
1447 /* Return true if T, an SSA_NAME, is known to be nonnegative.  Return
1448    false otherwise or if no value range information is available.  */
1449 
1450 bool
1451 ssa_name_nonnegative_p (const_tree t)
1452 {
1453   value_range_t *vr = get_value_range (t);
1454 
1455   if (INTEGRAL_TYPE_P (t)
1456       && TYPE_UNSIGNED (t))
1457     return true;
1458 
1459   if (!vr)
1460     return false;
1461 
1462   return value_range_nonnegative_p (vr);
1463 }
1464 
1465 /* If *VR has a value range that is a single constant value, return that,
1466    otherwise return NULL_TREE.  */
1467 
1468 static tree
1469 value_range_constant_singleton (value_range_t *vr)
1470 {
1471   if (vr->type == VR_RANGE
1472       && operand_equal_p (vr->min, vr->max, 0)
1473       && is_gimple_min_invariant (vr->min))
1474     return vr->min;
1475 
1476   return NULL_TREE;
1477 }
1478 
1479 /* If OP has a value range with a single constant value return that,
1480    otherwise return NULL_TREE.  This returns OP itself if OP is a
1481    constant.  */
1482 
1483 static tree
1484 op_with_constant_singleton_value_range (tree op)
1485 {
1486   if (is_gimple_min_invariant (op))
1487     return op;
1488 
1489   if (TREE_CODE (op) != SSA_NAME)
1490     return NULL_TREE;
1491 
1492   return value_range_constant_singleton (get_value_range (op));
1493 }
1494 
1495 /* Return true if op is in a boolean [0, 1] value-range.  */
1496 
1497 static bool
1498 op_with_boolean_value_range_p (tree op)
1499 {
1500   value_range_t *vr;
1501 
1502   if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
1503     return true;
1504 
1505   if (integer_zerop (op)
1506       || integer_onep (op))
1507     return true;
1508 
1509   if (TREE_CODE (op) != SSA_NAME)
1510     return false;
1511 
1512   vr = get_value_range (op);
1513   return (vr->type == VR_RANGE
1514 	  && integer_zerop (vr->min)
1515 	  && integer_onep (vr->max));
1516 }
1517 
1518 /* Extract value range information from an ASSERT_EXPR EXPR and store
1519    it in *VR_P.  */
1520 
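/* For example, ASSERT_EXPR <x_2, x_2 > 10> for a 32-bit signed x_2
   yields the range [11, INT_MAX], which is then intersected with
   whatever was previously known about x_2.  */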
1521 static void
1522 extract_range_from_assert (value_range_t *vr_p, tree expr)
1523 {
1524   tree var, cond, limit, min, max, type;
1525   value_range_t *limit_vr;
1526   enum tree_code cond_code;
1527 
1528   var = ASSERT_EXPR_VAR (expr);
1529   cond = ASSERT_EXPR_COND (expr);
1530 
1531   gcc_assert (COMPARISON_CLASS_P (cond));
1532 
1533   /* Find VAR in the ASSERT_EXPR conditional.  */
1534   if (var == TREE_OPERAND (cond, 0)
1535       || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1536       || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1537     {
1538       /* If the predicate is of the form VAR COMP LIMIT, then we just
1539 	 take LIMIT from the RHS and use the same comparison code.  */
1540       cond_code = TREE_CODE (cond);
1541       limit = TREE_OPERAND (cond, 1);
1542       cond = TREE_OPERAND (cond, 0);
1543     }
1544   else
1545     {
1546       /* If the predicate is of the form LIMIT COMP VAR, then we need
1547 	 to flip around the comparison code to create the proper range
1548 	 for VAR.  */
1549       cond_code = swap_tree_comparison (TREE_CODE (cond));
1550       limit = TREE_OPERAND (cond, 0);
1551       cond = TREE_OPERAND (cond, 1);
1552     }
1553 
1554   limit = avoid_overflow_infinity (limit);
1555 
1556   type = TREE_TYPE (var);
1557   gcc_assert (limit != var);
1558 
1559   /* For pointer arithmetic, we only keep track of pointer equality
1560      and inequality.  */
1561   if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1562     {
1563       set_value_range_to_varying (vr_p);
1564       return;
1565     }
1566 
1567   /* If LIMIT is another SSA name and LIMIT has a range of its own,
1568      try to use LIMIT's range to avoid creating symbolic ranges
1569      unnecessarily. */
1570   limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1571 
1572   /* LIMIT's range is only interesting if it has any useful information.  */
1573   if (limit_vr
1574       && (limit_vr->type == VR_UNDEFINED
1575 	  || limit_vr->type == VR_VARYING
1576 	  || symbolic_range_p (limit_vr)))
1577     limit_vr = NULL;
1578 
1579   /* Initially, the new range has the same set of equivalences of
1580      VAR's range.  This will be revised before returning the final
1581      value.  Since assertions may be chained via mutually exclusive
1582      predicates, we will need to trim the set of equivalences before
1583      we are done.  */
1584   gcc_assert (vr_p->equiv == NULL);
1585   add_equivalence (&vr_p->equiv, var);
1586 
1587   /* Extract a new range based on the asserted comparison for VAR and
1588      LIMIT's value range.  Notice that if LIMIT has an anti-range, we
1589      will only use it for equality comparisons (EQ_EXPR).  For any
1590      other kind of assertion, we cannot derive a range from LIMIT's
1591      anti-range that can be used to describe the new range.  For
1592      instance, ASSERT_EXPR <x_2, x_2 <= b_4>.  If b_4 is ~[2, 10],
1593      then b_4 takes on the ranges [-INF, 1] and [11, +INF].  There is
1594      no single range for x_2 that could describe LE_EXPR, so we might
1595      as well build the range [b_4, +INF] for it.
1596      One special case we handle is extracting a range from a
1597      range test encoded as (unsigned)var + CST <= limit.  */
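  /* For instance, the folded form of "x_1 >= 5 && x_1 <= 15", namely
     "(unsigned) x_1 + 4294967291 <= 10" with a 32-bit x_1, is handled
     here: MIN becomes the negated constant (5), MAX becomes LIMIT plus
     MIN (15), and the resulting range for x_1 is [5, 15].  */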
1598   if (TREE_CODE (cond) == NOP_EXPR
1599       || TREE_CODE (cond) == PLUS_EXPR)
1600     {
1601       if (TREE_CODE (cond) == PLUS_EXPR)
1602         {
1603           min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
1604 			     TREE_OPERAND (cond, 1));
1605           max = int_const_binop (PLUS_EXPR, limit, min);
1606 	  cond = TREE_OPERAND (cond, 0);
1607 	}
1608       else
1609 	{
1610 	  min = build_int_cst (TREE_TYPE (var), 0);
1611 	  max = limit;
1612 	}
1613 
1614       /* Make sure to not set TREE_OVERFLOW on the final type
1615 	 conversion.  We are willingly interpreting large positive
1616 	 unsigned values as negative signed values here.  */
1617       min = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (min),
1618 				   0, false);
1619       max = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (max),
1620 				   0, false);
1621 
1622       /* We can transform a max, min range to an anti-range or
1623          vice-versa.  Use set_and_canonicalize_value_range which does
1624 	 this for us.  */
1625       if (cond_code == LE_EXPR)
1626         set_and_canonicalize_value_range (vr_p, VR_RANGE,
1627 					  min, max, vr_p->equiv);
1628       else if (cond_code == GT_EXPR)
1629         set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1630 					  min, max, vr_p->equiv);
1631       else
1632 	gcc_unreachable ();
1633     }
1634   else if (cond_code == EQ_EXPR)
1635     {
1636       enum value_range_type range_type;
1637 
1638       if (limit_vr)
1639 	{
1640 	  range_type = limit_vr->type;
1641 	  min = limit_vr->min;
1642 	  max = limit_vr->max;
1643 	}
1644       else
1645 	{
1646 	  range_type = VR_RANGE;
1647 	  min = limit;
1648 	  max = limit;
1649 	}
1650 
1651       set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1652 
1653       /* When asserting the equality VAR == LIMIT and LIMIT is another
1654 	 SSA name, the new range will also inherit the equivalence set
1655 	 from LIMIT.  */
1656       if (TREE_CODE (limit) == SSA_NAME)
1657 	add_equivalence (&vr_p->equiv, limit);
1658     }
1659   else if (cond_code == NE_EXPR)
1660     {
1661       /* As described above, when LIMIT's range is an anti-range and
1662 	 this assertion is an inequality (NE_EXPR), then we cannot
1663 	 derive anything from the anti-range.  For instance, if
1664 	 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1665 	 not imply that VAR's range is [0, 0].  So, in the case of
1666 	 anti-ranges, we just assert the inequality using LIMIT and
1667 	 not its anti-range.
1668 
1669 	 If LIMIT_VR is a range, we can only use it to build a new
1670 	 anti-range if LIMIT_VR is a single-valued range.  For
1671 	 instance, if LIMIT_VR is [0, 1], the predicate
1672 	 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1673 	 Rather, it means that for value 0 VAR should be ~[0, 0]
1674 	 and for value 1, VAR should be ~[1, 1].  We cannot
1675 	 represent these ranges.
1676 
1677 	 The only situation in which we can build a valid
1678 	 anti-range is when LIMIT_VR is a single-valued range
1679 	 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX).  In that case,
1680 	 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX].  */
1681       if (limit_vr
1682 	  && limit_vr->type == VR_RANGE
1683 	  && compare_values (limit_vr->min, limit_vr->max) == 0)
1684 	{
1685 	  min = limit_vr->min;
1686 	  max = limit_vr->max;
1687 	}
1688       else
1689 	{
1690 	  /* In any other case, we cannot use LIMIT's range to build a
1691 	     valid anti-range.  */
1692 	  min = max = limit;
1693 	}
1694 
1695       /* If MIN and MAX cover the whole range for their type, then
1696 	 just use the original LIMIT.  */
1697       if (INTEGRAL_TYPE_P (type)
1698 	  && vrp_val_is_min (min)
1699 	  && vrp_val_is_max (max))
1700 	min = max = limit;
1701 
1702       set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1703 					min, max, vr_p->equiv);
1704     }
1705   else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1706     {
1707       min = TYPE_MIN_VALUE (type);
1708 
1709       if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1710 	max = limit;
1711       else
1712 	{
1713 	  /* If LIMIT_VR is of the form [N1, N2], we need to build the
1714 	     range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1715 	     LT_EXPR.  */
1716 	  max = limit_vr->max;
1717 	}
1718 
1719       /* If the maximum value forces us to be out of bounds, simply punt.
1720 	 It would be pointless to try and do anything more since this
1721 	 all should be optimized away above us.  */
1722       if ((cond_code == LT_EXPR
1723 	   && compare_values (max, min) == 0)
1724 	  || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max)))
1725 	set_value_range_to_varying (vr_p);
1726       else
1727 	{
1728 	  /* For LT_EXPR, we create the range [MIN, MAX - 1].  */
1729 	  if (cond_code == LT_EXPR)
1730 	    {
1731 	      if (TYPE_PRECISION (TREE_TYPE (max)) == 1
1732 		  && !TYPE_UNSIGNED (TREE_TYPE (max)))
1733 		max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
1734 				   build_int_cst (TREE_TYPE (max), -1));
1735 	      else
1736 		max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
1737 				   build_int_cst (TREE_TYPE (max), 1));
1738 	      if (EXPR_P (max))
1739 		TREE_NO_WARNING (max) = 1;
1740 	    }
1741 
1742 	  set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1743 	}
1744     }
1745   else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1746     {
1747       max = TYPE_MAX_VALUE (type);
1748 
1749       if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1750 	min = limit;
1751       else
1752 	{
1753 	  /* If LIMIT_VR is of the form [N1, N2], we need to build the
1754 	     range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1755 	     GT_EXPR.  */
1756 	  min = limit_vr->min;
1757 	}
1758 
1759       /* If the minimum value forces us to be out of bounds, simply punt.
1760 	 It would be pointless to try and do anything more since this
1761 	 all should be optimized away above us.  */
1762       if ((cond_code == GT_EXPR
1763 	   && compare_values (min, max) == 0)
1764 	  || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min)))
1765 	set_value_range_to_varying (vr_p);
1766       else
1767 	{
1768 	  /* For GT_EXPR, we create the range [MIN + 1, MAX].  */
1769 	  if (cond_code == GT_EXPR)
1770 	    {
1771 	      if (TYPE_PRECISION (TREE_TYPE (min)) == 1
1772 		  && !TYPE_UNSIGNED (TREE_TYPE (min)))
1773 		min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
1774 				   build_int_cst (TREE_TYPE (min), -1));
1775 	      else
1776 		min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
1777 				   build_int_cst (TREE_TYPE (min), 1));
1778 	      if (EXPR_P (min))
1779 		TREE_NO_WARNING (min) = 1;
1780 	    }
1781 
1782 	  set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1783 	}
1784     }
1785   else
1786     gcc_unreachable ();
1787 
1788   /* Finally intersect the new range with what we already know about var.  */
1789   vrp_intersect_ranges (vr_p, get_value_range (var));
1790 }
1791 
1792 
1793 /* Extract range information from SSA name VAR and store it in VR.  If
1794    VAR has an interesting range, use it.  Otherwise, create the
1795    range [VAR, VAR] and return it.  This is useful in situations where
1796    we may have conditionals testing values of VARYING names.  For
1797    instance,
1798 
1799    	x_3 = y_5;
1800 	if (x_3 > y_5)
1801 	  ...
1802 
1803     Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1804     always false.  */
1805 
1806 static void
1807 extract_range_from_ssa_name (value_range_t *vr, tree var)
1808 {
1809   value_range_t *var_vr = get_value_range (var);
1810 
1811   if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING)
1812     copy_value_range (vr, var_vr);
1813   else
1814     set_value_range (vr, VR_RANGE, var, var, NULL);
1815 
1816   add_equivalence (&vr->equiv, var);
1817 }
1818 
1819 
1820 /* Wrapper around int_const_binop.  If the operation overflows and we
1821    are not using wrapping arithmetic, then adjust the result to be
1822    -INF or +INF depending on CODE, VAL1 and VAL2.  This can return
1823    NULL_TREE if we need to use an overflow infinity representation but
1824    the type does not support it.  */
1825 
1826 static tree
1827 vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
1828 {
1829   tree res;
1830 
1831   res = int_const_binop (code, val1, val2);
1832 
1833   /* If we are using unsigned arithmetic, operate symbolically
1834      on -INF and +INF as int_const_binop only handles signed overflow.  */
1835   if (TYPE_UNSIGNED (TREE_TYPE (val1)))
1836     {
1837       int checkz = compare_values (res, val1);
1838       bool overflow = false;
1839 
1840       /* Ensure that res = val1 [+*] val2 >= val1
1841          or that res = val1 - val2 <= val1.  */
1842       if ((code == PLUS_EXPR
1843 	   && !(checkz == 1 || checkz == 0))
1844           || (code == MINUS_EXPR
1845 	      && !(checkz == 0 || checkz == -1)))
1846 	{
1847 	  overflow = true;
1848 	}
1849       /* Checking for multiplication overflow is done by dividing the
1850 	 output of the multiplication by the first input of the
1851 	 multiplication.  If the result of that division operation is
1852 	 not equal to the second input of the multiplication, then the
1853 	 multiplication overflowed.  */
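      /* As an illustrative sketch: with an 8-bit unsigned type, 20 * 13
	 wraps to (260 mod 256) == 4, and 4 / 20 == 0 != 13, so the
	 wrap-around is detected and RES is flagged as overflowed below.  */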
1854       else if (code == MULT_EXPR && !integer_zerop (val1))
1855 	{
1856 	  tree tmp = int_const_binop (TRUNC_DIV_EXPR,
1857 				      res,
1858 				      val1);
1859 	  int check = compare_values (tmp, val2);
1860 
1861 	  if (check != 0)
1862 	    overflow = true;
1863 	}
1864 
1865       if (overflow)
1866 	{
1867 	  res = copy_node (res);
1868 	  TREE_OVERFLOW (res) = 1;
1869 	}
1870 
1871     }
1872   else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
1873     /* If the signed operation wraps then int_const_binop has done
1874        everything we want.  */
1875     ;
1876   else if ((TREE_OVERFLOW (res)
1877 	    && !TREE_OVERFLOW (val1)
1878 	    && !TREE_OVERFLOW (val2))
1879 	   || is_overflow_infinity (val1)
1880 	   || is_overflow_infinity (val2))
1881     {
1882       /* If the operation overflowed but neither VAL1 nor VAL2 are
1883 	 overflown, return -INF or +INF depending on the operation
1884 	 and the combination of signs of the operands.  */
1885       int sgn1 = tree_int_cst_sgn (val1);
1886       int sgn2 = tree_int_cst_sgn (val2);
1887 
1888       if (needs_overflow_infinity (TREE_TYPE (res))
1889 	  && !supports_overflow_infinity (TREE_TYPE (res)))
1890 	return NULL_TREE;
1891 
1892       /* We have to punt on adding infinities of different signs,
1893 	 since we can't tell what the sign of the result should be.
1894 	 Likewise for subtracting infinities of the same sign.  */
1895       if (((code == PLUS_EXPR && sgn1 != sgn2)
1896 	   || (code == MINUS_EXPR && sgn1 == sgn2))
1897 	  && is_overflow_infinity (val1)
1898 	  && is_overflow_infinity (val2))
1899 	return NULL_TREE;
1900 
1901       /* Don't try to handle division or shifting of infinities.  */
1902       if ((code == TRUNC_DIV_EXPR
1903 	   || code == FLOOR_DIV_EXPR
1904 	   || code == CEIL_DIV_EXPR
1905 	   || code == EXACT_DIV_EXPR
1906 	   || code == ROUND_DIV_EXPR
1907 	   || code == RSHIFT_EXPR)
1908 	  && (is_overflow_infinity (val1)
1909 	      || is_overflow_infinity (val2)))
1910 	return NULL_TREE;
1911 
1912       /* Notice that we only need to handle the restricted set of
1913 	 operations handled by extract_range_from_binary_expr.
1914 	 Among them, only multiplication, addition and subtraction
1915 	 can yield overflow without overflown operands because we
1916 	 are working with integral types only... except in the
1917 	 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
1918 	 for division too.  */
1919 
1920       /* For multiplication, the sign of the overflow is given
1921 	 by the comparison of the signs of the operands.  */
1922       if ((code == MULT_EXPR && sgn1 == sgn2)
1923           /* For addition, the operands must be of the same sign
1924 	     to yield an overflow.  Its sign is therefore that
1925 	     of one of the operands, for example the first.  For
1926 	     infinite operands X + -INF is negative, not positive.  */
1927 	  || (code == PLUS_EXPR
1928 	      && (sgn1 >= 0
1929 		  ? !is_negative_overflow_infinity (val2)
1930 		  : is_positive_overflow_infinity (val2)))
1931 	  /* For subtraction, non-infinite operands must be of
1932 	     different signs to yield an overflow.  Its sign is
1933 	     therefore that of the first operand or the opposite of
1934 	     that of the second operand.  A first operand of 0 counts
1935 	     as positive here, for the corner case 0 - (-INF), which
1936 	     overflows, but must yield +INF.  For infinite operands 0
1937 	     - INF is negative, not positive.  */
1938 	  || (code == MINUS_EXPR
1939 	      && (sgn1 >= 0
1940 		  ? !is_positive_overflow_infinity (val2)
1941 		  : is_negative_overflow_infinity (val2)))
1942 	  /* We only get in here with positive shift count, so the
1943 	     overflow direction is the same as the sign of val1.
1944 	     Actually rshift does not overflow at all, but we only
1945 	     handle the case of shifting overflowed -INF and +INF.  */
1946 	  || (code == RSHIFT_EXPR
1947 	      && sgn1 >= 0)
1948 	  /* For division, the only case is -INF / -1 = +INF.  */
1949 	  || code == TRUNC_DIV_EXPR
1950 	  || code == FLOOR_DIV_EXPR
1951 	  || code == CEIL_DIV_EXPR
1952 	  || code == EXACT_DIV_EXPR
1953 	  || code == ROUND_DIV_EXPR)
1954 	return (needs_overflow_infinity (TREE_TYPE (res))
1955 		? positive_overflow_infinity (TREE_TYPE (res))
1956 		: TYPE_MAX_VALUE (TREE_TYPE (res)));
1957       else
1958 	return (needs_overflow_infinity (TREE_TYPE (res))
1959 		? negative_overflow_infinity (TREE_TYPE (res))
1960 		: TYPE_MIN_VALUE (TREE_TYPE (res)));
1961     }
1962 
1963   return res;
1964 }
1965 
1966 
1967 /* For range VR compute two double_int bitmasks.  If a bit is unset in
1968    the *MAY_BE_NONZERO bitmask, then that bit is 0 for all numbers in
1969    the range; otherwise it might be 0 or 1.  If a bit is set in the
1970    *MUST_BE_NONZERO bitmask, then that bit is 1 for all numbers in the
1971    range; otherwise it might be 0 or 1.  */
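/* As an example, for the range [4, 7] (binary 100 .. 111) this computes
   *MAY_BE_NONZERO == 7 (0b111) and *MUST_BE_NONZERO == 4 (0b100): bit 2
   is set in every value of the range, while bits 0 and 1 vary.  */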
1972 
1973 static bool
1974 zero_nonzero_bits_from_vr (value_range_t *vr,
1975 			   double_int *may_be_nonzero,
1976 			   double_int *must_be_nonzero)
1977 {
1978   *may_be_nonzero = double_int_minus_one;
1979   *must_be_nonzero = double_int_zero;
1980   if (!range_int_cst_p (vr)
1981       || TREE_OVERFLOW (vr->min)
1982       || TREE_OVERFLOW (vr->max))
1983     return false;
1984 
1985   if (range_int_cst_singleton_p (vr))
1986     {
1987       *may_be_nonzero = tree_to_double_int (vr->min);
1988       *must_be_nonzero = *may_be_nonzero;
1989     }
1990   else if (tree_int_cst_sgn (vr->min) >= 0
1991 	   || tree_int_cst_sgn (vr->max) < 0)
1992     {
1993       double_int dmin = tree_to_double_int (vr->min);
1994       double_int dmax = tree_to_double_int (vr->max);
1995       double_int xor_mask = dmin ^ dmax;
1996       *may_be_nonzero = dmin | dmax;
1997       *must_be_nonzero = dmin & dmax;
1998       if (xor_mask.high != 0)
1999 	{
2000 	  unsigned HOST_WIDE_INT mask
2001 	      = ((unsigned HOST_WIDE_INT) 1
2002 		 << floor_log2 (xor_mask.high)) - 1;
2003 	  may_be_nonzero->low = ALL_ONES;
2004 	  may_be_nonzero->high |= mask;
2005 	  must_be_nonzero->low = 0;
2006 	  must_be_nonzero->high &= ~mask;
2007 	}
2008       else if (xor_mask.low != 0)
2009 	{
2010 	  unsigned HOST_WIDE_INT mask
2011 	      = ((unsigned HOST_WIDE_INT) 1
2012 		 << floor_log2 (xor_mask.low)) - 1;
2013 	  may_be_nonzero->low |= mask;
2014 	  must_be_nonzero->low &= ~mask;
2015 	}
2016     }
2017 
2018   return true;
2019 }
2020 
2021 /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
2022    so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
2023    false otherwise.  If *AR can be represented with a single range
2024    *VR1 will be VR_UNDEFINED.  */
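/* For instance, for an 8-bit unsigned type the anti-range ~[3, 5] is
   split into *VR0 = [0, 2] and *VR1 = [6, 255], whereas ~[0, 5] needs
   only the single range *VR0 = [6, 255].  */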
2025 
2026 static bool
2027 ranges_from_anti_range (value_range_t *ar,
2028 			value_range_t *vr0, value_range_t *vr1)
2029 {
2030   tree type = TREE_TYPE (ar->min);
2031 
2032   vr0->type = VR_UNDEFINED;
2033   vr1->type = VR_UNDEFINED;
2034 
2035   if (ar->type != VR_ANTI_RANGE
2036       || TREE_CODE (ar->min) != INTEGER_CST
2037       || TREE_CODE (ar->max) != INTEGER_CST
2038       || !vrp_val_min (type)
2039       || !vrp_val_max (type))
2040     return false;
2041 
2042   if (!vrp_val_is_min (ar->min))
2043     {
2044       vr0->type = VR_RANGE;
2045       vr0->min = vrp_val_min (type);
2046       vr0->max
2047 	= double_int_to_tree (type,
2048 			      tree_to_double_int (ar->min) - double_int_one);
2049     }
2050   if (!vrp_val_is_max (ar->max))
2051     {
2052       vr1->type = VR_RANGE;
2053       vr1->min
2054 	= double_int_to_tree (type,
2055 			      tree_to_double_int (ar->max) + double_int_one);
2056       vr1->max = vrp_val_max (type);
2057     }
2058   if (vr0->type == VR_UNDEFINED)
2059     {
2060       *vr0 = *vr1;
2061       vr1->type = VR_UNDEFINED;
2062     }
2063 
2064   return vr0->type != VR_UNDEFINED;
2065 }
2066 
2067 /* Helper to extract a value-range *VR for a multiplicative operation
2068    *VR0 CODE *VR1.  */
2069 
2070 static void
2071 extract_range_from_multiplicative_op_1 (value_range_t *vr,
2072 					enum tree_code code,
2073 					value_range_t *vr0, value_range_t *vr1)
2074 {
2075   enum value_range_type type;
2076   tree val[4];
2077   size_t i;
2078   tree min, max;
2079   bool sop;
2080   int cmp;
2081 
2082   /* Multiplications, divisions and shifts are a bit tricky to handle,
2083      depending on the mix of signs we have in the two ranges, we
2084      need to operate on different values to get the minimum and
2085      maximum values for the new range.  One approach is to figure
2086      out all the variations of range combinations and do the
2087      operations.
2088 
2089      However, this involves several calls to compare_values and it
2090      is pretty convoluted.  It's simpler to do the 4 operations
2091      (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP
2092      MAX1) and then figure the smallest and largest values to form
2093      the new range.  */
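  /* As an example, for [-3, 2] * [5, 10] the four cross products are
     -15, -30, 10 and 20, so the resulting range is [-30, 20].  */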
2094   gcc_assert (code == MULT_EXPR
2095 	      || code == TRUNC_DIV_EXPR
2096 	      || code == FLOOR_DIV_EXPR
2097 	      || code == CEIL_DIV_EXPR
2098 	      || code == EXACT_DIV_EXPR
2099 	      || code == ROUND_DIV_EXPR
2100 	      || code == RSHIFT_EXPR
2101 	      || code == LSHIFT_EXPR);
2102   gcc_assert ((vr0->type == VR_RANGE
2103 	       || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
2104 	      && vr0->type == vr1->type);
2105 
2106   type = vr0->type;
2107 
2108   /* Compute the 4 cross operations.  */
2109   sop = false;
2110   val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
2111   if (val[0] == NULL_TREE)
2112     sop = true;
2113 
2114   if (vr1->max == vr1->min)
2115     val[1] = NULL_TREE;
2116   else
2117     {
2118       val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
2119       if (val[1] == NULL_TREE)
2120 	sop = true;
2121     }
2122 
2123   if (vr0->max == vr0->min)
2124     val[2] = NULL_TREE;
2125   else
2126     {
2127       val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
2128       if (val[2] == NULL_TREE)
2129 	sop = true;
2130     }
2131 
2132   if (vr0->min == vr0->max || vr1->min == vr1->max)
2133     val[3] = NULL_TREE;
2134   else
2135     {
2136       val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
2137       if (val[3] == NULL_TREE)
2138 	sop = true;
2139     }
2140 
2141   if (sop)
2142     {
2143       set_value_range_to_varying (vr);
2144       return;
2145     }
2146 
2147   /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2148      of VAL[i].  */
2149   min = val[0];
2150   max = val[0];
2151   for (i = 1; i < 4; i++)
2152     {
2153       if (!is_gimple_min_invariant (min)
2154 	  || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2155 	  || !is_gimple_min_invariant (max)
2156 	  || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2157 	break;
2158 
2159       if (val[i])
2160 	{
2161 	  if (!is_gimple_min_invariant (val[i])
2162 	      || (TREE_OVERFLOW (val[i])
2163 		  && !is_overflow_infinity (val[i])))
2164 	    {
2165 	      /* If we found an overflowed value, set MIN and MAX
2166 		 to it so that we set the resulting range to
2167 		 VARYING.  */
2168 	      min = max = val[i];
2169 	      break;
2170 	    }
2171 
2172 	  if (compare_values (val[i], min) == -1)
2173 	    min = val[i];
2174 
2175 	  if (compare_values (val[i], max) == 1)
2176 	    max = val[i];
2177 	}
2178     }
2179 
2180   /* If either MIN or MAX overflowed, then set the resulting range to
2181      VARYING.  But we do accept an overflow infinity
2182      representation.  */
2183   if (min == NULL_TREE
2184       || !is_gimple_min_invariant (min)
2185       || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2186       || max == NULL_TREE
2187       || !is_gimple_min_invariant (max)
2188       || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2189     {
2190       set_value_range_to_varying (vr);
2191       return;
2192     }
2193 
2194   /* We punt if:
2195      1) [-INF, +INF]
2196      2) [-INF, +-INF(OVF)]
2197      3) [+-INF(OVF), +INF]
2198      4) [+-INF(OVF), +-INF(OVF)]
2199      We learn nothing when we have INF and INF(OVF) on both sides.
2200      Note that we do accept [-INF, -INF] and [+INF, +INF] without
2201      overflow.  */
2202   if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2203       && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2204     {
2205       set_value_range_to_varying (vr);
2206       return;
2207     }
2208 
2209   cmp = compare_values (min, max);
2210   if (cmp == -2 || cmp == 1)
2211     {
2212       /* If the new range has its limits swapped around (MIN > MAX),
2213 	 then the operation caused one of them to wrap around, mark
2214 	 the new range VARYING.  */
2215       set_value_range_to_varying (vr);
2216     }
2217   else
2218     set_value_range (vr, type, min, max, NULL);
2219 }
2220 
2221 /* Some quadruple precision helpers.  */
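/* Each (L, H) pair below encodes a value of twice the double_int width,
   with L holding the low half and H the high half.  quad_int_cmp compares
   the high halves first, and quad_int_pair_sort swaps the pairs so that
   the smaller value ends up in (*L0, *H0).  */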
2222 static int
2223 quad_int_cmp (double_int l0, double_int h0,
2224 	      double_int l1, double_int h1, bool uns)
2225 {
2226   int c = h0.cmp (h1, uns);
2227   if (c != 0) return c;
2228   return l0.ucmp (l1);
2229 }
2230 
2231 static void
2232 quad_int_pair_sort (double_int *l0, double_int *h0,
2233 		    double_int *l1, double_int *h1, bool uns)
2234 {
2235   if (quad_int_cmp (*l0, *h0, *l1, *h1, uns) > 0)
2236     {
2237       double_int tmp;
2238       tmp = *l0; *l0 = *l1; *l1 = tmp;
2239       tmp = *h0; *h0 = *h1; *h1 = tmp;
2240     }
2241 }
2242 
2243 /* Extract range information from a binary operation CODE based on
2244    the ranges of each of its operands, *VR0 and *VR1 with resulting
2245    type EXPR_TYPE.  The resulting range is stored in *VR.  */
2246 
2247 static void
2248 extract_range_from_binary_expr_1 (value_range_t *vr,
2249 				  enum tree_code code, tree expr_type,
2250 				  value_range_t *vr0_, value_range_t *vr1_)
2251 {
2252   value_range_t vr0 = *vr0_, vr1 = *vr1_;
2253   value_range_t vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
2254   enum value_range_type type;
2255   tree min = NULL_TREE, max = NULL_TREE;
2256   int cmp;
2257 
2258   if (!INTEGRAL_TYPE_P (expr_type)
2259       && !POINTER_TYPE_P (expr_type))
2260     {
2261       set_value_range_to_varying (vr);
2262       return;
2263     }
2264 
2265   /* Not all binary expressions can be applied to ranges in a
2266      meaningful way.  Handle only arithmetic operations.  */
2267   if (code != PLUS_EXPR
2268       && code != MINUS_EXPR
2269       && code != POINTER_PLUS_EXPR
2270       && code != MULT_EXPR
2271       && code != TRUNC_DIV_EXPR
2272       && code != FLOOR_DIV_EXPR
2273       && code != CEIL_DIV_EXPR
2274       && code != EXACT_DIV_EXPR
2275       && code != ROUND_DIV_EXPR
2276       && code != TRUNC_MOD_EXPR
2277       && code != RSHIFT_EXPR
2278       && code != LSHIFT_EXPR
2279       && code != MIN_EXPR
2280       && code != MAX_EXPR
2281       && code != BIT_AND_EXPR
2282       && code != BIT_IOR_EXPR
2283       && code != BIT_XOR_EXPR)
2284     {
2285       set_value_range_to_varying (vr);
2286       return;
2287     }
2288 
2289   /* If both ranges are UNDEFINED, so is the result.  */
2290   if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
2291     {
2292       set_value_range_to_undefined (vr);
2293       return;
2294     }
2295   /* If one of the ranges is UNDEFINED drop it to VARYING for the following
2296      code.  At some point we may want to special-case operations that
2297      have UNDEFINED result for all or some value-ranges of the not UNDEFINED
2298      operand.  */
2299   else if (vr0.type == VR_UNDEFINED)
2300     set_value_range_to_varying (&vr0);
2301   else if (vr1.type == VR_UNDEFINED)
2302     set_value_range_to_varying (&vr1);
2303 
2304   /* Now canonicalize anti-ranges to ranges when they are not symbolic
2305      and express ~[] op X as ([]' op X) U ([]'' op X).  */
2306   if (vr0.type == VR_ANTI_RANGE
2307       && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
2308     {
2309       extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
2310       if (vrtem1.type != VR_UNDEFINED)
2311 	{
2312 	  value_range_t vrres = VR_INITIALIZER;
2313 	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2314 					    &vrtem1, vr1_);
2315 	  vrp_meet (vr, &vrres);
2316 	}
2317       return;
2318     }
2319   /* Likewise for X op ~[].  */
2320   if (vr1.type == VR_ANTI_RANGE
2321       && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
2322     {
2323       extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
2324       if (vrtem1.type != VR_UNDEFINED)
2325 	{
2326 	  value_range_t vrres = VR_INITIALIZER;
2327 	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2328 					    vr0_, &vrtem1);
2329 	  vrp_meet (vr, &vrres);
2330 	}
2331       return;
2332     }
2333 
2334   /* The type of the resulting value range defaults to VR0.TYPE.  */
2335   type = vr0.type;
2336 
2337   /* Refuse to operate on VARYING ranges, ranges of different kinds
2338      and symbolic ranges.  As an exception, we allow BIT_AND_EXPR
2339      because we may be able to derive a useful range even if one of
2340      the operands is VR_VARYING or symbolic range.  Similarly for
2341      divisions.  TODO, we may be able to derive anti-ranges in
2342      some cases.  */
2343   if (code != BIT_AND_EXPR
2344       && code != BIT_IOR_EXPR
2345       && code != TRUNC_DIV_EXPR
2346       && code != FLOOR_DIV_EXPR
2347       && code != CEIL_DIV_EXPR
2348       && code != EXACT_DIV_EXPR
2349       && code != ROUND_DIV_EXPR
2350       && code != TRUNC_MOD_EXPR
2351       && code != MIN_EXPR
2352       && code != MAX_EXPR
2353       && (vr0.type == VR_VARYING
2354 	  || vr1.type == VR_VARYING
2355 	  || vr0.type != vr1.type
2356 	  || symbolic_range_p (&vr0)
2357 	  || symbolic_range_p (&vr1)))
2358     {
2359       set_value_range_to_varying (vr);
2360       return;
2361     }
2362 
2363   /* Now evaluate the expression to determine the new range.  */
2364   if (POINTER_TYPE_P (expr_type))
2365     {
2366       if (code == MIN_EXPR || code == MAX_EXPR)
2367 	{
2368 	  /* For MIN/MAX expressions with pointers, we only care about
2369 	     nullness.  If both are nonnull, then the result is nonnull.
2370 	     If both are null, then the result is null.  Otherwise the
2371 	     result is varying.  */
2372 	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2373 	    set_value_range_to_nonnull (vr, expr_type);
2374 	  else if (range_is_null (&vr0) && range_is_null (&vr1))
2375 	    set_value_range_to_null (vr, expr_type);
2376 	  else
2377 	    set_value_range_to_varying (vr);
2378 	}
2379       else if (code == POINTER_PLUS_EXPR)
2380 	{
2381 	  /* For pointer types, we are really only interested in asserting
2382 	     whether the expression evaluates to non-NULL.  */
2383 	  if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2384 	    set_value_range_to_nonnull (vr, expr_type);
2385 	  else if (range_is_null (&vr0) && range_is_null (&vr1))
2386 	    set_value_range_to_null (vr, expr_type);
2387 	  else
2388 	    set_value_range_to_varying (vr);
2389 	}
2390       else if (code == BIT_AND_EXPR)
2391 	{
2392 	  /* For pointer types, we are really only interested in asserting
2393 	     whether the expression evaluates to non-NULL.  */
2394 	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2395 	    set_value_range_to_nonnull (vr, expr_type);
2396 	  else if (range_is_null (&vr0) || range_is_null (&vr1))
2397 	    set_value_range_to_null (vr, expr_type);
2398 	  else
2399 	    set_value_range_to_varying (vr);
2400 	}
2401       else
2402 	set_value_range_to_varying (vr);
2403 
2404       return;
2405     }
2406 
2407   /* For integer ranges, apply the operation to each end of the
2408      range and see what we end up with.  */
2409   if (code == PLUS_EXPR || code == MINUS_EXPR)
2410     {
2411       /* If we have a PLUS_EXPR or MINUS_EXPR with two VR_RANGE integer
2412          constant ranges, compute the precise range for such a case if possible.  */
2413       if (range_int_cst_p (&vr0)
2414 	  && range_int_cst_p (&vr1)
2415 	  /* We need as many bits as the possibly unsigned inputs.  */
2416 	  && TYPE_PRECISION (expr_type) <= HOST_BITS_PER_DOUBLE_INT)
2417 	{
2418 	  double_int min0 = tree_to_double_int (vr0.min);
2419 	  double_int max0 = tree_to_double_int (vr0.max);
2420 	  double_int min1 = tree_to_double_int (vr1.min);
2421 	  double_int max1 = tree_to_double_int (vr1.max);
2422 	  bool uns = TYPE_UNSIGNED (expr_type);
2423 	  double_int type_min
2424 	    = double_int::min_value (TYPE_PRECISION (expr_type), uns);
2425 	  double_int type_max
2426 	    = double_int::max_value (TYPE_PRECISION (expr_type), uns);
2427 	  double_int dmin, dmax;
2428 	  int min_ovf = 0;
2429 	  int max_ovf = 0;
2430 
2431 	  if (code == PLUS_EXPR)
2432 	    {
2433 	      dmin = min0 + min1;
2434 	      dmax = max0 + max1;
2435 
2436 	      /* Check for overflow in double_int.  */
2437 	      if (min1.cmp (double_int_zero, uns) != dmin.cmp (min0, uns))
2438 		min_ovf = min0.cmp (dmin, uns);
2439 	      if (max1.cmp (double_int_zero, uns) != dmax.cmp (max0, uns))
2440 		max_ovf = max0.cmp (dmax, uns);
2441 	    }
2442 	  else /* if (code == MINUS_EXPR) */
2443 	    {
2444 	      dmin = min0 - max1;
2445 	      dmax = max0 - min1;
2446 
2447 	      if (double_int_zero.cmp (max1, uns) != dmin.cmp (min0, uns))
2448 		min_ovf = min0.cmp (max1, uns);
2449 	      if (double_int_zero.cmp (min1, uns) != dmax.cmp (max0, uns))
2450 		max_ovf = max0.cmp (min1, uns);
2451 	    }
2452 
2453 	  /* For non-wrapping arithmetic look at possibly smaller
2454 	     value-ranges of the type.  */
2455 	  if (!TYPE_OVERFLOW_WRAPS (expr_type))
2456 	    {
2457 	      if (vrp_val_min (expr_type))
2458 		type_min = tree_to_double_int (vrp_val_min (expr_type));
2459 	      if (vrp_val_max (expr_type))
2460 		type_max = tree_to_double_int (vrp_val_max (expr_type));
2461 	    }
2462 
2463 	  /* Check for type overflow.  */
2464 	  if (min_ovf == 0)
2465 	    {
2466 	      if (dmin.cmp (type_min, uns) == -1)
2467 		min_ovf = -1;
2468 	      else if (dmin.cmp (type_max, uns) == 1)
2469 		min_ovf = 1;
2470 	    }
2471 	  if (max_ovf == 0)
2472 	    {
2473 	      if (dmax.cmp (type_min, uns) == -1)
2474 		max_ovf = -1;
2475 	      else if (dmax.cmp (type_max, uns) == 1)
2476 		max_ovf = 1;
2477 	    }
2478 
2479 	  if (TYPE_OVERFLOW_WRAPS (expr_type))
2480 	    {
2481 	      /* If overflow wraps, truncate the values and adjust the
2482 		 range kind and bounds appropriately.  */
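	      /* A sketch of the wrapping case: for an 8-bit unsigned type,
		 [200, 250] + [10, 20] gives DMIN == 210 and DMAX == 270, so
		 only the maximum overflows.  Truncation yields TMIN == 210
		 and TMAX == 14, and the anti-range branch below turns this
		 into ~[15, 209], which is exactly the set of values the
		 wrapped sum can take.  */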
2483 	      double_int tmin
2484 		= dmin.ext (TYPE_PRECISION (expr_type), uns);
2485 	      double_int tmax
2486 		= dmax.ext (TYPE_PRECISION (expr_type), uns);
2487 	      if (min_ovf == max_ovf)
2488 		{
2489 		  /* Either no overflow, or both bounds overflowed, or both
2490 		     underflowed.  The range kind stays VR_RANGE.  */
2491 		  min = double_int_to_tree (expr_type, tmin);
2492 		  max = double_int_to_tree (expr_type, tmax);
2493 		}
2494 	      else if (min_ovf == -1
2495 		       && max_ovf == 1)
2496 		{
2497 		  /* Underflow and overflow, drop to VR_VARYING.  */
2498 		  set_value_range_to_varying (vr);
2499 		  return;
2500 		}
2501 	      else
2502 		{
2503 		  /* Min underflow or max overflow.  The range kind
2504 		     changes to VR_ANTI_RANGE.  */
2505 		  bool covers = false;
2506 		  double_int tem = tmin;
2507 		  gcc_assert ((min_ovf == -1 && max_ovf == 0)
2508 			      || (max_ovf == 1 && min_ovf == 0));
2509 		  type = VR_ANTI_RANGE;
2510 		  tmin = tmax + double_int_one;
2511 		  if (tmin.cmp (tmax, uns) < 0)
2512 		    covers = true;
2513 		  tmax = tem + double_int_minus_one;
2514 		  if (tmax.cmp (tem, uns) > 0)
2515 		    covers = true;
2516 		  /* If the anti-range would cover nothing, drop to varying.
2517 		     Likewise if the anti-range bounds are outside of the
2518 		     type's values.  */
2519 		  if (covers || tmin.cmp (tmax, uns) > 0)
2520 		    {
2521 		      set_value_range_to_varying (vr);
2522 		      return;
2523 		    }
2524 		  min = double_int_to_tree (expr_type, tmin);
2525 		  max = double_int_to_tree (expr_type, tmax);
2526 		}
2527 	    }
2528 	  else
2529 	    {
2530 	      /* If overflow does not wrap, saturate to the type's min/max
2531 	         value.  */
2532 	      if (min_ovf == -1)
2533 		{
2534 		  if (needs_overflow_infinity (expr_type)
2535 		      && supports_overflow_infinity (expr_type))
2536 		    min = negative_overflow_infinity (expr_type);
2537 		  else
2538 		    min = double_int_to_tree (expr_type, type_min);
2539 		}
2540 	      else if (min_ovf == 1)
2541 		{
2542 		  if (needs_overflow_infinity (expr_type)
2543 		      && supports_overflow_infinity (expr_type))
2544 		    min = positive_overflow_infinity (expr_type);
2545 		  else
2546 		    min = double_int_to_tree (expr_type, type_max);
2547 		}
2548 	      else
2549 		min = double_int_to_tree (expr_type, dmin);
2550 
2551 	      if (max_ovf == -1)
2552 		{
2553 		  if (needs_overflow_infinity (expr_type)
2554 		      && supports_overflow_infinity (expr_type))
2555 		    max = negative_overflow_infinity (expr_type);
2556 		  else
2557 		    max = double_int_to_tree (expr_type, type_min);
2558 		}
2559 	      else if (max_ovf == 1)
2560 		{
2561 		  if (needs_overflow_infinity (expr_type)
2562 		      && supports_overflow_infinity (expr_type))
2563 		    max = positive_overflow_infinity (expr_type);
2564 		  else
2565 		    max = double_int_to_tree (expr_type, type_max);
2566 		}
2567 	      else
2568 		max = double_int_to_tree (expr_type, dmax);
2569 	    }
2570 	  if (needs_overflow_infinity (expr_type)
2571 	      && supports_overflow_infinity (expr_type))
2572 	    {
2573 	      if (is_negative_overflow_infinity (vr0.min)
2574 		  || (code == PLUS_EXPR
2575 		      ? is_negative_overflow_infinity (vr1.min)
2576 		      : is_positive_overflow_infinity (vr1.max)))
2577 		min = negative_overflow_infinity (expr_type);
2578 	      if (is_positive_overflow_infinity (vr0.max)
2579 		  || (code == PLUS_EXPR
2580 		      ? is_positive_overflow_infinity (vr1.max)
2581 		      : is_negative_overflow_infinity (vr1.min)))
2582 		max = positive_overflow_infinity (expr_type);
2583 	    }
2584 	}
2585       else
2586 	{
2587 	  /* For other cases, for example if we have a PLUS_EXPR with two
2588 	     VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
2589 	     to compute a precise range for such a case.
2590 	     ???  General even mixed range kind operations can be expressed
2591 	     by for example transforming ~[3, 5] + [1, 2] to range-only
2592 	     operations and a union primitive:
2593 	       [-INF, 2] + [1, 2]  U  [5, +INF] + [1, 2]
2594 	           [-INF+1, 4]     U    [6, +INF(OVF)]
2595 	     though usually the union is not exactly representable with
2596 	     a single range or anti-range as the above is
2597 		 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
2598 	     but one could use a scheme similar to equivalences for this. */
2599 	  set_value_range_to_varying (vr);
2600 	  return;
2601 	}
2602     }
2603   else if (code == MIN_EXPR
2604 	   || code == MAX_EXPR)
2605     {
2606       if (vr0.type == VR_RANGE
2607 	  && !symbolic_range_p (&vr0))
2608 	{
2609 	  type = VR_RANGE;
2610 	  if (vr1.type == VR_RANGE
2611 	      && !symbolic_range_p (&vr1))
2612 	    {
2613 	      /* For operations that make the resulting range directly
2614 		 proportional to the original ranges, apply the operation to
2615 		 the same end of each range.  */
2616 	      min = vrp_int_const_binop (code, vr0.min, vr1.min);
2617 	      max = vrp_int_const_binop (code, vr0.max, vr1.max);
2618 	    }
2619 	  else if (code == MIN_EXPR)
2620 	    {
2621 	      min = vrp_val_min (expr_type);
2622 	      max = vr0.max;
2623 	    }
2624 	  else if (code == MAX_EXPR)
2625 	    {
2626 	      min = vr0.min;
2627 	      max = vrp_val_max (expr_type);
2628 	    }
2629 	}
2630       else if (vr1.type == VR_RANGE
2631 	       && !symbolic_range_p (&vr1))
2632 	{
2633 	  type = VR_RANGE;
2634 	  if (code == MIN_EXPR)
2635 	    {
2636 	      min = vrp_val_min (expr_type);
2637 	      max = vr1.max;
2638 	    }
2639 	  else if (code == MAX_EXPR)
2640 	    {
2641 	      min = vr1.min;
2642 	      max = vrp_val_max (expr_type);
2643 	    }
2644 	}
2645       else
2646 	{
2647 	  set_value_range_to_varying (vr);
2648 	  return;
2649 	}
2650     }
2651   else if (code == MULT_EXPR)
2652     {
2653       /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
2654 	 drop to varying.  */
2655       if (range_int_cst_p (&vr0)
2656 	  && range_int_cst_p (&vr1)
2657 	  && TYPE_OVERFLOW_WRAPS (expr_type))
2658 	{
2659 	  double_int min0, max0, min1, max1, sizem1, size;
2660 	  double_int prod0l, prod0h, prod1l, prod1h,
2661 		     prod2l, prod2h, prod3l, prod3h;
2662 	  bool uns0, uns1, uns;
2663 
2664 	  sizem1 = double_int::max_value (TYPE_PRECISION (expr_type), true);
2665 	  size = sizem1 + double_int_one;
2666 
2667 	  min0 = tree_to_double_int (vr0.min);
2668 	  max0 = tree_to_double_int (vr0.max);
2669 	  min1 = tree_to_double_int (vr1.min);
2670 	  max1 = tree_to_double_int (vr1.max);
2671 
2672 	  uns0 = TYPE_UNSIGNED (expr_type);
2673 	  uns1 = uns0;
2674 
2675 	  /* Canonicalize the intervals.  */
2676 	  if (TYPE_UNSIGNED (expr_type))
2677 	    {
2678 	      double_int min2 = size - min0;
2679 	      if (!min2.is_zero () && min2.cmp (max0, true) < 0)
2680 		{
2681 		  min0 = -min2;
2682 		  max0 -= size;
2683 		  uns0 = false;
2684 		}
2685 
2686 	      min2 = size - min1;
2687 	      if (!min2.is_zero () && min2.cmp (max1, true) < 0)
2688 		{
2689 		  min1 = -min2;
2690 		  max1 -= size;
2691 		  uns1 = false;
2692 		}
2693 	    }
2694 	  uns = uns0 & uns1;
2695 
2696 	  bool overflow;
2697 	  prod0l = min0.wide_mul_with_sign (min1, true, &prod0h, &overflow);
2698 	  if (!uns0 && min0.is_negative ())
2699 	    prod0h -= min1;
2700 	  if (!uns1 && min1.is_negative ())
2701 	    prod0h -= min0;
2702 
2703 	  prod1l = min0.wide_mul_with_sign (max1, true, &prod1h, &overflow);
2704 	  if (!uns0 && min0.is_negative ())
2705 	    prod1h -= max1;
2706 	  if (!uns1 && max1.is_negative ())
2707 	    prod1h -= min0;
2708 
2709 	  prod2l = max0.wide_mul_with_sign (min1, true, &prod2h, &overflow);
2710 	  if (!uns0 && max0.is_negative ())
2711 	    prod2h -= min1;
2712 	  if (!uns1 && min1.is_negative ())
2713 	    prod2h -= max0;
2714 
2715 	  prod3l = max0.wide_mul_with_sign (max1, true, &prod3h, &overflow);
2716 	  if (!uns0 && max0.is_negative ())
2717 	    prod3h -= max1;
2718 	  if (!uns1 && max1.is_negative ())
2719 	    prod3h -= max0;
2720 
2721 	  /* Sort the 4 products.  */
2722 	  quad_int_pair_sort (&prod0l, &prod0h, &prod3l, &prod3h, uns);
2723 	  quad_int_pair_sort (&prod1l, &prod1h, &prod2l, &prod2h, uns);
2724 	  quad_int_pair_sort (&prod0l, &prod0h, &prod1l, &prod1h, uns);
2725 	  quad_int_pair_sort (&prod2l, &prod2h, &prod3l, &prod3h, uns);
2726 
2727 	  /* Max - min.  */
2728 	  if (prod0l.is_zero ())
2729 	    {
2730 	      prod1l = double_int_zero;
2731 	      prod1h = -prod0h;
2732 	    }
2733 	  else
2734 	    {
2735 	      prod1l = -prod0l;
2736 	      prod1h = ~prod0h;
2737 	    }
2738 	  prod2l = prod3l + prod1l;
2739 	  prod2h = prod3h + prod1h;
2740 	  if (prod2l.ult (prod3l))
2741 	    prod2h += double_int_one; /* carry */
2742 
2743 	  if (!prod2h.is_zero ()
2744 	      || prod2l.cmp (sizem1, true) >= 0)
2745 	    {
2746 	      /* The range covers all values.  */
2747 	      set_value_range_to_varying (vr);
2748 	      return;
2749 	    }
2750 
2751 	  /* The following should handle the wrapping and selecting
2752 	     VR_ANTI_RANGE for us.  */
2753 	  min = double_int_to_tree (expr_type, prod0l);
2754 	  max = double_int_to_tree (expr_type, prod3l);
2755 	  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
2756 	  return;
2757 	}
2758 
2759       /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2760 	 drop to VR_VARYING.  It would take more effort to compute a
2761 	 precise range for such a case.  For example, if we have
2762 	 op0 == 65536 and op1 == 65536 with their ranges both being
2763 	 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2764 	 we cannot claim that the product is in ~[0,0].  Note that we
2765 	 are guaranteed to have vr0.type == vr1.type at this
2766 	 point.  */
2767       if (vr0.type == VR_ANTI_RANGE
2768 	  && !TYPE_OVERFLOW_UNDEFINED (expr_type))
2769 	{
2770 	  set_value_range_to_varying (vr);
2771 	  return;
2772 	}
2773 
2774       extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2775       return;
2776     }
2777   else if (code == RSHIFT_EXPR
2778 	   || code == LSHIFT_EXPR)
2779     {
2780       /* If we have a shift with any shift values outside [0..prec-1],
2781 	 then drop to VR_VARYING.  Outside of this range we get undefined
2782 	 behavior from the shift operation.  We cannot even trust
2783 	 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2784 	 shifts, and the operation at the tree level may be widened.  */
2785       if (range_int_cst_p (&vr1)
2786 	  && compare_tree_int (vr1.min, 0) >= 0
2787 	  && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1)
2788 	{
2789 	  if (code == RSHIFT_EXPR)
2790 	    {
2791 	      extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2792 	      return;
2793 	    }
2794 	  /* We can map lshifts by constants to MULT_EXPR handling.  */
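	  /* For instance, a shift by the singleton range [3, 3] is handled
	     below as a multiplication by the constant range [8, 8]
	     (1 << 3), with flag_wrapv temporarily enabled so the multiply
	     uses wrapping semantics.  */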
2795 	  else if (code == LSHIFT_EXPR
2796 		   && range_int_cst_singleton_p (&vr1))
2797 	    {
2798 	      bool saved_flag_wrapv;
2799 	      value_range_t vr1p = VR_INITIALIZER;
2800 	      vr1p.type = VR_RANGE;
2801 	      vr1p.min
2802 		= double_int_to_tree (expr_type,
2803 				      double_int_one
2804 				      .llshift (TREE_INT_CST_LOW (vr1.min),
2805 					        TYPE_PRECISION (expr_type)));
2806 	      vr1p.max = vr1p.min;
2807 	      /* We have to use a wrapping multiply though as signed overflow
2808 		 on lshifts is implementation defined in C89.  */
2809 	      saved_flag_wrapv = flag_wrapv;
2810 	      flag_wrapv = 1;
2811 	      extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type,
2812 						&vr0, &vr1p);
2813 	      flag_wrapv = saved_flag_wrapv;
2814 	      return;
2815 	    }
2816 	  else if (code == LSHIFT_EXPR
2817 		   && range_int_cst_p (&vr0))
2818 	    {
2819 	      int prec = TYPE_PRECISION (expr_type);
2820 	      int overflow_pos = prec;
2821 	      int bound_shift;
2822 	      double_int bound, complement, low_bound, high_bound;
2823 	      bool uns = TYPE_UNSIGNED (expr_type);
2824 	      bool in_bounds = false;
2825 
2826 	      if (!uns)
2827 		overflow_pos -= 1;
2828 
2829 	      bound_shift = overflow_pos - TREE_INT_CST_LOW (vr1.max);
2830 	      /* If bound_shift == HOST_BITS_PER_DOUBLE_INT, the llshift can
2831 		 overflow.  However, for that to happen, vr1.max needs to be
2832 		 zero, which means vr1 is a singleton range of zero, which
2833 		 means it should be handled by the previous LSHIFT_EXPR
2834 		 if-clause.  */
2835 	      bound = double_int_one.llshift (bound_shift, prec);
2836 	      complement = ~(bound - double_int_one);
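	      /* As a sketch: for a 32-bit unsigned type with vr1.max == 4,
		 BOUND_SHIFT is 28, BOUND is 0x10000000 and COMPLEMENT is
		 0xf0000000, so values below 0x10000000 shift out only
		 zeroes and values above 0xf0000000 shift out only ones
		 for shift counts of at most 4.  */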
2837 
2838 	      if (uns)
2839 		{
2840 		  low_bound = bound.zext (prec);
2841 		  high_bound = complement.zext (prec);
2842 		  if (tree_to_double_int (vr0.max).ult (low_bound))
2843 		    {
2844 		      /* [5, 6] << [1, 2] == [10, 24].  */
2845 		      /* We're shifting out only zeroes, the value increases
2846 			 monotonically.  */
2847 		      in_bounds = true;
2848 		    }
2849 		  else if (high_bound.ult (tree_to_double_int (vr0.min)))
2850 		    {
2851 		      /* [0xffffff00, 0xffffffff] << [1, 2]
2852 		         == [0xfffffc00, 0xfffffffe].  */
2853 		      /* We're shifting out only ones, the value decreases
2854 			 monotonically.  */
2855 		      in_bounds = true;
2856 		    }
2857 		}
2858 	      else
2859 		{
2860 		  /* [-1, 1] << [1, 2] == [-4, 4].  */
2861 		  low_bound = complement.sext (prec);
2862 		  high_bound = bound;
2863 		  if (tree_to_double_int (vr0.max).slt (high_bound)
2864 		      && low_bound.slt (tree_to_double_int (vr0.min)))
2865 		    {
2866 		      /* For non-negative numbers, we're shifting out only
2867 			 zeroes, the value increases monotonically.
2868 			 For negative numbers, we're shifting out only ones, the
2869 			 value decreases monotonically.  */
2870 		      in_bounds = true;
2871 		    }
2872 		}
2873 
2874 	      if (in_bounds)
2875 		{
2876 		  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2877 		  return;
2878 		}
2879 	    }
2880 	}
2881       set_value_range_to_varying (vr);
2882       return;
2883     }
2884   else if (code == TRUNC_DIV_EXPR
2885 	   || code == FLOOR_DIV_EXPR
2886 	   || code == CEIL_DIV_EXPR
2887 	   || code == EXACT_DIV_EXPR
2888 	   || code == ROUND_DIV_EXPR)
2889     {
2890       if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2891 	{
2892 	  /* For division, if op1 has VR_RANGE but op0 does not, something
2893 	     can be deduced just from that range.  Say [min, max] / [4, max]
2894 	     gives [min / 4, max / 4] range.  */
2895 	  if (vr1.type == VR_RANGE
2896 	      && !symbolic_range_p (&vr1)
2897 	      && range_includes_zero_p (vr1.min, vr1.max) == 0)
2898 	    {
2899 	      vr0.type = type = VR_RANGE;
2900 	      vr0.min = vrp_val_min (expr_type);
2901 	      vr0.max = vrp_val_max (expr_type);
2902 	    }
2903 	  else
2904 	    {
2905 	      set_value_range_to_varying (vr);
2906 	      return;
2907 	    }
2908 	}
2909 
2910       /* For divisions, if the function can throw non-call exceptions,
2911 	 we must not eliminate a division by zero.  */
2912       if (cfun->can_throw_non_call_exceptions
2913 	  && (vr1.type != VR_RANGE
2914 	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
2915 	{
2916 	  set_value_range_to_varying (vr);
2917 	  return;
2918 	}
2919 
2920       /* For divisions, if op0 is VR_RANGE, we can deduce a range
2921 	 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2922 	 include 0.  */
2923       if (vr0.type == VR_RANGE
2924 	  && (vr1.type != VR_RANGE
2925 	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
2926 	{
2927 	  tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2928 	  int cmp;
2929 
2930 	  min = NULL_TREE;
2931 	  max = NULL_TREE;
2932 	  if (TYPE_UNSIGNED (expr_type)
2933 	      || value_range_nonnegative_p (&vr1))
2934 	    {
2935 	      /* For unsigned division or when divisor is known
2936 		 to be non-negative, the range has to cover
2937 		 all numbers from 0 to max for positive max
2938 		 and all numbers from min to 0 for negative min.  */
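	      /* E.g. for a dividend range of [-20, 30] and a divisor known
		 to be positive, the result is conservatively kept as
		 [-20, 30], since dividing by a positive value can only
		 shrink the magnitude.  */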
2939 	      cmp = compare_values (vr0.max, zero);
2940 	      if (cmp == -1)
2941 		max = zero;
2942 	      else if (cmp == 0 || cmp == 1)
2943 		max = vr0.max;
2944 	      else
2945 		type = VR_VARYING;
2946 	      cmp = compare_values (vr0.min, zero);
2947 	      if (cmp == 1)
2948 		min = zero;
2949 	      else if (cmp == 0 || cmp == -1)
2950 		min = vr0.min;
2951 	      else
2952 		type = VR_VARYING;
2953 	    }
2954 	  else
2955 	    {
2956 	      /* Otherwise the range is -max .. max or min .. -min
2957 		 depending on which bound is bigger in absolute value,
2958 		 as the division can change the sign.  */
2959 	      abs_extent_range (vr, vr0.min, vr0.max);
2960 	      return;
2961 	    }
2962 	  if (type == VR_VARYING)
2963 	    {
2964 	      set_value_range_to_varying (vr);
2965 	      return;
2966 	    }
2967 	}
2968       else
2969 	{
2970 	  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2971 	  return;
2972 	}
2973     }
2974   else if (code == TRUNC_MOD_EXPR)
2975     {
2976       if (vr1.type != VR_RANGE
2977 	  || range_includes_zero_p (vr1.min, vr1.max) != 0
2978 	  || vrp_val_is_min (vr1.min))
2979 	{
2980 	  set_value_range_to_varying (vr);
2981 	  return;
2982 	}
2983       type = VR_RANGE;
2984       /* Compute MAX <|vr1.min|, |vr1.max|> - 1.  */
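      /* For example, for a divisor range of [4, 10] this computes the
	 bound 9, giving a result range of [0, 9] for an unsigned or
	 known non-negative dividend and [-9, 9] otherwise.  */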
2985       max = fold_unary_to_constant (ABS_EXPR, expr_type, vr1.min);
2986       if (tree_int_cst_lt (max, vr1.max))
2987 	max = vr1.max;
2988       max = int_const_binop (MINUS_EXPR, max, integer_one_node);
2989       /* If the dividend is non-negative the modulus will be
2990 	 non-negative as well.  */
2991       if (TYPE_UNSIGNED (expr_type)
2992 	  || value_range_nonnegative_p (&vr0))
2993 	min = build_int_cst (TREE_TYPE (max), 0);
2994       else
2995 	min = fold_unary_to_constant (NEGATE_EXPR, expr_type, max);
2996     }
2997   else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
2998     {
2999       bool int_cst_range0, int_cst_range1;
3000       double_int may_be_nonzero0, may_be_nonzero1;
3001       double_int must_be_nonzero0, must_be_nonzero1;
3002 
3003       int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0,
3004 						  &must_be_nonzero0);
3005       int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1,
3006 						  &must_be_nonzero1);
3007 
3008       type = VR_RANGE;
3009       if (code == BIT_AND_EXPR)
3010 	{
3011 	  double_int dmax;
3012 	  min = double_int_to_tree (expr_type,
3013 				    must_be_nonzero0 & must_be_nonzero1);
3014 	  dmax = may_be_nonzero0 & may_be_nonzero1;
3015 	  /* If both input ranges contain only negative values we can
3016 	     truncate the result range maximum to the minimum of the
3017 	     input range maxima.  */
3018 	  if (int_cst_range0 && int_cst_range1
3019 	      && tree_int_cst_sgn (vr0.max) < 0
3020 	      && tree_int_cst_sgn (vr1.max) < 0)
3021 	    {
3022 	      dmax = dmax.min (tree_to_double_int (vr0.max),
3023 				     TYPE_UNSIGNED (expr_type));
3024 	      dmax = dmax.min (tree_to_double_int (vr1.max),
3025 				     TYPE_UNSIGNED (expr_type));
3026 	    }
3027 	  /* If either input range contains only non-negative values
3028 	     we can truncate the result range maximum to the respective
3029 	     maximum of the input range.  */
3030 	  if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
3031 	    dmax = dmax.min (tree_to_double_int (vr0.max),
3032 				   TYPE_UNSIGNED (expr_type));
3033 	  if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
3034 	    dmax = dmax.min (tree_to_double_int (vr1.max),
3035 				   TYPE_UNSIGNED (expr_type));
3036 	  max = double_int_to_tree (expr_type, dmax);
3037 	}
3038       else if (code == BIT_IOR_EXPR)
3039 	{
3040 	  double_int dmin;
3041 	  max = double_int_to_tree (expr_type,
3042 				    may_be_nonzero0 | may_be_nonzero1);
3043 	  dmin = must_be_nonzero0 | must_be_nonzero1;
3044 	  /* If both input ranges contain only non-negative values we can
3045 	     truncate the minimum of the result range to the maximum
3046 	     of the input range minima.  */
3047 	  if (int_cst_range0 && int_cst_range1
3048 	      && tree_int_cst_sgn (vr0.min) >= 0
3049 	      && tree_int_cst_sgn (vr1.min) >= 0)
3050 	    {
3051 	      dmin = dmin.max (tree_to_double_int (vr0.min),
3052 			       TYPE_UNSIGNED (expr_type));
3053 	      dmin = dmin.max (tree_to_double_int (vr1.min),
3054 			       TYPE_UNSIGNED (expr_type));
3055 	    }
3056 	  /* If either input range contains only negative values
3057 	     we can truncate the minimum of the result range to the
3058 	     respective input range minimum.  */
3059 	  if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
3060 	    dmin = dmin.max (tree_to_double_int (vr0.min),
3061 			     TYPE_UNSIGNED (expr_type));
3062 	  if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
3063 	    dmin = dmin.max (tree_to_double_int (vr1.min),
3064 			     TYPE_UNSIGNED (expr_type));
3065 	  min = double_int_to_tree (expr_type, dmin);
3066 	}
3067       else if (code == BIT_XOR_EXPR)
3068 	{
3069 	  double_int result_zero_bits, result_one_bits;
3070 	  result_zero_bits = (must_be_nonzero0 & must_be_nonzero1)
3071 			     | ~(may_be_nonzero0 | may_be_nonzero1);
3072 	  result_one_bits = must_be_nonzero0.and_not (may_be_nonzero1)
3073 			    | must_be_nonzero1.and_not (may_be_nonzero0);
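	  /* A bit of the result is known zero if it is known zero in both
	     operands or known one in both; it is known one if it is known
	     one in one operand and known zero in the other.  E.g. for the
	     singletons 12 (1100) and 10 (1010) this gives the exact
	     result [6, 6].  */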
3074 	  max = double_int_to_tree (expr_type, ~result_zero_bits);
3075 	  min = double_int_to_tree (expr_type, result_one_bits);
3076 	  /* If the range has all positive or all negative values the
3077 	     result is better than VARYING.  */
3078 	  if (tree_int_cst_sgn (min) < 0
3079 	      || tree_int_cst_sgn (max) >= 0)
3080 	    ;
3081 	  else
3082 	    max = min = NULL_TREE;
3083 	}
3084     }
3085   else
3086     gcc_unreachable ();
3087 
3088   /* If either MIN or MAX overflowed, then set the resulting range to
3089      VARYING.  But we do accept an overflow infinity
3090      representation.  */
3091   if (min == NULL_TREE
3092       || !is_gimple_min_invariant (min)
3093       || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
3094       || max == NULL_TREE
3095       || !is_gimple_min_invariant (max)
3096       || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
3097     {
3098       set_value_range_to_varying (vr);
3099       return;
3100     }
3101 
3102   /* We punt if:
3103      1) [-INF, +INF]
3104      2) [-INF, +-INF(OVF)]
3105      3) [+-INF(OVF), +INF]
3106      4) [+-INF(OVF), +-INF(OVF)]
3107      We learn nothing when we have INF and INF(OVF) on both sides.
3108      Note that we do accept [-INF, -INF] and [+INF, +INF] without
3109      overflow.  */
3110   if ((vrp_val_is_min (min) || is_overflow_infinity (min))
3111       && (vrp_val_is_max (max) || is_overflow_infinity (max)))
3112     {
3113       set_value_range_to_varying (vr);
3114       return;
3115     }
3116 
3117   cmp = compare_values (min, max);
3118   if (cmp == -2 || cmp == 1)
3119     {
3120       /* If the new range has its limits swapped around (MIN > MAX),
3121 	 then the operation caused one of them to wrap around, mark
3122 	 the new range VARYING.  */
3123       set_value_range_to_varying (vr);
3124     }
3125   else
3126     set_value_range (vr, type, min, max, NULL);
3127 }
3128 
3129 /* Extract range information from a binary expression OP0 CODE OP1 based on
3130    the ranges of each of its operands with resulting type EXPR_TYPE.
3131    The resulting range is stored in *VR.  */
3132 
3133 static void
3134 extract_range_from_binary_expr (value_range_t *vr,
3135 				enum tree_code code,
3136 				tree expr_type, tree op0, tree op1)
3137 {
3138   value_range_t vr0 = VR_INITIALIZER;
3139   value_range_t vr1 = VR_INITIALIZER;
3140 
3141   /* Get value ranges for each operand.  For constant operands, create
3142      a new value range with the operand to simplify processing.  */
3143   if (TREE_CODE (op0) == SSA_NAME)
3144     vr0 = *(get_value_range (op0));
3145   else if (is_gimple_min_invariant (op0))
3146     set_value_range_to_value (&vr0, op0, NULL);
3147   else
3148     set_value_range_to_varying (&vr0);
3149 
3150   if (TREE_CODE (op1) == SSA_NAME)
3151     vr1 = *(get_value_range (op1));
3152   else if (is_gimple_min_invariant (op1))
3153     set_value_range_to_value (&vr1, op1, NULL);
3154   else
3155     set_value_range_to_varying (&vr1);
3156 
3157   extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
3158 }
3159 
3160 /* Extract range information from a unary operation CODE based on
3161    the range of its operand *VR0 of type OP0_TYPE with resulting type TYPE.
3162    The resulting range is stored in *VR.  */
3163 
3164 static void
3165 extract_range_from_unary_expr_1 (value_range_t *vr,
3166 				 enum tree_code code, tree type,
3167 				 value_range_t *vr0_, tree op0_type)
3168 {
3169   value_range_t vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
3170 
3171   /* VRP only operates on integral and pointer types.  */
3172   if (!(INTEGRAL_TYPE_P (op0_type)
3173 	|| POINTER_TYPE_P (op0_type))
3174       || !(INTEGRAL_TYPE_P (type)
3175 	   || POINTER_TYPE_P (type)))
3176     {
3177       set_value_range_to_varying (vr);
3178       return;
3179     }
3180 
3181   /* If VR0 is UNDEFINED, so is the result.  */
3182   if (vr0.type == VR_UNDEFINED)
3183     {
3184       set_value_range_to_undefined (vr);
3185       return;
3186     }
3187 
3188   /* Handle operations that we express in terms of others.  */
3189   if (code == PAREN_EXPR)
3190     {
3191       /* PAREN_EXPR is a simple copy.  */
3192       copy_value_range (vr, &vr0);
3193       return;
3194     }
3195   else if (code == NEGATE_EXPR)
3196     {
3197       /* -X is simply 0 - X, so re-use existing code that also handles
3198          anti-ranges fine.  */
3199       value_range_t zero = VR_INITIALIZER;
3200       set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
3201       extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
3202       return;
3203     }
3204   else if (code == BIT_NOT_EXPR)
3205     {
3206       /* ~X is simply -1 - X, so re-use existing code that also handles
3207          anti-ranges fine.  */
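      /* For instance, for X in [3, 7] this yields ~X in [-8, -4].  */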
3208       value_range_t minusone = VR_INITIALIZER;
3209       set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
3210       extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
3211 					type, &minusone, &vr0);
3212       return;
3213     }
3214 
3215   /* Now canonicalize anti-ranges to ranges when they are not symbolic
3216      and express op ~[]  as (op []') U (op []'').  */
3217   if (vr0.type == VR_ANTI_RANGE
3218       && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
3219     {
3220       extract_range_from_unary_expr_1 (vr, code, type, &vrtem0, op0_type);
3221       if (vrtem1.type != VR_UNDEFINED)
3222 	{
3223 	  value_range_t vrres = VR_INITIALIZER;
3224 	  extract_range_from_unary_expr_1 (&vrres, code, type,
3225 					   &vrtem1, op0_type);
3226 	  vrp_meet (vr, &vrres);
3227 	}
3228       return;
3229     }
3230 
3231   if (CONVERT_EXPR_CODE_P (code))
3232     {
3233       tree inner_type = op0_type;
3234       tree outer_type = type;
3235 
3236       /* If the expression evaluates to a pointer, we are only interested in
3237 	 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]).  */
3238       if (POINTER_TYPE_P (type))
3239 	{
3240 	  if (range_is_nonnull (&vr0))
3241 	    set_value_range_to_nonnull (vr, type);
3242 	  else if (range_is_null (&vr0))
3243 	    set_value_range_to_null (vr, type);
3244 	  else
3245 	    set_value_range_to_varying (vr);
3246 	  return;
3247 	}
3248 
3249       /* If VR0 is varying and we increase the type precision, assume
3250 	 a full range for the following transformation.  */
3251       if (vr0.type == VR_VARYING
3252 	  && INTEGRAL_TYPE_P (inner_type)
3253 	  && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
3254 	{
3255 	  vr0.type = VR_RANGE;
3256 	  vr0.min = TYPE_MIN_VALUE (inner_type);
3257 	  vr0.max = TYPE_MAX_VALUE (inner_type);
3258 	}
3259 
3260       /* If VR0 is a constant range or anti-range and the conversion is
3261 	 not truncating we can convert the min and max values and
3262 	 canonicalize the resulting range.  Otherwise we can do the
3263 	 conversion if the size of the range is less than what the
3264 	 precision of the target type can represent and the range is
3265 	 not an anti-range.  */
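      /* As an illustrative sketch: widening [10, 20] from a 16-bit to a
	 32-bit type simply converts the bounds, and narrowing it to an
	 8-bit type is also allowed because the range spans fewer than
	 2**8 values, while a narrowing conversion of [0, 1000] to an
	 8-bit type is rejected here and drops to VARYING below.  */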
3266       if ((vr0.type == VR_RANGE
3267 	   || vr0.type == VR_ANTI_RANGE)
3268 	  && TREE_CODE (vr0.min) == INTEGER_CST
3269 	  && TREE_CODE (vr0.max) == INTEGER_CST
3270 	  && (!is_overflow_infinity (vr0.min)
3271 	      || (vr0.type == VR_RANGE
3272 		  && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3273 		  && needs_overflow_infinity (outer_type)
3274 		  && supports_overflow_infinity (outer_type)))
3275 	  && (!is_overflow_infinity (vr0.max)
3276 	      || (vr0.type == VR_RANGE
3277 		  && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3278 		  && needs_overflow_infinity (outer_type)
3279 		  && supports_overflow_infinity (outer_type)))
3280 	  && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
3281 	      || (vr0.type == VR_RANGE
3282 		  && integer_zerop (int_const_binop (RSHIFT_EXPR,
3283 		       int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
3284 		         size_int (TYPE_PRECISION (outer_type)))))))
3285 	{
3286 	  tree new_min, new_max;
3287 	  if (is_overflow_infinity (vr0.min))
3288 	    new_min = negative_overflow_infinity (outer_type);
3289 	  else
3290 	    new_min = force_fit_type_double (outer_type,
3291 					     tree_to_double_int (vr0.min),
3292 					     0, false);
3293 	  if (is_overflow_infinity (vr0.max))
3294 	    new_max = positive_overflow_infinity (outer_type);
3295 	  else
3296 	    new_max = force_fit_type_double (outer_type,
3297 					     tree_to_double_int (vr0.max),
3298 					     0, false);
3299 	  set_and_canonicalize_value_range (vr, vr0.type,
3300 					    new_min, new_max, NULL);
3301 	  return;
3302 	}
3303 
3304       set_value_range_to_varying (vr);
3305       return;
3306     }
3307   else if (code == ABS_EXPR)
3308     {
3309       tree min, max;
3310       int cmp;
3311 
3312       /* Pass through vr0 in the easy cases.  */
3313       if (TYPE_UNSIGNED (type)
3314 	  || value_range_nonnegative_p (&vr0))
3315 	{
3316 	  copy_value_range (vr, &vr0);
3317 	  return;
3318 	}
3319 
3320       /* For the remaining varying or symbolic ranges we can't do anything
3321 	 useful.  */
3322       if (vr0.type == VR_VARYING
3323 	  || symbolic_range_p (&vr0))
3324 	{
3325 	  set_value_range_to_varying (vr);
3326 	  return;
3327 	}
3328 
3329       /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3330          useful range.  */
3331       if (!TYPE_OVERFLOW_UNDEFINED (type)
3332 	  && ((vr0.type == VR_RANGE
3333 	       && vrp_val_is_min (vr0.min))
3334 	      || (vr0.type == VR_ANTI_RANGE
3335 		  && !vrp_val_is_min (vr0.min))))
3336 	{
3337 	  set_value_range_to_varying (vr);
3338 	  return;
3339 	}
3340 
3341       /* ABS_EXPR may flip the range around if the original range
3342 	 included negative values.  */
3343       if (is_overflow_infinity (vr0.min))
3344 	min = positive_overflow_infinity (type);
3345       else if (!vrp_val_is_min (vr0.min))
3346 	min = fold_unary_to_constant (code, type, vr0.min);
3347       else if (!needs_overflow_infinity (type))
3348 	min = TYPE_MAX_VALUE (type);
3349       else if (supports_overflow_infinity (type))
3350 	min = positive_overflow_infinity (type);
3351       else
3352 	{
3353 	  set_value_range_to_varying (vr);
3354 	  return;
3355 	}
3356 
3357       if (is_overflow_infinity (vr0.max))
3358 	max = positive_overflow_infinity (type);
3359       else if (!vrp_val_is_min (vr0.max))
3360 	max = fold_unary_to_constant (code, type, vr0.max);
3361       else if (!needs_overflow_infinity (type))
3362 	max = TYPE_MAX_VALUE (type);
3363       else if (supports_overflow_infinity (type)
3364 	       /* We shouldn't generate [+INF, +INF] as set_value_range
3365 		  doesn't like this and ICEs.  */
3366 	       && !is_positive_overflow_infinity (min))
3367 	max = positive_overflow_infinity (type);
3368       else
3369 	{
3370 	  set_value_range_to_varying (vr);
3371 	  return;
3372 	}
3373 
3374       cmp = compare_values (min, max);
3375 
3376       /* If a VR_ANTI_RANGE contains zero, then we have
3377 	 ~[-INF, min(abs(MIN), abs(MAX))].  */
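      /* For example, with undefined signed overflow, the absolute value of
	 ~[-3, 2] becomes ~[-INF, 2], i.e. the result is at least 3.  */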
3378       if (vr0.type == VR_ANTI_RANGE)
3379 	{
3380 	  if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3381 	    {
3382 	      /* Take the lower of the two values.  */
3383 	      if (cmp != 1)
3384 		max = min;
3385 
3386 	      /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3387 	         or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3388 		 flag_wrapv is set and the original anti-range doesn't include
3389 	         TYPE_MIN_VALUE; remember that -TYPE_MIN_VALUE = TYPE_MIN_VALUE.  */
3390 	      if (TYPE_OVERFLOW_WRAPS (type))
3391 		{
3392 		  tree type_min_value = TYPE_MIN_VALUE (type);
3393 
3394 		  min = (vr0.min != type_min_value
3395 			 ? int_const_binop (PLUS_EXPR, type_min_value,
3396 					    integer_one_node)
3397 			 : type_min_value);
3398 		}
3399 	      else
3400 		{
3401 		  if (overflow_infinity_range_p (&vr0))
3402 		    min = negative_overflow_infinity (type);
3403 		  else
3404 		    min = TYPE_MIN_VALUE (type);
3405 		}
3406 	    }
3407 	  else
3408 	    {
3409 	      /* All else has failed, so create the range [0, INF], even for
3410 	         flag_wrapv since TYPE_MIN_VALUE is in the original
3411 	         anti-range.  */
3412 	      vr0.type = VR_RANGE;
3413 	      min = build_int_cst (type, 0);
3414 	      if (needs_overflow_infinity (type))
3415 		{
3416 		  if (supports_overflow_infinity (type))
3417 		    max = positive_overflow_infinity (type);
3418 		  else
3419 		    {
3420 		      set_value_range_to_varying (vr);
3421 		      return;
3422 		    }
3423 		}
3424 	      else
3425 		max = TYPE_MAX_VALUE (type);
3426 	    }
3427 	}
3428 
3429       /* If the range contains zero then we know that the minimum value in the
3430          range will be zero.  */
3431       else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3432 	{
3433 	  if (cmp == 1)
3434 	    max = min;
3435 	  min = build_int_cst (type, 0);
3436 	}
3437       else
3438 	{
3439           /* If the range was reversed, swap MIN and MAX.  */
3440 	  if (cmp == 1)
3441 	    {
3442 	      tree t = min;
3443 	      min = max;
3444 	      max = t;
3445 	    }
3446 	}
3447 
3448       cmp = compare_values (min, max);
3449       if (cmp == -2 || cmp == 1)
3450 	{
3451 	  /* If the new range has its limits swapped around (MIN > MAX),
3452 	     then the operation caused one of them to wrap around; mark
3453 	     the new range VARYING.  */
3454 	  set_value_range_to_varying (vr);
3455 	}
3456       else
3457 	set_value_range (vr, vr0.type, min, max, NULL);
3458       return;
3459     }
3460 
3461   /* For unhandled operations fall back to varying.  */
3462   set_value_range_to_varying (vr);
3463   return;
3464 }
3465 
3466 
3467 /* Extract range information from a unary expression CODE OP0 based on
3468    the range of its operand.  The result has type TYPE and the
3469    resulting range is stored in *VR.  */
3470 
3471 static void
3472 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
3473 			       tree type, tree op0)
3474 {
3475   value_range_t vr0 = VR_INITIALIZER;
3476 
3477   /* Get value ranges for the operand.  For constant operands, create
3478      a new value range with the operand to simplify processing.  */
3479   if (TREE_CODE (op0) == SSA_NAME)
3480     vr0 = *(get_value_range (op0));
3481   else if (is_gimple_min_invariant (op0))
3482     set_value_range_to_value (&vr0, op0, NULL);
3483   else
3484     set_value_range_to_varying (&vr0);
3485 
3486   extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0));
3487 }
3488 
3489 
3490 /* Extract range information from a conditional expression STMT based on
3491    the ranges of each of its operands and the expression code.  */
3492 
3493 static void
3494 extract_range_from_cond_expr (value_range_t *vr, gimple stmt)
3495 {
3496   tree op0, op1;
3497   value_range_t vr0 = VR_INITIALIZER;
3498   value_range_t vr1 = VR_INITIALIZER;
3499 
3500   /* Get value ranges for each operand.  For constant operands, create
3501      a new value range with the operand to simplify processing.  */
3502   op0 = gimple_assign_rhs2 (stmt);
3503   if (TREE_CODE (op0) == SSA_NAME)
3504     vr0 = *(get_value_range (op0));
3505   else if (is_gimple_min_invariant (op0))
3506     set_value_range_to_value (&vr0, op0, NULL);
3507   else
3508     set_value_range_to_varying (&vr0);
3509 
3510   op1 = gimple_assign_rhs3 (stmt);
3511   if (TREE_CODE (op1) == SSA_NAME)
3512     vr1 = *(get_value_range (op1));
3513   else if (is_gimple_min_invariant (op1))
3514     set_value_range_to_value (&vr1, op1, NULL);
3515   else
3516     set_value_range_to_varying (&vr1);
3517 
3518   /* The resulting value range is the union of the operand ranges.  */
3519   copy_value_range (vr, &vr0);
3520   vrp_meet (vr, &vr1);
3521 }
3522 
3523 
3524 /* Extract range information from a comparison expression EXPR based
3525    on the ranges of its operands and the comparison code.  */
3526 
3527 static void
3528 extract_range_from_comparison (value_range_t *vr, enum tree_code code,
3529 			       tree type, tree op0, tree op1)
3530 {
3531   bool sop = false;
3532   tree val;
3533 
3534   val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3535   						 NULL);
3536 
3537   /* A disadvantage of using a special infinity as an overflow
3538      representation is that we lose the ability to record overflow
3539      when we don't have an infinity.  So we have to ignore a result
3540      which relies on overflow.  */
3541 
3542   if (val && !is_overflow_infinity (val) && !sop)
3543     {
3544       /* Since this expression was found on the RHS of an assignment,
3545 	 its type may be different from _Bool.  Convert VAL to EXPR's
3546 	 type.  */
3547       val = fold_convert (type, val);
3548       if (is_gimple_min_invariant (val))
3549 	set_value_range_to_value (vr, val, vr->equiv);
3550       else
3551 	set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3552     }
3553   else
3554     /* The result of a comparison is always true or false.  */
3555     set_value_range_to_truthvalue (vr, type);
3556 }
3557 
3558 /* Try to derive a nonnegative or nonzero range out of STMT relying
3559    primarily on generic routines in fold in conjunction with range data.
3560    Store the result in *VR.  */
3561 
3562 static void
3563 extract_range_basic (value_range_t *vr, gimple stmt)
3564 {
3565   bool sop = false;
3566   tree type = gimple_expr_type (stmt);
3567 
3568   /* If the call is __builtin_constant_p and the argument is a
3569      function parameter resolve it to false.  This avoids bogus
3570      function parameter, resolve it to false.  This avoids bogus
3571      ???  We could do this as early as inlining is finished.  */
3572   if (gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P))
3573     {
3574       tree arg = gimple_call_arg (stmt, 0);
3575       if (TREE_CODE (arg) == SSA_NAME
3576 	  && SSA_NAME_IS_DEFAULT_DEF (arg)
3577 	  && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL)
3578 	set_value_range_to_null (vr, type);
3579     }
3580   else if (INTEGRAL_TYPE_P (type)
3581 	   && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
3582     set_value_range_to_nonnegative (vr, type,
3583 				    sop || stmt_overflow_infinity (stmt));
3584   else if (vrp_stmt_computes_nonzero (stmt, &sop)
3585 	   && !sop)
3586     set_value_range_to_nonnull (vr, type);
3587   else
3588     set_value_range_to_varying (vr);
3589 }
3590 
3591 
3592 /* Try to compute a useful range out of assignment STMT and store it
3593    in *VR.  */
3594 
3595 static void
3596 extract_range_from_assignment (value_range_t *vr, gimple stmt)
3597 {
3598   enum tree_code code = gimple_assign_rhs_code (stmt);
3599 
3600   if (code == ASSERT_EXPR)
3601     extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
3602   else if (code == SSA_NAME)
3603     extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
3604   else if (TREE_CODE_CLASS (code) == tcc_binary)
3605     extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
3606 				    gimple_expr_type (stmt),
3607 				    gimple_assign_rhs1 (stmt),
3608 				    gimple_assign_rhs2 (stmt));
3609   else if (TREE_CODE_CLASS (code) == tcc_unary)
3610     extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
3611 				   gimple_expr_type (stmt),
3612 				   gimple_assign_rhs1 (stmt));
3613   else if (code == COND_EXPR)
3614     extract_range_from_cond_expr (vr, stmt);
3615   else if (TREE_CODE_CLASS (code) == tcc_comparison)
3616     extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
3617 				   gimple_expr_type (stmt),
3618 				   gimple_assign_rhs1 (stmt),
3619 				   gimple_assign_rhs2 (stmt));
3620   else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
3621 	   && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
3622     set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
3623   else
3624     set_value_range_to_varying (vr);
3625 
3626   if (vr->type == VR_VARYING)
3627     extract_range_basic (vr, stmt);
3628 }
3629 
3630 /* Given a range VR, a LOOP and a variable VAR, determine whether it
3631    would be profitable to adjust VR using scalar evolution information
3632    for VAR.  If so, update VR with the new limits.  */
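/* For example, if VAR starts at 0 and steps by 1 in a loop whose latch is
   known to execute at most 9 times, a VARYING range for VAR can be
   tightened to [0, 9] by the code below.  */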
3633 
3634 static void
3635 adjust_range_with_scev (value_range_t *vr, struct loop *loop,
3636 			gimple stmt, tree var)
3637 {
3638   tree init, step, chrec, tmin, tmax, min, max, type, tem;
3639   enum ev_direction dir;
3640 
3641   /* TODO.  Don't adjust anti-ranges.  An anti-range may provide
3642      better opportunities than a regular range, but I'm not sure.  */
3643   if (vr->type == VR_ANTI_RANGE)
3644     return;
3645 
3646   chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
3647 
3648   /* Like in PR19590, scev can return a constant function.  */
3649   if (is_gimple_min_invariant (chrec))
3650     {
3651       set_value_range_to_value (vr, chrec, vr->equiv);
3652       return;
3653     }
3654 
3655   if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3656     return;
3657 
3658   init = initial_condition_in_loop_num (chrec, loop->num);
3659   tem = op_with_constant_singleton_value_range (init);
3660   if (tem)
3661     init = tem;
3662   step = evolution_part_in_loop_num (chrec, loop->num);
3663   tem = op_with_constant_singleton_value_range (step);
3664   if (tem)
3665     step = tem;
3666 
3667   /* If STEP is symbolic, we can't know whether INIT will be the
3668      minimum or maximum value in the range.  Also, unless INIT is
3669      a simple expression, compare_values and possibly other functions
3670      in tree-vrp won't be able to handle it.  */
3671   if (step == NULL_TREE
3672       || !is_gimple_min_invariant (step)
3673       || !valid_value_p (init))
3674     return;
3675 
3676   dir = scev_direction (chrec);
3677   if (/* Do not adjust ranges if we do not know whether the iv increases
3678 	 or decreases,  ... */
3679       dir == EV_DIR_UNKNOWN
3680       /* ... or if it may wrap.  */
3681       || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3682 				true))
3683     return;
3684 
3685   /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
3686      negative_overflow_infinity and positive_overflow_infinity,
3687      because we have concluded that the loop probably does not
3688      wrap.  */
3689 
3690   type = TREE_TYPE (var);
3691   if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
3692     tmin = lower_bound_in_type (type, type);
3693   else
3694     tmin = TYPE_MIN_VALUE (type);
3695   if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
3696     tmax = upper_bound_in_type (type, type);
3697   else
3698     tmax = TYPE_MAX_VALUE (type);
3699 
3700   /* Try to use estimated number of iterations for the loop to constrain the
3701   /* Try to use the estimated number of iterations of the loop to constrain the
3702   if (TREE_CODE (step) == INTEGER_CST
3703       && is_gimple_val (init)
3704       && (TREE_CODE (init) != SSA_NAME
3705 	  || get_value_range (init)->type == VR_RANGE))
3706     {
3707       double_int nit;
3708 
3709       /* We are only entering here for loop header PHI nodes, so the
3710 	 number of latch executions is the correct count to use.  */
3711       if (max_loop_iterations (loop, &nit))
3712 	{
3713 	  value_range_t maxvr = VR_INITIALIZER;
3714 	  double_int dtmp;
3715 	  bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (step));
3716 	  bool overflow = false;
3717 
3718 	  dtmp = tree_to_double_int (step)
3719 		 .mul_with_sign (nit, unsigned_p, &overflow);
3720 	  /* If the multiplication overflowed we can't do a meaningful
3721 	     adjustment.  Likewise if the result doesn't fit in the type
3722 	     of the induction variable.  For a signed type we have to
3723 	     check whether the result has the expected signedness, which
3724 	     is that of the step, since the number of iterations is unsigned.  */
3725 	  if (!overflow
3726 	      && double_int_fits_to_tree_p (TREE_TYPE (init), dtmp)
3727 	      && (unsigned_p
3728 		  || ((dtmp.high ^ TREE_INT_CST_HIGH (step)) >= 0)))
3729 	    {
3730 	      tem = double_int_to_tree (TREE_TYPE (init), dtmp);
3731 	      extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
3732 					      TREE_TYPE (init), init, tem);
3733 	      /* Likewise if the addition did.  */
3734 	      if (maxvr.type == VR_RANGE)
3735 		{
3736 		  tmin = maxvr.min;
3737 		  tmax = maxvr.max;
3738 		}
3739 	    }
3740 	}
3741     }
3742 
3743   if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3744     {
3745       min = tmin;
3746       max = tmax;
3747 
3748       /* For VARYING or UNDEFINED ranges, just about anything we get
3749 	 from scalar evolutions should be better.  */
3750 
3751       if (dir == EV_DIR_DECREASES)
3752 	max = init;
3753       else
3754 	min = init;
3755 
3756       /* If we would create an invalid range, then just assume we
3757 	 know absolutely nothing.  This may be over-conservative,
3758 	 but it's clearly safe, and should happen only in unreachable
3759          parts of code, or for invalid programs.  */
3760       if (compare_values (min, max) == 1)
3761 	return;
3762 
3763       set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3764     }
3765   else if (vr->type == VR_RANGE)
3766     {
3767       min = vr->min;
3768       max = vr->max;
3769 
3770       if (dir == EV_DIR_DECREASES)
3771 	{
3772 	  /* INIT is the maximum value.  If INIT is lower than VR->MAX
3773 	     but no smaller than VR->MIN, set VR->MAX to INIT.  */
3774 	  if (compare_values (init, max) == -1)
3775 	    max = init;
3776 
3777 	  /* According to the loop information, the variable does not
3778 	     overflow.  If we think it does, probably because of an
3779 	     overflow due to arithmetic on a different INF value,
3780 	     reset now.  */
3781 	  if (is_negative_overflow_infinity (min)
3782 	      || compare_values (min, tmin) == -1)
3783 	    min = tmin;
3784 
3785 	}
3786       else
3787 	{
3788 	  /* If INIT is bigger than VR->MIN, set VR->MIN to INIT.  */
3789 	  if (compare_values (init, min) == 1)
3790 	    min = init;
3791 
3792 	  if (is_positive_overflow_infinity (max)
3793 	      || compare_values (tmax, max) == -1)
3794 	    max = tmax;
3795 	}
3796 
3797       /* If we just created an invalid range with the minimum
3798 	 greater than the maximum, we fail conservatively.
3799 	 This should happen only in unreachable
3800 	 parts of code, or for invalid programs.  */
3801       if (compare_values (min, max) == 1)
3802 	return;
3803 
3804       set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3805     }
3806 }
3807 
3808 /* Return true if VAR may overflow at STMT.  This checks any available
3809    loop information to see if we can determine that VAR does not
3810    overflow.  */
3811 
3812 static bool
3813 vrp_var_may_overflow (tree var, gimple stmt)
3814 {
3815   struct loop *l;
3816   tree chrec, init, step;
3817 
3818   if (current_loops == NULL)
3819     return true;
3820 
3821   l = loop_containing_stmt (stmt);
3822   if (l == NULL
3823       || !loop_outer (l))
3824     return true;
3825 
3826   chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var));
3827   if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3828     return true;
3829 
3830   init = initial_condition_in_loop_num (chrec, l->num);
3831   step = evolution_part_in_loop_num (chrec, l->num);
3832 
3833   if (step == NULL_TREE
3834       || !is_gimple_min_invariant (step)
3835       || !valid_value_p (init))
3836     return true;
3837 
3838   /* If we get here, we know something useful about VAR based on the
3839      loop information.  If it wraps, it may overflow.  */
3840 
3841   if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3842 			     true))
3843     return true;
3844 
3845   if (dump_file && (dump_flags & TDF_DETAILS) != 0)
3846     {
3847       print_generic_expr (dump_file, var, 0);
3848       fprintf (dump_file, ": loop information indicates it does not overflow\n");
3849     }
3850 
3851   return false;
3852 }
3853 
3854 
3855 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
3856 
3857    - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
3858      all the values in the ranges.
3859 
3860    - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
3861 
3862    - Return NULL_TREE if it is not always possible to determine the
3863      value of the comparison.
3864 
3865    Also set *STRICT_OVERFLOW_P to indicate whether a range with an
3866    overflow infinity was used in the test.  */
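/* For example, with VR0 = [1, 5] and VR1 = [6, 10], VR0 < VR1 is always
   true and VR0 == VR1 is always false.  */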
3867 
3868 
3869 static tree
3870 compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
3871 		bool *strict_overflow_p)
3872 {
3873   /* VARYING or UNDEFINED ranges cannot be compared.  */
3874   if (vr0->type == VR_VARYING
3875       || vr0->type == VR_UNDEFINED
3876       || vr1->type == VR_VARYING
3877       || vr1->type == VR_UNDEFINED)
3878     return NULL_TREE;
3879 
3880   /* Anti-ranges need to be handled separately.  */
3881   if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
3882     {
3883       /* If both are anti-ranges, then we cannot compute any
3884 	 comparison.  */
3885       if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
3886 	return NULL_TREE;
3887 
3888       /* These comparisons are never statically computable.  */
3889       if (comp == GT_EXPR
3890 	  || comp == GE_EXPR
3891 	  || comp == LT_EXPR
3892 	  || comp == LE_EXPR)
3893 	return NULL_TREE;
3894 
3895       /* Equality can be computed only between a range and an
3896 	 anti-range.  ~[VAL1, VAL2] == [VAL1, VAL2] is always false.  */
3897       if (vr0->type == VR_RANGE)
3898 	{
3899 	  /* To simplify processing, make VR0 the anti-range.  */
3900 	  value_range_t *tmp = vr0;
3901 	  vr0 = vr1;
3902 	  vr1 = tmp;
3903 	}
3904 
3905       gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
3906 
3907       if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
3908 	  && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
3909 	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3910 
3911       return NULL_TREE;
3912     }
3913 
3914   if (!usable_range_p (vr0, strict_overflow_p)
3915       || !usable_range_p (vr1, strict_overflow_p))
3916     return NULL_TREE;
3917 
3918   /* Simplify processing.  If COMP is GT_EXPR or GE_EXPR, switch the
3919      operands around and change the comparison code.  */
3920   if (comp == GT_EXPR || comp == GE_EXPR)
3921     {
3922       value_range_t *tmp;
3923       comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
3924       tmp = vr0;
3925       vr0 = vr1;
3926       vr1 = tmp;
3927     }
3928 
3929   if (comp == EQ_EXPR)
3930     {
3931       /* Equality may only be computed if both ranges represent
3932 	 exactly one value.  */
3933       if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
3934 	  && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
3935 	{
3936 	  int cmp_min = compare_values_warnv (vr0->min, vr1->min,
3937 					      strict_overflow_p);
3938 	  int cmp_max = compare_values_warnv (vr0->max, vr1->max,
3939 					      strict_overflow_p);
3940 	  if (cmp_min == 0 && cmp_max == 0)
3941 	    return boolean_true_node;
3942 	  else if (cmp_min != -2 && cmp_max != -2)
3943 	    return boolean_false_node;
3944 	}
3945       /* If [V0_MIN, V0_MAX] and [V1_MIN, V1_MAX] do not overlap, then V0 != V1.  */
3946       else if (compare_values_warnv (vr0->min, vr1->max,
3947 				     strict_overflow_p) == 1
3948 	       || compare_values_warnv (vr1->min, vr0->max,
3949 					strict_overflow_p) == 1)
3950 	return boolean_false_node;
3951 
3952       return NULL_TREE;
3953     }
3954   else if (comp == NE_EXPR)
3955     {
3956       int cmp1, cmp2;
3957 
3958       /* If VR0 is completely to the left or completely to the right
3959 	 of VR1, they are always different.  Notice that we need to
3960 	 make sure that both comparisons yield similar results to
3961 	 avoid comparing values that cannot be compared at
3962 	 compile-time.  */
3963       cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3964       cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3965       if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
3966 	return boolean_true_node;
3967 
3968       /* If VR0 and VR1 represent a single value and are identical,
3969 	 return false.  */
3970       else if (compare_values_warnv (vr0->min, vr0->max,
3971 				     strict_overflow_p) == 0
3972 	       && compare_values_warnv (vr1->min, vr1->max,
3973 					strict_overflow_p) == 0
3974 	       && compare_values_warnv (vr0->min, vr1->min,
3975 					strict_overflow_p) == 0
3976 	       && compare_values_warnv (vr0->max, vr1->max,
3977 					strict_overflow_p) == 0)
3978 	return boolean_false_node;
3979 
3980       /* Otherwise, they may or may not be different.  */
3981       else
3982 	return NULL_TREE;
3983     }
3984   else if (comp == LT_EXPR || comp == LE_EXPR)
3985     {
3986       int tst;
3987 
3988       /* If VR0 is to the left of VR1, return true.  */
3989       tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3990       if ((comp == LT_EXPR && tst == -1)
3991 	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3992 	{
3993 	  if (overflow_infinity_range_p (vr0)
3994 	      || overflow_infinity_range_p (vr1))
3995 	    *strict_overflow_p = true;
3996 	  return boolean_true_node;
3997 	}
3998 
3999       /* If VR0 is to the right of VR1, return false.  */
4000       tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
4001       if ((comp == LT_EXPR && (tst == 0 || tst == 1))
4002 	  || (comp == LE_EXPR && tst == 1))
4003 	{
4004 	  if (overflow_infinity_range_p (vr0)
4005 	      || overflow_infinity_range_p (vr1))
4006 	    *strict_overflow_p = true;
4007 	  return boolean_false_node;
4008 	}
4009 
4010       /* Otherwise, we don't know.  */
4011       return NULL_TREE;
4012     }
4013 
4014   gcc_unreachable ();
4015 }
4016 
4017 
4018 /* Given a value range VR, a value VAL and a comparison code COMP, return
4019    BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
4020    values in VR.  Return BOOLEAN_FALSE_NODE if the comparison
4021    always returns false.  Return NULL_TREE if it is not always
4022    possible to determine the value of the comparison.  Also set
4023    *STRICT_OVERFLOW_P to indicate whether a range with an overflow
4024    infinity was used in the test.  */
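/* For example, with VR = [10, 20], the comparison VR > 5 is always true
   and VR == 25 is always false.  */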
4025 
4026 static tree
4027 compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
4028 			  bool *strict_overflow_p)
4029 {
4030   if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
4031     return NULL_TREE;
4032 
4033   /* Anti-ranges need to be handled separately.  */
4034   if (vr->type == VR_ANTI_RANGE)
4035     {
4036       /* For anti-ranges, the only predicates that we can compute at
4037 	 compile time are equality and inequality.  */
4038       if (comp == GT_EXPR
4039 	  || comp == GE_EXPR
4040 	  || comp == LT_EXPR
4041 	  || comp == LE_EXPR)
4042 	return NULL_TREE;
4043 
4044       /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2.  */
4045       if (value_inside_range (val, vr->min, vr->max) == 1)
4046 	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
4047 
4048       return NULL_TREE;
4049     }
4050 
4051   if (!usable_range_p (vr, strict_overflow_p))
4052     return NULL_TREE;
4053 
4054   if (comp == EQ_EXPR)
4055     {
4056       /* EQ_EXPR may only be computed if VR represents exactly
4057 	 one value.  */
4058       if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
4059 	{
4060 	  int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
4061 	  if (cmp == 0)
4062 	    return boolean_true_node;
4063 	  else if (cmp == -1 || cmp == 1 || cmp == 2)
4064 	    return boolean_false_node;
4065 	}
4066       else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
4067 	       || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
4068 	return boolean_false_node;
4069 
4070       return NULL_TREE;
4071     }
4072   else if (comp == NE_EXPR)
4073     {
4074       /* If VAL is not inside VR, then they are always different.  */
4075       if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
4076 	  || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
4077 	return boolean_true_node;
4078 
4079       /* If VR represents exactly one value equal to VAL, then return
4080 	 false.  */
4081       if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
4082 	  && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
4083 	return boolean_false_node;
4084 
4085       /* Otherwise, they may or may not be different.  */
4086       return NULL_TREE;
4087     }
4088   else if (comp == LT_EXPR || comp == LE_EXPR)
4089     {
4090       int tst;
4091 
4092       /* If VR is to the left of VAL, return true.  */
4093       tst = compare_values_warnv (vr->max, val, strict_overflow_p);
4094       if ((comp == LT_EXPR && tst == -1)
4095 	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
4096 	{
4097 	  if (overflow_infinity_range_p (vr))
4098 	    *strict_overflow_p = true;
4099 	  return boolean_true_node;
4100 	}
4101 
4102       /* If VR is to the right of VAL, return false.  */
4103       tst = compare_values_warnv (vr->min, val, strict_overflow_p);
4104       if ((comp == LT_EXPR && (tst == 0 || tst == 1))
4105 	  || (comp == LE_EXPR && tst == 1))
4106 	{
4107 	  if (overflow_infinity_range_p (vr))
4108 	    *strict_overflow_p = true;
4109 	  return boolean_false_node;
4110 	}
4111 
4112       /* Otherwise, we don't know.  */
4113       return NULL_TREE;
4114     }
4115   else if (comp == GT_EXPR || comp == GE_EXPR)
4116     {
4117       int tst;
4118 
4119       /* If VR is to the right of VAL, return true.  */
4120       tst = compare_values_warnv (vr->min, val, strict_overflow_p);
4121       if ((comp == GT_EXPR && tst == 1)
4122 	  || (comp == GE_EXPR && (tst == 0 || tst == 1)))
4123 	{
4124 	  if (overflow_infinity_range_p (vr))
4125 	    *strict_overflow_p = true;
4126 	  return boolean_true_node;
4127 	}
4128 
4129       /* If VR is to the left of VAL, return false.  */
4130       tst = compare_values_warnv (vr->max, val, strict_overflow_p);
4131       if ((comp == GT_EXPR && (tst == -1 || tst == 0))
4132 	  || (comp == GE_EXPR && tst == -1))
4133 	{
4134 	  if (overflow_infinity_range_p (vr))
4135 	    *strict_overflow_p = true;
4136 	  return boolean_false_node;
4137 	}
4138 
4139       /* Otherwise, we don't know.  */
4140       return NULL_TREE;
4141     }
4142 
4143   gcc_unreachable ();
4144 }
4145 
4146 
4147 /* Debugging dumps.  */
4148 
4149 void dump_value_range (FILE *, value_range_t *);
4150 void debug_value_range (value_range_t *);
4151 void dump_all_value_ranges (FILE *);
4152 void debug_all_value_ranges (void);
4153 void dump_vr_equiv (FILE *, bitmap);
4154 void debug_vr_equiv (bitmap);
4155 
4156 
4157 /* Dump value range VR to FILE.  */
4158 
4159 void
4160 dump_value_range (FILE *file, value_range_t *vr)
4161 {
4162   if (vr == NULL)
4163     fprintf (file, "[]");
4164   else if (vr->type == VR_UNDEFINED)
4165     fprintf (file, "UNDEFINED");
4166   else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4167     {
4168       tree type = TREE_TYPE (vr->min);
4169 
4170       fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
4171 
4172       if (is_negative_overflow_infinity (vr->min))
4173 	fprintf (file, "-INF(OVF)");
4174       else if (INTEGRAL_TYPE_P (type)
4175 	       && !TYPE_UNSIGNED (type)
4176 	       && vrp_val_is_min (vr->min))
4177 	fprintf (file, "-INF");
4178       else
4179 	print_generic_expr (file, vr->min, 0);
4180 
4181       fprintf (file, ", ");
4182 
4183       if (is_positive_overflow_infinity (vr->max))
4184 	fprintf (file, "+INF(OVF)");
4185       else if (INTEGRAL_TYPE_P (type)
4186 	       && vrp_val_is_max (vr->max))
4187 	fprintf (file, "+INF");
4188       else
4189 	print_generic_expr (file, vr->max, 0);
4190 
4191       fprintf (file, "]");
4192 
4193       if (vr->equiv)
4194 	{
4195 	  bitmap_iterator bi;
4196 	  unsigned i, c = 0;
4197 
4198 	  fprintf (file, "  EQUIVALENCES: { ");
4199 
4200 	  EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
4201 	    {
4202 	      print_generic_expr (file, ssa_name (i), 0);
4203 	      fprintf (file, " ");
4204 	      c++;
4205 	    }
4206 
4207 	  fprintf (file, "} (%u elements)", c);
4208 	}
4209     }
4210   else if (vr->type == VR_VARYING)
4211     fprintf (file, "VARYING");
4212   else
4213     fprintf (file, "INVALID RANGE");
4214 }
4215 
4216 
4217 /* Dump value range VR to stderr.  */
4218 
4219 DEBUG_FUNCTION void
4220 debug_value_range (value_range_t *vr)
4221 {
4222   dump_value_range (stderr, vr);
4223   fprintf (stderr, "\n");
4224 }
4225 
4226 
4227 /* Dump value ranges of all SSA_NAMEs to FILE.  */
4228 
4229 void
4230 dump_all_value_ranges (FILE *file)
4231 {
4232   size_t i;
4233 
4234   for (i = 0; i < num_vr_values; i++)
4235     {
4236       if (vr_value[i])
4237 	{
4238 	  print_generic_expr (file, ssa_name (i), 0);
4239 	  fprintf (file, ": ");
4240 	  dump_value_range (file, vr_value[i]);
4241 	  fprintf (file, "\n");
4242 	}
4243     }
4244 
4245   fprintf (file, "\n");
4246 }
4247 
4248 
4249 /* Dump all value ranges to stderr.  */
4250 
4251 DEBUG_FUNCTION void
4252 debug_all_value_ranges (void)
4253 {
4254   dump_all_value_ranges (stderr);
4255 }
4256 
4257 
4258 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
4259    create a new SSA name N and return the assertion assignment
4260    'N = ASSERT_EXPR <V, V OP W>'.  */
4261 
4262 static gimple
4263 build_assert_expr_for (tree cond, tree v)
4264 {
4265   tree a;
4266   gimple assertion;
4267 
4268   gcc_assert (TREE_CODE (v) == SSA_NAME
4269 	      && COMPARISON_CLASS_P (cond));
4270 
4271   a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
4272   assertion = gimple_build_assign (NULL_TREE, a);
4273 
4274   /* The new ASSERT_EXPR creates a new SSA name that replaces the
4275      operand of the ASSERT_EXPR.  Create it so the new name and the old one
4276      are registered in the replacement table so that we can fix the SSA web
4277      after adding all the ASSERT_EXPRs.  */
4278   create_new_def_for (v, assertion, NULL);
4279 
4280   return assertion;
4281 }
4282 
4283 
4284 /* Return true if STMT is a predicate expression involving floating
4285    point values.  */
4286 
4287 static inline bool
4288 fp_predicate (gimple stmt)
4289 {
4290   GIMPLE_CHECK (stmt, GIMPLE_COND);
4291 
4292   return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
4293 }
4294 
4295 
4296 /* If the range of values taken by OP can be inferred after STMT executes,
4297    return the comparison code (COMP_CODE_P) and value (VAL_P) that
4298    describe the inferred range.  Return true if a range could be
4299    inferred.  */
4300 
4301 static bool
4302 infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
4303 {
4304   *val_p = NULL_TREE;
4305   *comp_code_p = ERROR_MARK;
4306 
4307   /* Do not attempt to infer anything in names that flow through
4308      abnormal edges.  */
4309   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
4310     return false;
4311 
4312   /* Similarly, don't infer anything from statements that may throw
4313      exceptions.  */
4314   if (stmt_could_throw_p (stmt))
4315     return false;
4316 
4317   /* If STMT is the last statement of a basic block with no
4318      successors, there is no point inferring anything about any of its
4319      operands.  We would not be able to find a proper insertion point
4320      for the assertion, anyway.  */
4321   if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
4322     return false;
4323 
4324   /* We can only assume that a pointer dereference will yield
4325      non-NULL if -fdelete-null-pointer-checks is enabled.  */
4326   if (flag_delete_null_pointer_checks
4327       && POINTER_TYPE_P (TREE_TYPE (op))
4328       && gimple_code (stmt) != GIMPLE_ASM)
4329     {
4330       unsigned num_uses, num_loads, num_stores;
4331 
4332       count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores);
4333       if (num_loads + num_stores > 0)
4334 	{
4335 	  *val_p = build_int_cst (TREE_TYPE (op), 0);
4336 	  *comp_code_p = NE_EXPR;
4337 	  return true;
4338 	}
4339     }
4340 
4341   return false;
4342 }
4343 
4344 
4345 void dump_asserts_for (FILE *, tree);
4346 void debug_asserts_for (tree);
4347 void dump_all_asserts (FILE *);
4348 void debug_all_asserts (void);
4349 
4350 /* Dump all the registered assertions for NAME to FILE.  */
4351 
4352 void
4353 dump_asserts_for (FILE *file, tree name)
4354 {
4355   assert_locus_t loc;
4356 
4357   fprintf (file, "Assertions to be inserted for ");
4358   print_generic_expr (file, name, 0);
4359   fprintf (file, "\n");
4360 
4361   loc = asserts_for[SSA_NAME_VERSION (name)];
4362   while (loc)
4363     {
4364       fprintf (file, "\t");
4365       print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
4366       fprintf (file, "\n\tBB #%d", loc->bb->index);
4367       if (loc->e)
4368 	{
4369 	  fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4370 	           loc->e->dest->index);
4371 	  dump_edge_info (file, loc->e, dump_flags, 0);
4372 	}
4373       fprintf (file, "\n\tPREDICATE: ");
4374       print_generic_expr (file, name, 0);
4375       fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]);
4376       print_generic_expr (file, loc->val, 0);
4377       fprintf (file, "\n\n");
4378       loc = loc->next;
4379     }
4380 
4381   fprintf (file, "\n");
4382 }
4383 
4384 
4385 /* Dump all the registered assertions for NAME to stderr.  */
4386 
4387 DEBUG_FUNCTION void
4388 debug_asserts_for (tree name)
4389 {
4390   dump_asserts_for (stderr, name);
4391 }
4392 
4393 
4394 /* Dump all the registered assertions for all the names to FILE.  */
4395 
4396 void
4397 dump_all_asserts (FILE *file)
4398 {
4399   unsigned i;
4400   bitmap_iterator bi;
4401 
4402   fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
4403   EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4404     dump_asserts_for (file, ssa_name (i));
4405   fprintf (file, "\n");
4406 }
4407 
4408 
4409 /* Dump all the registered assertions for all the names to stderr.  */
4410 
4411 DEBUG_FUNCTION void
4412 debug_all_asserts (void)
4413 {
4414   dump_all_asserts (stderr);
4415 }
4416 
4417 
4418 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4419    'EXPR COMP_CODE VAL' at a location that dominates block BB or
4420    E->DEST, then register this location as a possible insertion point
4421    for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
4422 
4423    BB, E and SI provide the exact insertion point for the new
4424    ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
4425    on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
4426    BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
4427    must not be NULL.  */
4428 
4429 static void
4430 register_new_assert_for (tree name, tree expr,
4431 			 enum tree_code comp_code,
4432 			 tree val,
4433 			 basic_block bb,
4434 			 edge e,
4435 			 gimple_stmt_iterator si)
4436 {
4437   assert_locus_t n, loc, last_loc;
4438   basic_block dest_bb;
4439 
4440   gcc_checking_assert (bb == NULL || e == NULL);
4441 
4442   if (e == NULL)
4443     gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
4444 			 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
4445 
4446   /* Never build an assert comparing against an integer constant with
4447      TREE_OVERFLOW set.  This confuses our undefined overflow warning
4448      machinery.  */
4449   if (TREE_CODE (val) == INTEGER_CST
4450       && TREE_OVERFLOW (val))
4451     val = build_int_cst_wide (TREE_TYPE (val),
4452 			      TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val));
4453 
4454   /* The new assertion A will be inserted at BB or E.  We need to
4455      determine if the new location is dominated by a previously
4456      registered location for A.  If we are doing an edge insertion,
4457      assume that A will be inserted at E->DEST.  Note that this is not
4458      necessarily true.
4459 
4460      If E is a critical edge, it will be split.  But even if E is
4461      split, the new block will dominate the same set of blocks that
4462      E->DEST dominates.
4463 
4464      The reverse, however, is not true: blocks dominated by E->DEST
4465      will not be dominated by the new block created to split E.  So,
4466      if the insertion location is on a critical edge, we will not use
4467      the new location to move another assertion previously registered
4468      at a block dominated by E->DEST.  */
4469   dest_bb = (bb) ? bb : e->dest;
4470 
4471   /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
4472      VAL at a block dominating DEST_BB, then we don't need to insert a new
4473      one.  Similarly, if the same assertion already exists at a block
4474      dominated by DEST_BB and the new location is not on a critical
4475      edge, then update the existing location for the assertion (i.e.,
4476      move the assertion up in the dominance tree).
4477 
4478      Note, this is implemented as a simple linked list because there
4479      should not be more than a handful of assertions registered per
4480      name.  If this becomes a performance problem, a table hashed by
4481      COMP_CODE and VAL could be implemented.  */
4482   loc = asserts_for[SSA_NAME_VERSION (name)];
4483   last_loc = loc;
4484   while (loc)
4485     {
4486       if (loc->comp_code == comp_code
4487 	  && (loc->val == val
4488 	      || operand_equal_p (loc->val, val, 0))
4489 	  && (loc->expr == expr
4490 	      || operand_equal_p (loc->expr, expr, 0)))
4491 	{
4492 	  /* If E is not a critical edge and DEST_BB
4493 	     dominates the existing location for the assertion, move
4494 	     the assertion up in the dominance tree by updating its
4495 	     location information.  */
4496 	  if ((e == NULL || !EDGE_CRITICAL_P (e))
4497 	      && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
4498 	    {
4499 	      loc->bb = dest_bb;
4500 	      loc->e = e;
4501 	      loc->si = si;
4502 	      return;
4503 	    }
4504 	}
4505 
4506       /* Update the last node of the list and move to the next one.  */
4507       last_loc = loc;
4508       loc = loc->next;
4509     }
4510 
4511   /* If we didn't find an assertion already registered for
4512      NAME COMP_CODE VAL, add a new one at the end of the list of
4513      assertions associated with NAME.  */
4514   n = XNEW (struct assert_locus_d);
4515   n->bb = dest_bb;
4516   n->e = e;
4517   n->si = si;
4518   n->comp_code = comp_code;
4519   n->val = val;
4520   n->expr = expr;
4521   n->next = NULL;
4522 
4523   if (last_loc)
4524     last_loc->next = n;
4525   else
4526     asserts_for[SSA_NAME_VERSION (name)] = n;
4527 
4528   bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
4529 }
4530 
4531 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
4532    Extract a suitable test code and value and store them into *CODE_P and
4533    *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
4534 
4535    If no extraction was possible, return FALSE, otherwise return TRUE.
4536    If no extraction was possible, return FALSE; otherwise return TRUE.
4537    If INVERT is true, then we invert the result stored into *CODE_P.  */
4538 
4539 static bool
4540 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
4541 					 tree cond_op0, tree cond_op1,
4542 					 bool invert, enum tree_code *code_p,
4543 					 tree *val_p)
4544 {
4545   enum tree_code comp_code;
4546   tree val;
4547 
4548   /* We have a comparison of the form NAME COMP VAL
4549      or VAL COMP NAME.  */
4550   if (name == cond_op1)
4551     {
4552       /* If the predicate is of the form VAL COMP NAME, flip
4553 	 COMP around because we need to register NAME as the
4554 	 first operand in the predicate.  */
4555       comp_code = swap_tree_comparison (cond_code);
4556       val = cond_op0;
4557     }
4558   else
4559     {
4560       /* The comparison is of the form NAME COMP VAL, so the
4561 	 comparison code remains unchanged.  */
4562       comp_code = cond_code;
4563       val = cond_op1;
4564     }
4565 
4566   /* Invert the comparison code as necessary.  */
4567   if (invert)
4568     comp_code = invert_tree_comparison (comp_code, 0);
4569 
4570   /* VRP does not handle float types.  */
4571   if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
4572     return false;
4573 
4574   /* Do not register always-false predicates.
4575      FIXME:  this works around a limitation in fold() when dealing with
4576      enumerations.  Given 'enum { N1, N2 } x;', fold will not
4577      fold 'if (x > N2)' to 'if (0)'.  */
4578   if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
4579       && INTEGRAL_TYPE_P (TREE_TYPE (val)))
4580     {
4581       tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
4582       tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
4583 
4584       if (comp_code == GT_EXPR
4585 	  && (!max
4586 	      || compare_values (val, max) == 0))
4587 	return false;
4588 
4589       if (comp_code == LT_EXPR
4590 	  && (!min
4591 	      || compare_values (val, min) == 0))
4592 	return false;
4593     }
4594   *code_p = comp_code;
4595   *val_p = val;
4596   return true;
4597 }
4598 
4599 /* Find the smallest RES where RES > VAL && (RES & MASK) == RES, if any
4600    (otherwise return VAL).  VAL and MASK must be zero-extended for
4601    precision PREC.  If SGNBIT is non-zero, first xor VAL with SGNBIT
4602    (to transform signed values into unsigned) and at the end xor
4603    SGNBIT back.  */
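/* For example, with PREC == 8, VAL == 0x05, MASK == 0xf0 and SGNBIT == 0,
   the smallest RES > VAL with (RES & MASK) == RES is 0x10.  */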
4604 
4605 static double_int
4606 masked_increment (double_int val, double_int mask, double_int sgnbit,
4607 		  unsigned int prec)
4608 {
4609   double_int bit = double_int_one, res;
4610   unsigned int i;
4611 
4612   val ^= sgnbit;
4613   for (i = 0; i < prec; i++, bit += bit)
4614     {
4615       res = mask;
4616       if ((res & bit).is_zero ())
4617 	continue;
4618       res = bit - double_int_one;
4619       res = (val + bit).and_not (res);
4620       res &= mask;
4621       if (res.ugt (val))
4622 	return res ^ sgnbit;
4623     }
4624   return val ^ sgnbit;
4625 }
4626 
4627 /* Try to register an edge assertion for SSA name NAME on edge E for
4628    the condition COND contributing to the conditional jump pointed to by BSI.
4629    Invert the condition COND if INVERT is true.
4630    Return true if an assertion for NAME could be registered.  */
4631 
4632 static bool
4633 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
4634 			    enum tree_code cond_code,
4635 			    tree cond_op0, tree cond_op1, bool invert)
4636 {
4637   tree val;
4638   enum tree_code comp_code;
4639   bool retval = false;
4640 
4641   if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4642 						cond_op0,
4643 						cond_op1,
4644 						invert, &comp_code, &val))
4645     return false;
4646 
4647   /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4648      reachable from E.  */
4649   if (live_on_edge (e, name)
4650       && !has_single_use (name))
4651     {
4652       register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
4653       retval = true;
4654     }
4655 
4656   /* In the case of NAME <= CST and NAME being defined as
4657      NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
4658      and NAME2 <= CST - CST2.  We can do the same for NAME > CST.
4659      This catches range and anti-range tests.  */
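  /* For example, from (unsigned) NAME2 + 2 <= 5 we can assert NAME2 >= -2
     and NAME2 <= 3, with CST == 5 and CST2 == 2.  */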
4660   if ((comp_code == LE_EXPR
4661        || comp_code == GT_EXPR)
4662       && TREE_CODE (val) == INTEGER_CST
4663       && TYPE_UNSIGNED (TREE_TYPE (val)))
4664     {
4665       gimple def_stmt = SSA_NAME_DEF_STMT (name);
4666       tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
4667 
4668       /* Extract CST2 from the (optional) addition.  */
4669       if (is_gimple_assign (def_stmt)
4670 	  && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
4671 	{
4672 	  name2 = gimple_assign_rhs1 (def_stmt);
4673 	  cst2 = gimple_assign_rhs2 (def_stmt);
4674 	  if (TREE_CODE (name2) == SSA_NAME
4675 	      && TREE_CODE (cst2) == INTEGER_CST)
4676 	    def_stmt = SSA_NAME_DEF_STMT (name2);
4677 	}
4678 
4679       /* Extract NAME2 from the (optional) sign-changing cast.  */
4680       if (gimple_assign_cast_p (def_stmt))
4681 	{
4682 	  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4683 	      && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
4684 	      && (TYPE_PRECISION (gimple_expr_type (def_stmt))
4685 		  == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
4686 	    name3 = gimple_assign_rhs1 (def_stmt);
4687 	}
4688 
4689       /* If name3 is used later, create an ASSERT_EXPR for it.  */
4690       if (name3 != NULL_TREE
4691       	  && TREE_CODE (name3) == SSA_NAME
4692 	  && (cst2 == NULL_TREE
4693 	      || TREE_CODE (cst2) == INTEGER_CST)
4694 	  && INTEGRAL_TYPE_P (TREE_TYPE (name3))
4695 	  && live_on_edge (e, name3)
4696 	  && !has_single_use (name3))
4697 	{
4698 	  tree tmp;
4699 
4700 	  /* Build an expression for the range test.  */
4701 	  tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
4702 	  if (cst2 != NULL_TREE)
4703 	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4704 
4705 	  if (dump_file)
4706 	    {
4707 	      fprintf (dump_file, "Adding assert for ");
4708 	      print_generic_expr (dump_file, name3, 0);
4709 	      fprintf (dump_file, " from ");
4710 	      print_generic_expr (dump_file, tmp, 0);
4711 	      fprintf (dump_file, "\n");
4712 	    }
4713 
4714 	  register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
4715 
4716 	  retval = true;
4717 	}
4718 
4719       /* If name2 is used later, create an ASSERT_EXPR for it.  */
4720       if (name2 != NULL_TREE
4721       	  && TREE_CODE (name2) == SSA_NAME
4722 	  && TREE_CODE (cst2) == INTEGER_CST
4723 	  && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4724 	  && live_on_edge (e, name2)
4725 	  && !has_single_use (name2))
4726 	{
4727 	  tree tmp;
4728 
4729 	  /* Build an expression for the range test.  */
4730 	  tmp = name2;
4731 	  if (TREE_TYPE (name) != TREE_TYPE (name2))
4732 	    tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
4733 	  if (cst2 != NULL_TREE)
4734 	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4735 
4736 	  if (dump_file)
4737 	    {
4738 	      fprintf (dump_file, "Adding assert for ");
4739 	      print_generic_expr (dump_file, name2, 0);
4740 	      fprintf (dump_file, " from ");
4741 	      print_generic_expr (dump_file, tmp, 0);
4742 	      fprintf (dump_file, "\n");
4743 	    }
4744 
4745 	  register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
4746 
4747 	  retval = true;
4748 	}
4749     }
4750 
4751   /* In the case of post-in/decrement tests like if (i++) ... and uses
4752      of the in/decremented value on the edge, the extra name we want to
4753      assert for is not on the def chain of the name being compared.  Instead
4754      it is in the set of use stmts.  */
4755   if ((comp_code == NE_EXPR
4756        || comp_code == EQ_EXPR)
4757       && TREE_CODE (val) == INTEGER_CST)
4758     {
4759       imm_use_iterator ui;
4760       gimple use_stmt;
4761       FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
4762 	{
4763 	  /* Restrict to use statements that are in the predecessor block.  */
4764 	  if (gimple_bb (use_stmt) != e->src)
4765 	    continue;
4766 
4767 	  if (!is_gimple_assign (use_stmt))
4768 	    continue;
4769 
4770 	  enum tree_code code = gimple_assign_rhs_code (use_stmt);
4771 	  if (code != PLUS_EXPR
4772 	      && code != MINUS_EXPR)
4773 	    continue;
4774 
4775 	  tree cst = gimple_assign_rhs2 (use_stmt);
4776 	  if (TREE_CODE (cst) != INTEGER_CST)
4777 	    continue;
4778 
4779 	  tree name2 = gimple_assign_lhs (use_stmt);
4780 	  if (live_on_edge (e, name2))
4781 	    {
4782 	      cst = int_const_binop (code, val, cst);
4783 	      register_new_assert_for (name2, name2, comp_code, cst,
4784 				       NULL, e, bsi);
4785 	      retval = true;
4786 	    }
4787 	}
4788     }
4789 
4790   if (TREE_CODE_CLASS (comp_code) == tcc_comparison
4791       && TREE_CODE (val) == INTEGER_CST)
4792     {
4793       gimple def_stmt = SSA_NAME_DEF_STMT (name);
4794       tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
4795       tree val2 = NULL_TREE;
4796       double_int mask = double_int_zero;
4797       unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
4798       unsigned int nprec = prec;
4799       enum tree_code rhs_code = ERROR_MARK;
4800 
4801       if (is_gimple_assign (def_stmt))
4802 	rhs_code = gimple_assign_rhs_code (def_stmt);
4803 
4804       /* Add asserts for NAME cmp CST and NAME being defined
4805 	 as NAME = (int) NAME2.  */
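      /* For example, for NAME = (int) NAME2 and NAME < 0 with 32-bit types,
	 the code below registers the equivalent unsigned assertion
	 NAME2 + 0x80000000 <= 0x7fffffff, i.e. NAME2 >= 0x80000000.  */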
4806       if (!TYPE_UNSIGNED (TREE_TYPE (val))
4807 	  && (comp_code == LE_EXPR || comp_code == LT_EXPR
4808 	      || comp_code == GT_EXPR || comp_code == GE_EXPR)
4809 	  && gimple_assign_cast_p (def_stmt))
4810 	{
4811 	  name2 = gimple_assign_rhs1 (def_stmt);
4812 	  if (CONVERT_EXPR_CODE_P (rhs_code)
4813 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4814 	      && TYPE_UNSIGNED (TREE_TYPE (name2))
4815 	      && prec == TYPE_PRECISION (TREE_TYPE (name2))
4816 	      && (comp_code == LE_EXPR || comp_code == GT_EXPR
4817 		  || !tree_int_cst_equal (val,
4818 					  TYPE_MIN_VALUE (TREE_TYPE (val))))
4819 	      && live_on_edge (e, name2)
4820 	      && !has_single_use (name2))
4821 	    {
4822 	      tree tmp, cst;
4823 	      enum tree_code new_comp_code = comp_code;
4824 
4825 	      cst = fold_convert (TREE_TYPE (name2),
4826 				  TYPE_MIN_VALUE (TREE_TYPE (val)));
4827 	      /* Build an expression for the range test.  */
4828 	      tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
4829 	      cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
4830 				 fold_convert (TREE_TYPE (name2), val));
4831 	      if (comp_code == LT_EXPR || comp_code == GE_EXPR)
4832 		{
4833 		  new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
4834 		  cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
4835 				     build_int_cst (TREE_TYPE (name2), 1));
4836 		}
4837 
4838 	      if (dump_file)
4839 		{
4840 		  fprintf (dump_file, "Adding assert for ");
4841 		  print_generic_expr (dump_file, name2, 0);
4842 		  fprintf (dump_file, " from ");
4843 		  print_generic_expr (dump_file, tmp, 0);
4844 		  fprintf (dump_file, "\n");
4845 		}
4846 
4847 	      register_new_assert_for (name2, tmp, new_comp_code, cst, NULL,
4848 				       e, bsi);
4849 
4850 	      retval = true;
4851 	    }
4852 	}
4853 
4854       /* Add asserts for NAME cmp CST and NAME being defined as
4855 	 NAME = NAME2 >> CST2.
4856 
4857 	 Extract CST2 from the right shift.  */
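      /* For example, for NAME = NAME2 >> 4 and NAME == 3, the code below
	 asserts NAME2 - 0x30 <= 0xf in the unsigned type, i.e. NAME2 is in
	 [0x30, 0x3f].  */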
4858       if (rhs_code == RSHIFT_EXPR)
4859 	{
4860 	  name2 = gimple_assign_rhs1 (def_stmt);
4861 	  cst2 = gimple_assign_rhs2 (def_stmt);
4862 	  if (TREE_CODE (name2) == SSA_NAME
4863 	      && host_integerp (cst2, 1)
4864 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4865 	      && IN_RANGE (tree_low_cst (cst2, 1), 1, prec - 1)
4866 	      && prec <= HOST_BITS_PER_DOUBLE_INT
4867 	      && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val)))
4868 	      && live_on_edge (e, name2)
4869 	      && !has_single_use (name2))
4870 	    {
4871 	      mask = double_int::mask (tree_low_cst (cst2, 1));
4872 	      val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
4873 	    }
4874 	}
4875       if (val2 != NULL_TREE
4876 	  && TREE_CODE (val2) == INTEGER_CST
4877 	  && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
4878 					    TREE_TYPE (val),
4879 					    val2, cst2), val))
4880 	{
4881 	  enum tree_code new_comp_code = comp_code;
4882 	  tree tmp, new_val;
4883 
4884 	  tmp = name2;
4885 	  if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
4886 	    {
4887 	      if (!TYPE_UNSIGNED (TREE_TYPE (val)))
4888 		{
4889 		  tree type = build_nonstandard_integer_type (prec, 1);
4890 		  tmp = build1 (NOP_EXPR, type, name2);
4891 		  val2 = fold_convert (type, val2);
4892 		}
4893 	      tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
4894 	      new_val = double_int_to_tree (TREE_TYPE (tmp), mask);
4895 	      new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
4896 	    }
4897 	  else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
4898 	    {
4899 	      double_int minval
4900 		= double_int::min_value (prec, TYPE_UNSIGNED (TREE_TYPE (val)));
4901 	      new_val = val2;
4902 	      if (minval == tree_to_double_int (new_val))
4903 		new_val = NULL_TREE;
4904 	    }
4905 	  else
4906 	    {
4907 	      double_int maxval
4908 		= double_int::max_value (prec, TYPE_UNSIGNED (TREE_TYPE (val)));
4909 	      mask |= tree_to_double_int (val2);
4910 	      if (mask == maxval)
4911 		new_val = NULL_TREE;
4912 	      else
4913 		new_val = double_int_to_tree (TREE_TYPE (val2), mask);
4914 	    }
4915 
4916 	  if (new_val)
4917 	    {
4918 	      if (dump_file)
4919 		{
4920 		  fprintf (dump_file, "Adding assert for ");
4921 		  print_generic_expr (dump_file, name2, 0);
4922 		  fprintf (dump_file, " from ");
4923 		  print_generic_expr (dump_file, tmp, 0);
4924 		  fprintf (dump_file, "\n");
4925 		}
4926 
4927 	      register_new_assert_for (name2, tmp, new_comp_code, new_val,
4928 				       NULL, e, bsi);
4929 	      retval = true;
4930 	    }
4931 	}
4932 
4933       /* Add asserts for NAME cmp CST and NAME being defined as
4934 	 NAME = NAME2 & CST2.
4935 
4936 	 Extract CST2 from the and.
4937 
4938 	 Also handle
4939 	 NAME = (unsigned) NAME2;
4940 	 casts where NAME's type is unsigned and has smaller precision
4941 	 than NAME2's type as if it was NAME = NAME2 & MASK.  */
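
      /* An illustrative example (hypothetical user code; assumes x is
	 a 32-bit unsigned variable that is live on the edge and has
	 more than one use):

	   if ((x & 0xffffff00) == 0x1200)
	     ...

	 The code below computes MINV = 0x1200 and MAXV = 0x12ff and
	 registers an assertion of the form (x - 0x1200) <= 0xff, so on
	 the true edge x is known to lie in [0x1200, 0x12ff].  */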
4942       names[0] = NULL_TREE;
4943       names[1] = NULL_TREE;
4944       cst2 = NULL_TREE;
4945       if (rhs_code == BIT_AND_EXPR
4946 	  || (CONVERT_EXPR_CODE_P (rhs_code)
4947 	      && TREE_CODE (TREE_TYPE (val)) == INTEGER_TYPE
4948 	      && TYPE_UNSIGNED (TREE_TYPE (val))
4949 	      && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
4950 		 > prec
4951 	      && !retval))
4952 	{
4953 	  name2 = gimple_assign_rhs1 (def_stmt);
4954 	  if (rhs_code == BIT_AND_EXPR)
4955 	    cst2 = gimple_assign_rhs2 (def_stmt);
4956 	  else
4957 	    {
4958 	      cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
4959 	      nprec = TYPE_PRECISION (TREE_TYPE (name2));
4960 	    }
4961 	  if (TREE_CODE (name2) == SSA_NAME
4962 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4963 	      && TREE_CODE (cst2) == INTEGER_CST
4964 	      && !integer_zerop (cst2)
4965 	      && nprec <= HOST_BITS_PER_DOUBLE_INT
4966 	      && (nprec > 1
4967 		  || TYPE_UNSIGNED (TREE_TYPE (val))))
4968 	    {
4969 	      gimple def_stmt2 = SSA_NAME_DEF_STMT (name2);
4970 	      if (gimple_assign_cast_p (def_stmt2))
4971 		{
4972 		  names[1] = gimple_assign_rhs1 (def_stmt2);
4973 		  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
4974 		      || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
4975 		      || (TYPE_PRECISION (TREE_TYPE (name2))
4976 			  != TYPE_PRECISION (TREE_TYPE (names[1])))
4977 		      || !live_on_edge (e, names[1])
4978 		      || has_single_use (names[1]))
4979 		    names[1] = NULL_TREE;
4980 		}
4981 	      if (live_on_edge (e, name2)
4982 		  && !has_single_use (name2))
4983 		names[0] = name2;
4984 	    }
4985 	}
4986       if (names[0] || names[1])
4987 	{
4988 	  double_int minv, maxv = double_int_zero, valv, cst2v;
4989 	  double_int tem, sgnbit;
4990 	  bool valid_p = false, valn = false, cst2n = false;
4991 	  enum tree_code ccode = comp_code;
4992 
4993 	  valv = tree_to_double_int (val).zext (nprec);
4994 	  cst2v = tree_to_double_int (cst2).zext (nprec);
4995 	  if (!TYPE_UNSIGNED (TREE_TYPE (val)))
4996 	    {
4997 	      valn = valv.sext (nprec).is_negative ();
4998 	      cst2n = cst2v.sext (nprec).is_negative ();
4999 	    }
5000 	  /* If CST2 doesn't have the most significant bit set,
5001 	     but VAL is negative, we have a comparison like
5002 	     if ((x & 0x123) > -4) (always true).  Just give up.  */
5003 	  if (!cst2n && valn)
5004 	    ccode = ERROR_MARK;
5005 	  if (cst2n)
5006 	    sgnbit = double_int_one.llshift (nprec - 1, nprec).zext (nprec);
5007 	  else
5008 	    sgnbit = double_int_zero;
5009 	  minv = valv & cst2v;
5010 	  switch (ccode)
5011 	    {
5012 	    case EQ_EXPR:
5013 	      /* Minimum unsigned value for equality is VAL & CST2
5014 		 (should be equal to VAL, otherwise we probably should
5015 		 have folded the comparison into false) and
5016 		 maximum unsigned value is VAL | ~CST2.  */
5017 	      maxv = valv | ~cst2v;
5018 	      maxv = maxv.zext (nprec);
5019 	      valid_p = true;
5020 	      break;
5021 	    case NE_EXPR:
5022 	      tem = valv | ~cst2v;
5023 	      tem = tem.zext (nprec);
5024 	      /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U.  */
5025 	      if (valv.is_zero ())
5026 		{
5027 		  cst2n = false;
5028 		  sgnbit = double_int_zero;
5029 		  goto gt_expr;
5030 		}
5031 	      /* If (VAL | ~CST2) is all ones, handle it as
5032 		 (X & CST2) < VAL.  */
5033 	      if (tem == double_int::mask (nprec))
5034 		{
5035 		  cst2n = false;
5036 		  valn = false;
5037 		  sgnbit = double_int_zero;
5038 		  goto lt_expr;
5039 		}
5040 	      if (!cst2n
5041 		  && cst2v.sext (nprec).is_negative ())
5042 		sgnbit
5043 		  = double_int_one.llshift (nprec - 1, nprec).zext (nprec);
5044 	      if (!sgnbit.is_zero ())
5045 		{
5046 		  if (valv == sgnbit)
5047 		    {
5048 		      cst2n = true;
5049 		      valn = true;
5050 		      goto gt_expr;
5051 		    }
5052 		  if (tem == double_int::mask (nprec - 1))
5053 		    {
5054 		      cst2n = true;
5055 		      goto lt_expr;
5056 		    }
5057 		  if (!cst2n)
5058 		    sgnbit = double_int_zero;
5059 		}
5060 	      break;
5061 	    case GE_EXPR:
5062 	      /* Minimum unsigned value for >= if (VAL & CST2) == VAL
5063 		 is VAL and maximum unsigned value is ~0.  For signed
5064 		 comparison, if CST2 doesn't have most significant bit
5065 		 set, handle it similarly.  If CST2 has MSB set,
5066 		 the minimum is the same, and maximum is ~0U/2.  */
5067 	      if (minv != valv)
5068 		{
5069 		  /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
5070 		     VAL.  */
5071 		  minv = masked_increment (valv, cst2v, sgnbit, nprec);
5072 		  if (minv == valv)
5073 		    break;
5074 		}
5075 	      maxv = double_int::mask (nprec - (cst2n ? 1 : 0));
5076 	      valid_p = true;
5077 	      break;
5078 	    case GT_EXPR:
5079 	    gt_expr:
5080 	      /* Find out smallest MINV where MINV > VAL
5081 		 && (MINV & CST2) == MINV, if any.  If VAL is signed and
5082 		 CST2 has MSB set, compute it biased by 1 << (nprec - 1).  */
5083 	      minv = masked_increment (valv, cst2v, sgnbit, nprec);
5084 	      if (minv == valv)
5085 		break;
5086 	      maxv = double_int::mask (nprec - (cst2n ? 1 : 0));
5087 	      valid_p = true;
5088 	      break;
5089 	    case LE_EXPR:
5090 	      /* Minimum unsigned value for <= is 0 and maximum
5091 		 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
5092 		 Otherwise, find smallest VAL2 where VAL2 > VAL
5093 		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5094 		 as maximum.
5095 		 For signed comparison, if CST2 doesn't have most
5096 		 significant bit set, handle it similarly.  If CST2 has
5097 		 MSB set, the maximum is the same and minimum is INT_MIN.  */
5098 	      if (minv == valv)
5099 		maxv = valv;
5100 	      else
5101 		{
5102 		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5103 		  if (maxv == valv)
5104 		    break;
5105 		  maxv -= double_int_one;
5106 		}
5107 	      maxv |= ~cst2v;
5108 	      maxv = maxv.zext (nprec);
5109 	      minv = sgnbit;
5110 	      valid_p = true;
5111 	      break;
5112 	    case LT_EXPR:
5113 	    lt_expr:
5114 	      /* Minimum unsigned value for < is 0 and maximum
5115 		 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
5116 		 Otherwise, find smallest VAL2 where VAL2 > VAL
5117 		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5118 		 as maximum.
5119 		 For signed comparison, if CST2 doesn't have most
5120 		 significant bit set, handle it similarly.  If CST2 has
5121 		 MSB set, the maximum is the same and minimum is INT_MIN.  */
5122 	      if (minv == valv)
5123 		{
5124 		  if (valv == sgnbit)
5125 		    break;
5126 		  maxv = valv;
5127 		}
5128 	      else
5129 		{
5130 		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5131 		  if (maxv == valv)
5132 		    break;
5133 		}
5134 	      maxv -= double_int_one;
5135 	      maxv |= ~cst2v;
5136 	      maxv = maxv.zext (nprec);
5137 	      minv = sgnbit;
5138 	      valid_p = true;
5139 	      break;
5140 	    default:
5141 	      break;
5142 	    }
5143 	  if (valid_p
5144 	      && (maxv - minv).zext (nprec) != double_int::mask (nprec))
5145 	    {
5146 	      tree tmp, new_val, type;
5147 	      int i;
5148 
5149 	      for (i = 0; i < 2; i++)
5150 		if (names[i])
5151 		  {
5152 		    double_int maxv2 = maxv;
5153 		    tmp = names[i];
5154 		    type = TREE_TYPE (names[i]);
5155 		    if (!TYPE_UNSIGNED (type))
5156 		      {
5157 			type = build_nonstandard_integer_type (nprec, 1);
5158 			tmp = build1 (NOP_EXPR, type, names[i]);
5159 		      }
5160 		    if (!minv.is_zero ())
5161 		      {
5162 			tmp = build2 (PLUS_EXPR, type, tmp,
5163 				      double_int_to_tree (type, -minv));
5164 			maxv2 = maxv - minv;
5165 		      }
5166 		    new_val = double_int_to_tree (type, maxv2);
5167 
5168 		    if (dump_file)
5169 		      {
5170 			fprintf (dump_file, "Adding assert for ");
5171 			print_generic_expr (dump_file, names[i], 0);
5172 			fprintf (dump_file, " from ");
5173 			print_generic_expr (dump_file, tmp, 0);
5174 			fprintf (dump_file, "\n");
5175 		      }
5176 
5177 		    register_new_assert_for (names[i], tmp, LE_EXPR,
5178 					     new_val, NULL, e, bsi);
5179 		    retval = true;
5180 		  }
5181 	    }
5182 	}
5183     }
5184 
5185   return retval;
5186 }
5187 
5188 /* OP is an operand of a truth value expression which is known to have
5189    a particular value.  Register any asserts for OP and for any
5190    operands in OP's defining statement.
5191 
5192    If CODE is EQ_EXPR, then we want to register that OP is zero (false);
5193    if CODE is NE_EXPR, then we want to register that OP is nonzero (true).  */
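
/* For instance (an illustrative sketch; the SSA names below are made
   up):

     x_3 = a_1 < b_2;
     if (x_3 != 0) ...

   calling this function for x_3 with CODE == NE_EXPR on the true edge
   registers x_3 != 0 (when x_3 has more than one use) and, because
   x_3 is defined by a comparison, also hands a_1 and b_2 to
   register_edge_assert_for_2 so that range assertions can be derived
   from a_1 < b_2.  */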
5194 
5195 static bool
5196 register_edge_assert_for_1 (tree op, enum tree_code code,
5197 			    edge e, gimple_stmt_iterator bsi)
5198 {
5199   bool retval = false;
5200   gimple op_def;
5201   tree val;
5202   enum tree_code rhs_code;
5203 
5204   /* We only care about SSA_NAMEs.  */
5205   if (TREE_CODE (op) != SSA_NAME)
5206     return false;
5207 
5208   /* We know that OP will have a zero or nonzero value.  If OP is used
5209      more than once go ahead and register an assert for OP.
5210 
5211      The FOUND_IN_SUBGRAPH support is not helpful in this situation as
5212      it will always be set for OP (because OP is used in a COND_EXPR in
5213      the subgraph).  */
5214   if (!has_single_use (op))
5215     {
5216       val = build_int_cst (TREE_TYPE (op), 0);
5217       register_new_assert_for (op, op, code, val, NULL, e, bsi);
5218       retval = true;
5219     }
5220 
5221   /* Now look at how OP is set.  If it's set from a comparison,
5222      a truth operation or some bit operations, then we may be able
5223      to register information about the operands of that assignment.  */
5224   op_def = SSA_NAME_DEF_STMT (op);
5225   if (gimple_code (op_def) != GIMPLE_ASSIGN)
5226     return retval;
5227 
5228   rhs_code = gimple_assign_rhs_code (op_def);
5229 
5230   if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
5231     {
5232       bool invert = (code == EQ_EXPR ? true : false);
5233       tree op0 = gimple_assign_rhs1 (op_def);
5234       tree op1 = gimple_assign_rhs2 (op_def);
5235 
5236       if (TREE_CODE (op0) == SSA_NAME)
5237         retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1,
5238 					      invert);
5239       if (TREE_CODE (op1) == SSA_NAME)
5240         retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1,
5241 					      invert);
5242     }
5243   else if ((code == NE_EXPR
5244 	    && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
5245 	   || (code == EQ_EXPR
5246 	       && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
5247     {
5248       /* Recurse on each operand.  */
5249       tree op0 = gimple_assign_rhs1 (op_def);
5250       tree op1 = gimple_assign_rhs2 (op_def);
5251       if (TREE_CODE (op0) == SSA_NAME
5252 	  && has_single_use (op0))
5253 	retval |= register_edge_assert_for_1 (op0, code, e, bsi);
5254       if (TREE_CODE (op1) == SSA_NAME
5255 	  && has_single_use (op1))
5256 	retval |= register_edge_assert_for_1 (op1, code, e, bsi);
5257     }
5258   else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
5259 	   && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
5260     {
5261       /* Recurse, flipping CODE.  */
5262       code = invert_tree_comparison (code, false);
5263       retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5264 					    code, e, bsi);
5265     }
5266   else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
5267     {
5268       /* Recurse through the copy.  */
5269       retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5270 					    code, e, bsi);
5271     }
5272   else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
5273     {
5274       /* Recurse through the type conversion, unless it is a narrowing
5275 	 conversion or conversion from non-integral type.  */
5276       tree rhs = gimple_assign_rhs1 (op_def);
5277       if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
5278 	  && (TYPE_PRECISION (TREE_TYPE (rhs))
5279 	      <= TYPE_PRECISION (TREE_TYPE (op))))
5280 	retval |= register_edge_assert_for_1 (rhs, code, e, bsi);
5281     }
5282 
5283   return retval;
5284 }
5285 
5286 /* Try to register an edge assertion for SSA name NAME on edge E for
5287    the condition COND_OP0 COND_CODE COND_OP1 that contributes to the
5288    conditional jump pointed to by SI.  Return true if an assertion was registered.  */
5289 
5290 static bool
5291 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
5292 			  enum tree_code cond_code, tree cond_op0,
5293 			  tree cond_op1)
5294 {
5295   tree val;
5296   enum tree_code comp_code;
5297   bool retval = false;
5298   bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
5299 
5300   /* Do not attempt to infer anything in names that flow through
5301      abnormal edges.  */
5302   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
5303     return false;
5304 
5305   if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
5306 						cond_op0, cond_op1,
5307 						is_else_edge,
5308 						&comp_code, &val))
5309     return false;
5310 
5311   /* Register ASSERT_EXPRs for name.  */
5312   retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
5313 					cond_op1, is_else_edge);
5314 
5315 
5316   /* If COND is effectively an equality test of an SSA_NAME against
5317      the value zero or one, then we may be able to assert values
5318      for SSA_NAMEs which flow into COND.  */
5319 
5320   /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
5321      statement of NAME we can assert both operands of the BIT_AND_EXPR
5322      have nonzero value.  */
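
  /* An illustrative sketch (made-up SSA names):

       x_2 = a_1 != 0;
       y_4 = b_3 != 0;
       z_5 = x_2 & y_4;
       if (z_5 != 0) ...

     On the true edge both x_2 and y_4 are known to be nonzero, and
     recursing through their defining comparisons can in turn derive
     assertions about a_1 and b_3.  */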
5323   if (((comp_code == EQ_EXPR && integer_onep (val))
5324        || (comp_code == NE_EXPR && integer_zerop (val))))
5325     {
5326       gimple def_stmt = SSA_NAME_DEF_STMT (name);
5327 
5328       if (is_gimple_assign (def_stmt)
5329 	  && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
5330 	{
5331 	  tree op0 = gimple_assign_rhs1 (def_stmt);
5332 	  tree op1 = gimple_assign_rhs2 (def_stmt);
5333 	  retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
5334 	  retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
5335 	}
5336     }
5337 
5338   /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
5339      statement of NAME we can assert both operands of the BIT_IOR_EXPR
5340      have zero value.  */
5341   if (((comp_code == EQ_EXPR && integer_zerop (val))
5342        || (comp_code == NE_EXPR && integer_onep (val))))
5343     {
5344       gimple def_stmt = SSA_NAME_DEF_STMT (name);
5345 
5346       /* For BIT_IOR_EXPR, both operands necessarily have zero value
5347 	 only if NAME == 0, or if the type precision is one.  */
5348       if (is_gimple_assign (def_stmt)
5349 	  && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
5350 	      && (TYPE_PRECISION (TREE_TYPE (name)) == 1
5351 	          || comp_code == EQ_EXPR)))
5352 	{
5353 	  tree op0 = gimple_assign_rhs1 (def_stmt);
5354 	  tree op1 = gimple_assign_rhs2 (def_stmt);
5355 	  retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
5356 	  retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
5357 	}
5358     }
5359 
5360   return retval;
5361 }
5362 
5363 
5364 /* Determine whether the outgoing edges of BB should receive an
5365    ASSERT_EXPR for each of the operands of BB's LAST statement.
5366    The last statement of BB must be a COND_EXPR.
5367 
5368    If any of the sub-graphs rooted at BB have an interesting use of
5369    the predicate operands, an assert location node is added to the
5370    list of assertions for the corresponding operands.  */
5371 
5372 static bool
5373 find_conditional_asserts (basic_block bb, gimple last)
5374 {
5375   bool need_assert;
5376   gimple_stmt_iterator bsi;
5377   tree op;
5378   edge_iterator ei;
5379   edge e;
5380   ssa_op_iter iter;
5381 
5382   need_assert = false;
5383   bsi = gsi_for_stmt (last);
5384 
5385   /* Look for uses of the operands in each of the sub-graphs
5386      rooted at BB.  We need to check each of the outgoing edges
5387      separately, so that we know what kind of ASSERT_EXPR to
5388      insert.  */
5389   FOR_EACH_EDGE (e, ei, bb->succs)
5390     {
5391       if (e->dest == bb)
5392 	continue;
5393 
5394       /* Register the necessary assertions for each operand in the
5395 	 conditional predicate.  */
5396       FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
5397 	{
5398 	  need_assert |= register_edge_assert_for (op, e, bsi,
5399 						   gimple_cond_code (last),
5400 						   gimple_cond_lhs (last),
5401 						   gimple_cond_rhs (last));
5402 	}
5403     }
5404 
5405   return need_assert;
5406 }
5407 
5408 struct case_info
5409 {
5410   tree expr;
5411   basic_block bb;
5412 };
5413 
5414 /* Compare two case labels sorting first by the destination bb index
5415    and then by the case value.  */
5416 
5417 static int
5418 compare_case_labels (const void *p1, const void *p2)
5419 {
5420   const struct case_info *ci1 = (const struct case_info *) p1;
5421   const struct case_info *ci2 = (const struct case_info *) p2;
5422   int idx1 = ci1->bb->index;
5423   int idx2 = ci2->bb->index;
5424 
5425   if (idx1 < idx2)
5426     return -1;
5427   else if (idx1 == idx2)
5428     {
5429       /* Make sure the default label is first in a group.  */
5430       if (!CASE_LOW (ci1->expr))
5431 	return -1;
5432       else if (!CASE_LOW (ci2->expr))
5433 	return 1;
5434       else
5435 	return tree_int_cst_compare (CASE_LOW (ci1->expr),
5436 				     CASE_LOW (ci2->expr));
5437     }
5438   else
5439     return 1;
5440 }
5441 
5442 /* Determine whether the outgoing edges of BB should receive an
5443    ASSERT_EXPR for each of the operands of BB's LAST statement.
5444    The last statement of BB must be a SWITCH_EXPR.
5445 
5446    If any of the sub-graphs rooted at BB have an interesting use of
5447    the predicate operands, an assert location node is added to the
5448    list of assertions for the corresponding operands.  */
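
/* For example (hypothetical source, for illustration only):

     switch (i)
       {
       case 2:
       case 3:
       case 4:
	 ...
       }

   The three labels share one destination block, so they are combined
   into the single range [2, 4] for that edge, i.e. the asserts i >= 2
   and i <= 4 are registered.  */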
5449 
5450 static bool
5451 find_switch_asserts (basic_block bb, gimple last)
5452 {
5453   bool need_assert;
5454   gimple_stmt_iterator bsi;
5455   tree op;
5456   edge e;
5457   struct case_info *ci;
5458   size_t n = gimple_switch_num_labels (last);
5459 #if GCC_VERSION >= 4000
5460   unsigned int idx;
5461 #else
5462   /* Work around GCC 3.4 bug (PR 37086).  */
5463   volatile unsigned int idx;
5464 #endif
5465 
5466   need_assert = false;
5467   bsi = gsi_for_stmt (last);
5468   op = gimple_switch_index (last);
5469   if (TREE_CODE (op) != SSA_NAME)
5470     return false;
5471 
5472   /* Build a vector of case labels sorted by destination label.  */
5473   ci = XNEWVEC (struct case_info, n);
5474   for (idx = 0; idx < n; ++idx)
5475     {
5476       ci[idx].expr = gimple_switch_label (last, idx);
5477       ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
5478     }
5479   qsort (ci, n, sizeof (struct case_info), compare_case_labels);
5480 
5481   for (idx = 0; idx < n; ++idx)
5482     {
5483       tree min, max;
5484       tree cl = ci[idx].expr;
5485       basic_block cbb = ci[idx].bb;
5486 
5487       min = CASE_LOW (cl);
5488       max = CASE_HIGH (cl);
5489 
5490       /* If there are multiple case labels with the same destination
5491 	 we need to combine them to a single value range for the edge.  */
5492       if (idx + 1 < n && cbb == ci[idx + 1].bb)
5493 	{
5494 	  /* Skip labels until the last of the group.  */
5495 	  do {
5496 	    ++idx;
5497 	  } while (idx < n && cbb == ci[idx].bb);
5498 	  --idx;
5499 
5500 	  /* Pick up the maximum of the case label range.  */
5501 	  if (CASE_HIGH (ci[idx].expr))
5502 	    max = CASE_HIGH (ci[idx].expr);
5503 	  else
5504 	    max = CASE_LOW (ci[idx].expr);
5505 	}
5506 
5507       /* Nothing to do if the range includes the default label until we
5508 	 can register anti-ranges.  */
5509       if (min == NULL_TREE)
5510 	continue;
5511 
5512       /* Find the edge to register the assert expr on.  */
5513       e = find_edge (bb, cbb);
5514 
5515       /* Register the necessary assertions for the operand in the
5516 	 SWITCH_EXPR.  */
5517       need_assert |= register_edge_assert_for (op, e, bsi,
5518 					       max ? GE_EXPR : EQ_EXPR,
5519 					       op,
5520 					       fold_convert (TREE_TYPE (op),
5521 							     min));
5522       if (max)
5523 	{
5524 	  need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR,
5525 						   op,
5526 						   fold_convert (TREE_TYPE (op),
5527 								 max));
5528 	}
5529     }
5530 
5531   XDELETEVEC (ci);
5532   return need_assert;
5533 }
5534 
5535 
5536 /* Traverse all the statements in block BB looking for statements that
5537    may generate useful assertions for the SSA names in their operand.
5538    If a statement produces a useful assertion A for name N_i, then the
5539    list of assertions already generated for N_i is scanned to
5540    determine if A is actually needed.
5541 
5542    If N_i already had the assertion A at a location dominating the
5543    current location, then nothing needs to be done.  Otherwise, the
5544    new location for A is recorded instead.
5545 
5546    1- For every statement S in BB, all the variables used by S are
5547       added to bitmap FOUND_IN_SUBGRAPH.
5548 
5549    2- If statement S uses an operand N in a way that exposes a known
5550       value range for N, then if N was not already generated by an
5551       ASSERT_EXPR, create a new assert location for N.  For instance,
5552       if N is a pointer and the statement dereferences it, we can
5553       assume that N is not NULL.
5554 
5555    3- COND_EXPRs are a special case of #2.  We can derive range
5556       information from the predicate but need to insert different
5557       ASSERT_EXPRs for each of the sub-graphs rooted at the
5558       conditional block.  If the last statement of BB is a conditional
5559       expression of the form 'X op Y', then
5560 
5561       a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
5562 
5563       b) If the conditional is the only entry point to the sub-graph
5564 	 corresponding to the THEN_CLAUSE, recurse into it.  On
5565 	 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
5566 	 an ASSERT_EXPR is added for the corresponding variable.
5567 
5568       c) Repeat step (b) on the ELSE_CLAUSE.
5569 
5570       d) Mark X and Y in FOUND_IN_SUBGRAPH.
5571 
5572       For instance,
5573 
5574 	    if (a == 9)
5575 	      b = a;
5576 	    else
5577 	      b = c + 1;
5578 
5579       In this case, an assertion on the THEN clause is useful to
5580       determine that 'a' is always 9 on that edge.  However, an assertion
5581       on the ELSE clause would be unnecessary.
5582 
5583    4- If BB does not end in a conditional expression, then we recurse
5584       into BB's dominator children.
5585 
5586    At the end of the recursive traversal, every SSA name will have a
5587    list of locations where ASSERT_EXPRs should be added.  When a new
5588    location for name N is found, it is registered by calling
5589    register_new_assert_for.  That function keeps track of all the
5590    registered assertions to prevent adding unnecessary assertions.
5591    For instance, if a pointer P_4 is dereferenced more than once in a
5592    dominator tree, only the location dominating all the dereferences of
5593    P_4 will receive an ASSERT_EXPR.
5594 
5595    If this function returns true, then it means that there are names
5596    for which we need to generate ASSERT_EXPRs.  Those assertions are
5597    inserted by process_assert_insertions.  */
5598 
5599 static bool
5600 find_assert_locations_1 (basic_block bb, sbitmap live)
5601 {
5602   gimple_stmt_iterator si;
5603   gimple last;
5604   bool need_assert;
5605 
5606   need_assert = false;
5607   last = last_stmt (bb);
5608 
5609   /* If BB's last statement is a conditional statement involving integer
5610      operands, determine if we need to add ASSERT_EXPRs.  */
5611   if (last
5612       && gimple_code (last) == GIMPLE_COND
5613       && !fp_predicate (last)
5614       && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
5615     need_assert |= find_conditional_asserts (bb, last);
5616 
5617   /* If BB's last statement is a switch statement involving integer
5618      operands, determine if we need to add ASSERT_EXPRs.  */
5619   if (last
5620       && gimple_code (last) == GIMPLE_SWITCH
5621       && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
5622     need_assert |= find_switch_asserts (bb, last);
5623 
5624   /* Traverse all the statements in BB marking used names and looking
5625      for statements that may infer assertions for their used operands.  */
5626   for (si = gsi_last_bb (bb); !gsi_end_p (si); gsi_prev (&si))
5627     {
5628       gimple stmt;
5629       tree op;
5630       ssa_op_iter i;
5631 
5632       stmt = gsi_stmt (si);
5633 
5634       if (is_gimple_debug (stmt))
5635 	continue;
5636 
5637       /* See if we can derive an assertion for any of STMT's operands.  */
5638       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5639 	{
5640 	  tree value;
5641 	  enum tree_code comp_code;
5642 
5643 	  /* If op is not live beyond this stmt, do not bother to insert
5644 	     asserts for it.  */
5645 	  if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
5646 	    continue;
5647 
5648 	  /* If OP is used in such a way that we can infer a value
5649 	     range for it, and we don't find a previous assertion for
5650 	     it, create a new assertion location node for OP.  */
5651 	  if (infer_value_range (stmt, op, &comp_code, &value))
5652 	    {
5653 	      /* If we are able to infer a nonzero value range for OP,
5654 		 then walk backwards through the use-def chain to see if OP
5655 		 was set via a typecast.
5656 
5657 		 If so, then we can also infer a nonzero value range
5658 		 for the operand of the NOP_EXPR.  */
5659 	      if (comp_code == NE_EXPR && integer_zerop (value))
5660 		{
5661 		  tree t = op;
5662 		  gimple def_stmt = SSA_NAME_DEF_STMT (t);
5663 
5664 		  while (is_gimple_assign (def_stmt)
5665 			 && gimple_assign_rhs_code (def_stmt)  == NOP_EXPR
5666 			 && TREE_CODE
5667 			     (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
5668 			 && POINTER_TYPE_P
5669 			     (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
5670 		    {
5671 		      t = gimple_assign_rhs1 (def_stmt);
5672 		      def_stmt = SSA_NAME_DEF_STMT (t);
5673 
5674 		      /* Note we want to register the assert for the
5675 			 operand of the NOP_EXPR after SI, not after the
5676 			 conversion.  */
5677 		      if (! has_single_use (t))
5678 			{
5679 			  register_new_assert_for (t, t, comp_code, value,
5680 						   bb, NULL, si);
5681 			  need_assert = true;
5682 			}
5683 		    }
5684 		}
5685 
5686 	      register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
5687 	      need_assert = true;
5688 	    }
5689 	}
5690 
5691       /* Update live.  */
5692       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5693 	bitmap_set_bit (live, SSA_NAME_VERSION (op));
5694       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
5695 	bitmap_clear_bit (live, SSA_NAME_VERSION (op));
5696     }
5697 
5698   /* Traverse all PHI nodes in BB, updating live.  */
5699   for (si = gsi_start_phis (bb); !gsi_end_p(si); gsi_next (&si))
5700     {
5701       use_operand_p arg_p;
5702       ssa_op_iter i;
5703       gimple phi = gsi_stmt (si);
5704       tree res = gimple_phi_result (phi);
5705 
5706       if (virtual_operand_p (res))
5707 	continue;
5708 
5709       FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
5710 	{
5711 	  tree arg = USE_FROM_PTR (arg_p);
5712 	  if (TREE_CODE (arg) == SSA_NAME)
5713 	    bitmap_set_bit (live, SSA_NAME_VERSION (arg));
5714 	}
5715 
5716       bitmap_clear_bit (live, SSA_NAME_VERSION (res));
5717     }
5718 
5719   return need_assert;
5720 }
5721 
5722 /* Do an RPO walk over the function computing SSA name liveness
5723    on-the-fly and deciding on assert expressions to insert.
5724    Returns true if there are assert expressions to be inserted.  */
5725 
5726 static bool
5727 find_assert_locations (void)
5728 {
5729   int *rpo = XNEWVEC (int, last_basic_block);
5730   int *bb_rpo = XNEWVEC (int, last_basic_block);
5731   int *last_rpo = XCNEWVEC (int, last_basic_block);
5732   int rpo_cnt, i;
5733   bool need_asserts;
5734 
5735   live = XCNEWVEC (sbitmap, last_basic_block);
5736   rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
5737   for (i = 0; i < rpo_cnt; ++i)
5738     bb_rpo[rpo[i]] = i;
5739 
5740   need_asserts = false;
5741   for (i = rpo_cnt - 1; i >= 0; --i)
5742     {
5743       basic_block bb = BASIC_BLOCK (rpo[i]);
5744       edge e;
5745       edge_iterator ei;
5746 
5747       if (!live[rpo[i]])
5748 	{
5749 	  live[rpo[i]] = sbitmap_alloc (num_ssa_names);
5750 	  bitmap_clear (live[rpo[i]]);
5751 	}
5752 
5753       /* Process BB and update the live information with uses in
5754          this block.  */
5755       need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);
5756 
5757       /* Merge liveness into the predecessor blocks and free it.  */
5758       if (!bitmap_empty_p (live[rpo[i]]))
5759 	{
5760 	  int pred_rpo = i;
5761 	  FOR_EACH_EDGE (e, ei, bb->preds)
5762 	    {
5763 	      int pred = e->src->index;
5764 	      if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
5765 		continue;
5766 
5767 	      if (!live[pred])
5768 		{
5769 		  live[pred] = sbitmap_alloc (num_ssa_names);
5770 		  bitmap_clear (live[pred]);
5771 		}
5772 	      bitmap_ior (live[pred], live[pred], live[rpo[i]]);
5773 
5774 	      if (bb_rpo[pred] < pred_rpo)
5775 		pred_rpo = bb_rpo[pred];
5776 	    }
5777 
5778 	  /* Record the RPO number of the last visited block that needs
5779 	     live information from this block.  */
5780 	  last_rpo[rpo[i]] = pred_rpo;
5781 	}
5782       else
5783 	{
5784 	  sbitmap_free (live[rpo[i]]);
5785 	  live[rpo[i]] = NULL;
5786 	}
5787 
5788       /* We can free all successors' live bitmaps if all their
5789          predecessors have been visited already.  */
5790       FOR_EACH_EDGE (e, ei, bb->succs)
5791 	if (last_rpo[e->dest->index] == i
5792 	    && live[e->dest->index])
5793 	  {
5794 	    sbitmap_free (live[e->dest->index]);
5795 	    live[e->dest->index] = NULL;
5796 	  }
5797     }
5798 
5799   XDELETEVEC (rpo);
5800   XDELETEVEC (bb_rpo);
5801   XDELETEVEC (last_rpo);
5802   for (i = 0; i < last_basic_block; ++i)
5803     if (live[i])
5804       sbitmap_free (live[i]);
5805   XDELETEVEC (live);
5806 
5807   return need_asserts;
5808 }
5809 
5810 /* Create an ASSERT_EXPR for NAME and insert it in the location
5811    indicated by LOC.  Return true if we made any edge insertions.  */
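
/* The assertion built below is roughly of the form

     NAME = ASSERT_EXPR <NAME, LOC->EXPR LOC->COMP_CODE LOC->VAL>

   (a sketch; a fresh SSA version of NAME becomes the left-hand side).
   It is inserted either on the edge LOC->E, when the assertion came
   from a COND_EXPR or SWITCH_EXPR, or otherwise after the statement
   at LOC->SI, falling back to the single non-abnormal outgoing edge
   when that statement ends its block.  */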
5812 
5813 static bool
5814 process_assert_insertions_for (tree name, assert_locus_t loc)
5815 {
5816   /* Build the comparison expression NAME_i COMP_CODE VAL.  */
5817   gimple stmt;
5818   tree cond;
5819   gimple assert_stmt;
5820   edge_iterator ei;
5821   edge e;
5822 
5823   /* If we have X <=> X do not insert an assert expr for that.  */
5824   if (loc->expr == loc->val)
5825     return false;
5826 
5827   cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
5828   assert_stmt = build_assert_expr_for (cond, name);
5829   if (loc->e)
5830     {
5831       /* We have been asked to insert the assertion on an edge.  This
5832 	 is used only by COND_EXPR and SWITCH_EXPR assertions.  */
5833       gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
5834 			   || (gimple_code (gsi_stmt (loc->si))
5835 			       == GIMPLE_SWITCH));
5836 
5837       gsi_insert_on_edge (loc->e, assert_stmt);
5838       return true;
5839     }
5840 
5841   /* Otherwise, we can insert right after LOC->SI, provided the
5842      statement is not the last statement in the block.  */
5843   stmt = gsi_stmt (loc->si);
5844   if (!stmt_ends_bb_p (stmt))
5845     {
5846       gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
5847       return false;
5848     }
5849 
5850   /* If STMT must be the last statement in BB, we can only insert new
5851      assertions on the non-abnormal edge out of BB.  Note that since
5852      STMT is not control flow, there may only be one non-abnormal edge
5853      out of BB.  */
5854   FOR_EACH_EDGE (e, ei, loc->bb->succs)
5855     if (!(e->flags & EDGE_ABNORMAL))
5856       {
5857 	gsi_insert_on_edge (e, assert_stmt);
5858 	return true;
5859       }
5860 
5861   gcc_unreachable ();
5862 }
5863 
5864 
5865 /* Process all the insertions registered for every name N_i in
5866    NEED_ASSERT_FOR.  The list of assertions to be inserted for N_i is
5867    found in ASSERTS_FOR[i].  */
5868 
5869 static void
5870 process_assert_insertions (void)
5871 {
5872   unsigned i;
5873   bitmap_iterator bi;
5874   bool update_edges_p = false;
5875   int num_asserts = 0;
5876 
5877   if (dump_file && (dump_flags & TDF_DETAILS))
5878     dump_all_asserts (dump_file);
5879 
5880   EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
5881     {
5882       assert_locus_t loc = asserts_for[i];
5883       gcc_assert (loc);
5884 
5885       while (loc)
5886 	{
5887 	  assert_locus_t next = loc->next;
5888 	  update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
5889 	  free (loc);
5890 	  loc = next;
5891 	  num_asserts++;
5892 	}
5893     }
5894 
5895   if (update_edges_p)
5896     gsi_commit_edge_inserts ();
5897 
5898   statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
5899 			    num_asserts);
5900 }
5901 
5902 
5903 /* Traverse the flowgraph looking for conditional jumps to insert range
5904    expressions.  These range expressions are meant to provide information
5905    to optimizations that need to reason in terms of value ranges.  They
5906    will not be expanded into RTL.  For instance, given:
5907 
5908    x = ...
5909    y = ...
5910    if (x < y)
5911      y = x - 2;
5912    else
5913      x = y + 3;
5914 
5915    this pass will transform the code into:
5916 
5917    x = ...
5918    y = ...
5919    if (x < y)
5920     {
5921       x = ASSERT_EXPR <x, x < y>
5922       y = x - 2
5923     }
5924    else
5925     {
5926       y = ASSERT_EXPR <y, x <= y>
5927       x = y + 3
5928     }
5929 
5930    The idea is that once copy and constant propagation have run, other
5931    optimizations will be able to determine what ranges of values 'x' can
5932    take in different paths of the code, simply by checking the reaching
5933    definition of 'x'.  */
5934 
5935 static void
5936 insert_range_assertions (void)
5937 {
5938   need_assert_for = BITMAP_ALLOC (NULL);
5939   asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);
5940 
5941   calculate_dominance_info (CDI_DOMINATORS);
5942 
5943   if (find_assert_locations ())
5944     {
5945       process_assert_insertions ();
5946       update_ssa (TODO_update_ssa_no_phi);
5947     }
5948 
5949   if (dump_file && (dump_flags & TDF_DETAILS))
5950     {
5951       fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
5952       dump_function_to_file (current_function_decl, dump_file, dump_flags);
5953     }
5954 
5955   free (asserts_for);
5956   BITMAP_FREE (need_assert_for);
5957 }
5958 
5959 /* Check one ARRAY_REF in REF, located at LOCATION.  Ignore flexible
5960    arrays and "struct" hacks.  If VRP can determine that the array
5961    subscript is a constant, check whether it is outside the valid
5962    range.  If the array subscript is a RANGE, warn if it does not
5963    overlap the valid range.
5964    IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR.  */
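
/* For example (hypothetical code, for illustration only):

     int a[10];
     ...
     a[i] = 0;        <- warned when VRP proves i is outside [0, 9]
     p = &a[10];      <- not warned: one past the end is allowed here

   The &a[10] case is what IGNORE_OFF_BY_ONE is for.  */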
5965 
5966 static void
5967 check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
5968 {
5969   value_range_t* vr = NULL;
5970   tree low_sub, up_sub;
5971   tree low_bound, up_bound, up_bound_p1;
5972   tree base;
5973 
5974   if (TREE_NO_WARNING (ref))
5975     return;
5976 
5977   low_sub = up_sub = TREE_OPERAND (ref, 1);
5978   up_bound = array_ref_up_bound (ref);
5979 
5980   /* Cannot check flexible arrays.  */
5981   if (!up_bound
5982       || TREE_CODE (up_bound) != INTEGER_CST)
5983     return;
5984 
5985   /* Accesses to trailing arrays via pointers may access storage
5986      beyond the type's array bounds.  */
5987   base = get_base_address (ref);
5988   if (base && TREE_CODE (base) == MEM_REF)
5989     {
5990       tree cref, next = NULL_TREE;
5991 
5992       if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
5993 	return;
5994 
5995       cref = TREE_OPERAND (ref, 0);
5996       if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
5997 	for (next = DECL_CHAIN (TREE_OPERAND (cref, 1));
5998 	     next && TREE_CODE (next) != FIELD_DECL;
5999 	     next = DECL_CHAIN (next))
6000 	  ;
6001 
6002       /* If this is the last field in a struct type or a field in a
6003 	 union type, do not warn.  */
6004       if (!next)
6005 	return;
6006     }
6007 
6008   low_bound = array_ref_low_bound (ref);
6009   up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node);
6010 
6011   if (TREE_CODE (low_sub) == SSA_NAME)
6012     {
6013       vr = get_value_range (low_sub);
6014       if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
6015         {
6016           low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
6017           up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
6018         }
6019     }
6020 
6021   if (vr && vr->type == VR_ANTI_RANGE)
6022     {
6023       if (TREE_CODE (up_sub) == INTEGER_CST
6024           && tree_int_cst_lt (up_bound, up_sub)
6025           && TREE_CODE (low_sub) == INTEGER_CST
6026           && tree_int_cst_lt (low_sub, low_bound))
6027         {
6028           warning_at (location, OPT_Warray_bounds,
6029 		      "array subscript is outside array bounds");
6030           TREE_NO_WARNING (ref) = 1;
6031         }
6032     }
6033   else if (TREE_CODE (up_sub) == INTEGER_CST
6034 	   && (ignore_off_by_one
6035 	       ? (tree_int_cst_lt (up_bound, up_sub)
6036 		  && !tree_int_cst_equal (up_bound_p1, up_sub))
6037 	       : (tree_int_cst_lt (up_bound, up_sub)
6038 		  || tree_int_cst_equal (up_bound_p1, up_sub))))
6039     {
6040       if (dump_file && (dump_flags & TDF_DETAILS))
6041 	{
6042 	  fprintf (dump_file, "Array bound warning for ");
6043 	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
6044 	  fprintf (dump_file, "\n");
6045 	}
6046       warning_at (location, OPT_Warray_bounds,
6047 		  "array subscript is above array bounds");
6048       TREE_NO_WARNING (ref) = 1;
6049     }
6050   else if (TREE_CODE (low_sub) == INTEGER_CST
6051            && tree_int_cst_lt (low_sub, low_bound))
6052     {
6053       if (dump_file && (dump_flags & TDF_DETAILS))
6054 	{
6055 	  fprintf (dump_file, "Array bound warning for ");
6056 	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
6057 	  fprintf (dump_file, "\n");
6058 	}
6059       warning_at (location, OPT_Warray_bounds,
6060 		  "array subscript is below array bounds");
6061       TREE_NO_WARNING (ref) = 1;
6062     }
6063 }
6064 
6065 /* Search whether the expression T, located at LOCATION, computes the
6066    address of an ARRAY_REF, and call check_array_ref on it.  */
6067 
6068 static void
6069 search_for_addr_array (tree t, location_t location)
6070 {
6071   while (TREE_CODE (t) == SSA_NAME)
6072     {
6073       gimple g = SSA_NAME_DEF_STMT (t);
6074 
6075       if (gimple_code (g) != GIMPLE_ASSIGN)
6076 	return;
6077 
6078       if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
6079 	  != GIMPLE_SINGLE_RHS)
6080 	return;
6081 
6082       t = gimple_assign_rhs1 (g);
6083     }
6084 
6085 
6086   /* We are only interested in addresses of ARRAY_REFs.  */
6087   if (TREE_CODE (t) != ADDR_EXPR)
6088     return;
6089 
6090   /* Check each ARRAY_REF in the reference chain.  */
6091   do
6092     {
6093       if (TREE_CODE (t) == ARRAY_REF)
6094 	check_array_ref (location, t, true /*ignore_off_by_one*/);
6095 
6096       t = TREE_OPERAND (t, 0);
6097     }
6098   while (handled_component_p (t));
6099 
6100   if (TREE_CODE (t) == MEM_REF
6101       && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
6102       && !TREE_NO_WARNING (t))
6103     {
6104       tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
6105       tree low_bound, up_bound, el_sz;
6106       double_int idx;
6107       if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
6108 	  || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
6109 	  || !TYPE_DOMAIN (TREE_TYPE (tem)))
6110 	return;
6111 
6112       low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
6113       up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
6114       el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
6115       if (!low_bound
6116 	  || TREE_CODE (low_bound) != INTEGER_CST
6117 	  || !up_bound
6118 	  || TREE_CODE (up_bound) != INTEGER_CST
6119 	  || !el_sz
6120 	  || TREE_CODE (el_sz) != INTEGER_CST)
6121 	return;
6122 
6123       idx = mem_ref_offset (t);
6124       idx = idx.sdiv (tree_to_double_int (el_sz), TRUNC_DIV_EXPR);
6125       if (idx.slt (double_int_zero))
6126 	{
6127 	  if (dump_file && (dump_flags & TDF_DETAILS))
6128 	    {
6129 	      fprintf (dump_file, "Array bound warning for ");
6130 	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
6131 	      fprintf (dump_file, "\n");
6132 	    }
6133 	  warning_at (location, OPT_Warray_bounds,
6134 		      "array subscript is below array bounds");
6135 	  TREE_NO_WARNING (t) = 1;
6136 	}
6137       else if (idx.sgt (tree_to_double_int (up_bound)
6138 			- tree_to_double_int (low_bound)
6139 			+ double_int_one))
6140 	{
6141 	  if (dump_file && (dump_flags & TDF_DETAILS))
6142 	    {
6143 	      fprintf (dump_file, "Array bound warning for ");
6144 	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
6145 	      fprintf (dump_file, "\n");
6146 	    }
6147 	  warning_at (location, OPT_Warray_bounds,
6148 		      "array subscript is above array bounds");
6149 	  TREE_NO_WARNING (t) = 1;
6150 	}
6151     }
6152 }
6153 
6154 /* walk_tree() callback that checks if *TP is
6155    an ARRAY_REF inside an ADDR_EXPR (in which case an array
6156    subscript one past the valid range is allowed).  Call
6157    check_array_ref for each ARRAY_REF found.  The location is
6158    passed in DATA.  */
6159 
6160 static tree
6161 check_array_bounds (tree *tp, int *walk_subtree, void *data)
6162 {
6163   tree t = *tp;
6164   struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
6165   location_t location;
6166 
6167   if (EXPR_HAS_LOCATION (t))
6168     location = EXPR_LOCATION (t);
6169   else
6170     {
6171       location_t *locp = (location_t *) wi->info;
6172       location = *locp;
6173     }
6174 
6175   *walk_subtree = TRUE;
6176 
6177   if (TREE_CODE (t) == ARRAY_REF)
6178     check_array_ref (location, t, false /*ignore_off_by_one*/);
6179 
6180   if (TREE_CODE (t) == MEM_REF
6181       || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
6182     search_for_addr_array (TREE_OPERAND (t, 0), location);
6183 
6184   if (TREE_CODE (t) == ADDR_EXPR)
6185     *walk_subtree = FALSE;
6186 
6187   return NULL_TREE;
6188 }
6189 
6190 /* Walk over all statements of all reachable BBs and call check_array_bounds
6191    on them.  */
6192 
6193 static void
6194 check_all_array_refs (void)
6195 {
6196   basic_block bb;
6197   gimple_stmt_iterator si;
6198 
6199   FOR_EACH_BB (bb)
6200     {
6201       edge_iterator ei;
6202       edge e;
6203       bool executable = false;
6204 
6205       /* Skip blocks that were found to be unreachable.  */
6206       FOR_EACH_EDGE (e, ei, bb->preds)
6207 	executable |= !!(e->flags & EDGE_EXECUTABLE);
6208       if (!executable)
6209 	continue;
6210 
6211       for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
6212 	{
6213 	  gimple stmt = gsi_stmt (si);
6214 	  struct walk_stmt_info wi;
6215 	  if (!gimple_has_location (stmt))
6216 	    continue;
6217 
6218 	  if (is_gimple_call (stmt))
6219 	    {
6220 	      size_t i;
6221 	      size_t n = gimple_call_num_args (stmt);
6222 	      for (i = 0; i < n; i++)
6223 		{
6224 		  tree arg = gimple_call_arg (stmt, i);
6225 		  search_for_addr_array (arg, gimple_location (stmt));
6226 		}
6227 	    }
6228 	  else
6229 	    {
6230 	      memset (&wi, 0, sizeof (wi));
6231 	      wi.info = CONST_CAST (void *, (const void *)
6232 				    gimple_location_ptr (stmt));
6233 
6234 	      walk_gimple_op (gsi_stmt (si),
6235 			      check_array_bounds,
6236 			      &wi);
6237 	    }
6238 	}
6239     }
6240 }
6241 
6242 /* Convert range assertion expressions into the implied copies and
6243    copy propagate away the copies.  Doing the trivial copy propagation
6244    here avoids the need to run the full copy propagation pass after
6245    VRP.
6246 
6247    FIXME, this will eventually lead to copy propagation removing the
6248    names that had useful range information attached to them.  For
6249    instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
6250    then N_i will have the range [3, +INF].
6251 
6252    However, by converting the assertion into the implied copy
6253    operation N_i = N_j, we will then copy-propagate N_j into the uses
6254    of N_i and lose the range information.  We may want to hold on to
6255    ASSERT_EXPRs a little while longer as the ranges could be used in
6256    things like jump threading.
6257 
6258    The problem with keeping ASSERT_EXPRs around is that passes after
6259    VRP need to handle them appropriately.
6260 
6261    Another approach would be to make the range information a first
6262    class property of the SSA_NAME so that it can be queried from
6263    any pass.  This is made somewhat more complex by the need for
6264    multiple ranges to be associated with one SSA_NAME.  */
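
/* Sketch of the transformation performed below (made-up SSA names):

     N_1 = ASSERT_EXPR <N_0, N_0 > 3>;
     ... uses of N_1 ...

   becomes

     ... the same uses, now referring to N_0 ...

   and the assertion statement itself is removed.  */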
6265 
6266 static void
6267 remove_range_assertions (void)
6268 {
6269   basic_block bb;
6270   gimple_stmt_iterator si;
6271 
6272   /* Note that the BSI iterator bump happens at the bottom of the
6273      loop and no bump is necessary if we're removing the statement
6274      referenced by the current BSI.  */
6275   FOR_EACH_BB (bb)
6276     for (si = gsi_start_bb (bb); !gsi_end_p (si);)
6277       {
6278 	gimple stmt = gsi_stmt (si);
6279 	gimple use_stmt;
6280 
6281 	if (is_gimple_assign (stmt)
6282 	    && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
6283 	  {
6284 	    tree rhs = gimple_assign_rhs1 (stmt);
6285 	    tree var;
6286 	    tree cond = fold (ASSERT_EXPR_COND (rhs));
6287 	    use_operand_p use_p;
6288 	    imm_use_iterator iter;
6289 
6290 	    gcc_assert (cond != boolean_false_node);
6291 
6292 	    /* Propagate the RHS into every use of the LHS.  */
6293 	    var = ASSERT_EXPR_VAR (rhs);
6294 	    FOR_EACH_IMM_USE_STMT (use_stmt, iter,
6295 				   gimple_assign_lhs (stmt))
6296 	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
6297 		{
6298 		  SET_USE (use_p, var);
6299 		  gcc_assert (TREE_CODE (var) == SSA_NAME);
6300 		}
6301 
6302 	    /* And finally, remove the copy, it is not needed.  */
6303 	    gsi_remove (&si, true);
6304 	    release_defs (stmt);
6305 	  }
6306 	else
6307 	  gsi_next (&si);
6308       }
6309 }
6310 
6311 
6312 /* Return true if STMT is interesting for VRP.  */
6313 
6314 static bool
6315 stmt_interesting_for_vrp (gimple stmt)
6316 {
6317   if (gimple_code (stmt) == GIMPLE_PHI)
6318     {
6319       tree res = gimple_phi_result (stmt);
6320       return (!virtual_operand_p (res)
6321 	      && (INTEGRAL_TYPE_P (TREE_TYPE (res))
6322 		  || POINTER_TYPE_P (TREE_TYPE (res))));
6323     }
6324   else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6325     {
6326       tree lhs = gimple_get_lhs (stmt);
6327 
6328       /* In general, assignments with virtual operands are not useful
6329 	 for deriving ranges, with the obvious exception of calls to
6330 	 builtin functions.  */
6331       if (lhs && TREE_CODE (lhs) == SSA_NAME
6332 	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6333 	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
6334 	  && ((is_gimple_call (stmt)
6335 	       && gimple_call_fndecl (stmt) != NULL_TREE
6336 	       && DECL_BUILT_IN (gimple_call_fndecl (stmt)))
6337 	      || !gimple_vuse (stmt)))
6338 	return true;
6339     }
6340   else if (gimple_code (stmt) == GIMPLE_COND
6341 	   || gimple_code (stmt) == GIMPLE_SWITCH)
6342     return true;
6343 
6344   return false;
6345 }
6346 
6347 
6348 /* Initialize local data structures for VRP.  */
6349 
6350 static void
6351 vrp_initialize (void)
6352 {
6353   basic_block bb;
6354 
6355   values_propagated = false;
6356   num_vr_values = num_ssa_names;
6357   vr_value = XCNEWVEC (value_range_t *, num_vr_values);
6358   vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
6359 
6360   FOR_EACH_BB (bb)
6361     {
6362       gimple_stmt_iterator si;
6363 
6364       for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
6365 	{
6366 	  gimple phi = gsi_stmt (si);
6367 	  if (!stmt_interesting_for_vrp (phi))
6368 	    {
6369 	      tree lhs = PHI_RESULT (phi);
6370 	      set_value_range_to_varying (get_value_range (lhs));
6371 	      prop_set_simulate_again (phi, false);
6372 	    }
6373 	  else
6374 	    prop_set_simulate_again (phi, true);
6375 	}
6376 
6377       for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
6378         {
6379 	  gimple stmt = gsi_stmt (si);
6380 
6381 	  /* If the statement is a control insn, then we must make
6382 	     sure it is simulated at least once.  Failure to do so
6383 	     means that its outgoing edges will never get added.  */
6384 	  if (stmt_ends_bb_p (stmt))
6385 	    prop_set_simulate_again (stmt, true);
6386 	  else if (!stmt_interesting_for_vrp (stmt))
6387 	    {
6388 	      ssa_op_iter i;
6389 	      tree def;
6390 	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
6391 		set_value_range_to_varying (get_value_range (def));
6392 	      prop_set_simulate_again (stmt, false);
6393 	    }
6394 	  else
6395 	    prop_set_simulate_again (stmt, true);
6396 	}
6397     }
6398 }
6399 
6400 /* Return the singleton value of NAME's value range if any, else NAME.  */
6401 
6402 static inline tree
6403 vrp_valueize (tree name)
6404 {
6405   if (TREE_CODE (name) == SSA_NAME)
6406     {
6407       value_range_t *vr = get_value_range (name);
6408       if (vr->type == VR_RANGE
6409 	  && (vr->min == vr->max
6410 	      || operand_equal_p (vr->min, vr->max, 0)))
6411 	return vr->min;
6412     }
6413   return name;
6414 }
6415 
6416 /* Visit assignment or call STMT.  If it produces an interesting
6417    range, record the SSA name in *OUTPUT_P.  */
6418 
6419 static enum ssa_prop_result
6420 vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
6421 {
6422   tree def, lhs;
6423   ssa_op_iter iter;
6424   enum gimple_code code = gimple_code (stmt);
6425   lhs = gimple_get_lhs (stmt);
6426 
6427   /* We only keep track of ranges in integral and pointer types.  */
6428   if (TREE_CODE (lhs) == SSA_NAME
6429       && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6430 	   /* It is valid to have NULL MIN/MAX values on a type.  See
6431 	      build_range_type.  */
6432 	   && TYPE_MIN_VALUE (TREE_TYPE (lhs))
6433 	   && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
6434 	  || POINTER_TYPE_P (TREE_TYPE (lhs))))
6435     {
6436       value_range_t new_vr = VR_INITIALIZER;
6437 
6438       /* Try folding the statement to a constant first.  */
6439       tree tem = gimple_fold_stmt_to_constant (stmt, vrp_valueize);
6440       if (tem && !is_overflow_infinity (tem))
6441 	set_value_range (&new_vr, VR_RANGE, tem, tem, NULL);
6442       /* Then dispatch to value-range extracting functions.  */
6443       else if (code == GIMPLE_CALL)
6444 	extract_range_basic (&new_vr, stmt);
6445       else
6446 	extract_range_from_assignment (&new_vr, stmt);
6447 
6448       if (update_value_range (lhs, &new_vr))
6449 	{
6450 	  *output_p = lhs;
6451 
6452 	  if (dump_file && (dump_flags & TDF_DETAILS))
6453 	    {
6454 	      fprintf (dump_file, "Found new range for ");
6455 	      print_generic_expr (dump_file, lhs, 0);
6456 	      fprintf (dump_file, ": ");
6457 	      dump_value_range (dump_file, &new_vr);
6458 	      fprintf (dump_file, "\n\n");
6459 	    }
6460 
6461 	  if (new_vr.type == VR_VARYING)
6462 	    return SSA_PROP_VARYING;
6463 
6464 	  return SSA_PROP_INTERESTING;
6465 	}
6466 
6467       return SSA_PROP_NOT_INTERESTING;
6468     }
6469 
6470   /* Every other statement produces no useful ranges.  */
6471   FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
6472     set_value_range_to_varying (get_value_range (def));
6473 
6474   return SSA_PROP_VARYING;
6475 }
6476 
6477 /* Helper that gets the value range of the SSA_NAME with version I,
6478    or a symbolic range containing just the SSA_NAME if its value range
6479    is varying or undefined.  */
6480 
6481 static inline value_range_t
6482 get_vr_for_comparison (int i)
6483 {
6484   value_range_t vr = *get_value_range (ssa_name (i));
6485 
6486   /* If name N_i does not have a valid range, use N_i as its own
6487      range.  This allows us to compare against names that may
6488      have N_i in their ranges.  */
6489   if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
6490     {
6491       vr.type = VR_RANGE;
6492       vr.min = ssa_name (i);
6493       vr.max = ssa_name (i);
6494     }
6495 
6496   return vr;
6497 }
6498 
6499 /* Compare all the value ranges for names equivalent to VAR with VAL
6500    using comparison code COMP.  Return the same value returned by
6501    compare_range_with_value, including the setting of
6502    *STRICT_OVERFLOW_P.  */
6503 
6504 static tree
6505 compare_name_with_value (enum tree_code comp, tree var, tree val,
6506 			 bool *strict_overflow_p)
6507 {
6508   bitmap_iterator bi;
6509   unsigned i;
6510   bitmap e;
6511   tree retval, t;
6512   int used_strict_overflow;
6513   bool sop;
6514   value_range_t equiv_vr;
6515 
6516   /* Get the set of equivalences for VAR.  */
6517   e = get_value_range (var)->equiv;
6518 
6519   /* Start at -1.  Set it to 0 if we do a comparison without relying
6520      on overflow, or 1 if all comparisons rely on overflow.  */
6521   used_strict_overflow = -1;
6522 
6523   /* Compare vars' value range with val.  */
6524   equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
6525   sop = false;
6526   retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
6527   if (retval)
6528     used_strict_overflow = sop ? 1 : 0;
6529 
6530   /* If the equiv set is empty we have done all work we need to do.  */
6531   if (e == NULL)
6532     {
6533       if (retval
6534 	  && used_strict_overflow > 0)
6535 	*strict_overflow_p = true;
6536       return retval;
6537     }
6538 
6539   EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
6540     {
6541       equiv_vr = get_vr_for_comparison (i);
6542       sop = false;
6543       t = compare_range_with_value (comp, &equiv_vr, val, &sop);
6544       if (t)
6545 	{
6546 	  /* If we get different answers from different members
6547 	     of the equivalence set this check must be in a dead
6548 	     code region.  Folding it to a trap representation
6549 	     would be correct here.  For now just return don't-know.  */
6550 	  if (retval != NULL
6551 	      && t != retval)
6552 	    {
6553 	      retval = NULL_TREE;
6554 	      break;
6555 	    }
6556 	  retval = t;
6557 
6558 	  if (!sop)
6559 	    used_strict_overflow = 0;
6560 	  else if (used_strict_overflow < 0)
6561 	    used_strict_overflow = 1;
6562 	}
6563     }
6564 
6565   if (retval
6566       && used_strict_overflow > 0)
6567     *strict_overflow_p = true;
6568 
6569   return retval;
6570 }
6571 
6572 
6573 /* Given a comparison code COMP and names N1 and N2, compare all the
6574    ranges equivalent to N1 against all the ranges equivalent to N2
6575    to determine the value of N1 COMP N2.  Return the same value
6576    returned by compare_ranges.  Set *STRICT_OVERFLOW_P to indicate
6577    whether we relied on an overflow infinity in the comparison.  */
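
/* For example (an illustrative sketch): if the equivalence sets of N1
   and N2 intersect, say because an equality test between them has been
   asserted on the current path, then N1 <= N2, N1 >= N2 and N1 == N2
   all evaluate to true without inspecting any numeric ranges; this is
   the bitmap_intersect_p short-circuit below.  */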
6578 
6579 
6580 static tree
6581 compare_names (enum tree_code comp, tree n1, tree n2,
6582 	       bool *strict_overflow_p)
6583 {
6584   tree t, retval;
6585   bitmap e1, e2;
6586   bitmap_iterator bi1, bi2;
6587   unsigned i1, i2;
6588   int used_strict_overflow;
6589   static bitmap_obstack *s_obstack = NULL;
6590   static bitmap s_e1 = NULL, s_e2 = NULL;
6591 
6592   /* Compare the ranges of every name equivalent to N1 against the
6593      ranges of every name equivalent to N2.  */
6594   e1 = get_value_range (n1)->equiv;
6595   e2 = get_value_range (n2)->equiv;
6596 
6597   /* Use the fake bitmaps if e1 or e2 are not available.  */
6598   if (s_obstack == NULL)
6599     {
6600       s_obstack = XNEW (bitmap_obstack);
6601       bitmap_obstack_initialize (s_obstack);
6602       s_e1 = BITMAP_ALLOC (s_obstack);
6603       s_e2 = BITMAP_ALLOC (s_obstack);
6604     }
6605   if (e1 == NULL)
6606     e1 = s_e1;
6607   if (e2 == NULL)
6608     e2 = s_e2;
6609 
6610   /* Add N1 and N2 to their own set of equivalences to avoid
6611      duplicating the body of the loop just to check N1 and N2
6612      ranges.  */
6613   bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
6614   bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
6615 
6616   /* If the equivalence sets have a common intersection, then the two
6617      names can be compared without checking their ranges.  */
6618   if (bitmap_intersect_p (e1, e2))
6619     {
6620       bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6621       bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6622 
6623       return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
6624 	     ? boolean_true_node
6625 	     : boolean_false_node;
6626     }
6627 
6628   /* Start at -1.  Set it to 0 if we do a comparison without relying
6629      on overflow, or 1 if all comparisons rely on overflow.  */
6630   used_strict_overflow = -1;
6631 
6632   /* Otherwise, compare all the equivalent ranges.  N1 and N2 were
6633      already added to their own equivalence sets above, so the nested
6634      loops below also compare the ranges of N1 and N2 themselves.  */
6635   EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
6636     {
6637       value_range_t vr1 = get_vr_for_comparison (i1);
6638 
6639       t = retval = NULL_TREE;
6640       EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
6641 	{
6642 	  bool sop = false;
6643 
6644 	  value_range_t vr2 = get_vr_for_comparison (i2);
6645 
6646 	  t = compare_ranges (comp, &vr1, &vr2, &sop);
6647 	  if (t)
6648 	    {
6649 	      /* If we get different answers from different members
6650 		 of the equivalence set this check must be in a dead
6651 		 code region.  Folding it to a trap representation
6652 		 would be correct here.  For now just return don't-know.  */
6653 	      if (retval != NULL
6654 		  && t != retval)
6655 		{
6656 		  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6657 		  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6658 		  return NULL_TREE;
6659 		}
6660 	      retval = t;
6661 
6662 	      if (!sop)
6663 		used_strict_overflow = 0;
6664 	      else if (used_strict_overflow < 0)
6665 		used_strict_overflow = 1;
6666 	    }
6667 	}
6668 
6669       if (retval)
6670 	{
6671 	  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6672 	  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6673 	  if (used_strict_overflow > 0)
6674 	    *strict_overflow_p = true;
6675 	  return retval;
6676 	}
6677     }
6678 
6679   /* None of the equivalent ranges are useful in computing this
6680      comparison.  */
6681   bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6682   bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6683   return NULL_TREE;
6684 }
6685 
6686 /* Helper function for vrp_evaluate_conditional_warnv_with_ops below.  */
6687 
6688 static tree
6689 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
6690 						      tree op0, tree op1,
6691 						      bool * strict_overflow_p)
6692 {
6693   value_range_t *vr0, *vr1;
6694 
6695   vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
6696   vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
6697 
6698   if (vr0 && vr1)
6699     return compare_ranges (code, vr0, vr1, strict_overflow_p);
6700   else if (vr0 && vr1 == NULL)
6701     return compare_range_with_value (code, vr0, op1, strict_overflow_p);
6702   else if (vr0 == NULL && vr1)
6703     return (compare_range_with_value
6704 	    (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
6705   return NULL;
6706 }
6707 
6708 /* Helper function for vrp_evaluate_conditional and vrp_visit_cond_stmt.  */
6709 
6710 static tree
6711 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
6712 					 tree op1, bool use_equiv_p,
6713 					 bool *strict_overflow_p, bool *only_ranges)
6714 {
6715   tree ret;
6716   if (only_ranges)
6717     *only_ranges = true;
6718 
6719   /* We only deal with integral and pointer types.  */
6720   if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
6721       && !POINTER_TYPE_P (TREE_TYPE (op0)))
6722     return NULL_TREE;
6723 
6724   if (use_equiv_p)
6725     {
6726       if (only_ranges
6727           && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
6728 	              (code, op0, op1, strict_overflow_p)))
6729 	return ret;
6730       *only_ranges = false;
6731       if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
6732 	return compare_names (code, op0, op1, strict_overflow_p);
6733       else if (TREE_CODE (op0) == SSA_NAME)
6734 	return compare_name_with_value (code, op0, op1, strict_overflow_p);
6735       else if (TREE_CODE (op1) == SSA_NAME)
6736 	return (compare_name_with_value
6737 		(swap_tree_comparison (code), op1, op0, strict_overflow_p));
6738     }
6739   else
6740     return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1,
6741 								 strict_overflow_p);
6742   return NULL_TREE;
6743 }
6744 
6745 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
6746    information.  Return NULL if the conditional cannot be evaluated.
6747    The ranges of all the names equivalent with the operands in COND
6748    will be used when trying to compute the value.  If the result is
6749    based on undefined signed overflow, issue a warning if
6750    appropriate.  */
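
/* As a hypothetical example of the folding this enables:

	i_3 = ASSERT_EXPR <i_1, i_1 > 0>
	if (i_3 >= 1)

   with i_3 known to lie in [1, +INF], the call evaluates the predicate
   to boolean_true_node, and the branch can subsequently be folded.  */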
6751 
6752 static tree
6753 vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
6754 {
6755   bool sop;
6756   tree ret;
6757   bool only_ranges;
6758 
6759   /* Some passes and foldings leak constants with overflow flag set
6760      into the IL.  Avoid doing wrong things with these and bail out.  */
6761   if ((TREE_CODE (op0) == INTEGER_CST
6762        && TREE_OVERFLOW (op0))
6763       || (TREE_CODE (op1) == INTEGER_CST
6764 	  && TREE_OVERFLOW (op1)))
6765     return NULL_TREE;
6766 
6767   sop = false;
6768   ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
6769   						 &only_ranges);
6770 
6771   if (ret && sop)
6772     {
6773       enum warn_strict_overflow_code wc;
6774       const char* warnmsg;
6775 
6776       if (is_gimple_min_invariant (ret))
6777 	{
6778 	  wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
6779 	  warnmsg = G_("assuming signed overflow does not occur when "
6780 		       "simplifying conditional to constant");
6781 	}
6782       else
6783 	{
6784 	  wc = WARN_STRICT_OVERFLOW_COMPARISON;
6785 	  warnmsg = G_("assuming signed overflow does not occur when "
6786 		       "simplifying conditional");
6787 	}
6788 
6789       if (issue_strict_overflow_warning (wc))
6790 	{
6791 	  location_t location;
6792 
6793 	  if (!gimple_has_location (stmt))
6794 	    location = input_location;
6795 	  else
6796 	    location = gimple_location (stmt);
6797 	  warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
6798 	}
6799     }
6800 
6801   if (warn_type_limits
6802       && ret && only_ranges
6803       && TREE_CODE_CLASS (code) == tcc_comparison
6804       && TREE_CODE (op0) == SSA_NAME)
6805     {
6806       /* If the comparison is being folded and the operand on the LHS
6807 	 is being compared against a constant value that is outside of
6808 	 the natural range of OP0's type, then the predicate will
6809 	 always fold regardless of the value of OP0.  If -Wtype-limits
6810 	 was specified, emit a warning.  */
6811       tree type = TREE_TYPE (op0);
6812       value_range_t *vr0 = get_value_range (op0);
6813 
6814       if (vr0->type == VR_RANGE
6815 	  && INTEGRAL_TYPE_P (type)
6816 	  && vrp_val_is_min (vr0->min)
6817 	  && vrp_val_is_max (vr0->max)
6818 	  && is_gimple_min_invariant (op1))
6819 	{
6820 	  location_t location;
6821 
6822 	  if (!gimple_has_location (stmt))
6823 	    location = input_location;
6824 	  else
6825 	    location = gimple_location (stmt);
6826 
6827 	  warning_at (location, OPT_Wtype_limits,
6828 		      integer_zerop (ret)
6829 		      ? G_("comparison always false "
6830                            "due to limited range of data type")
6831 		      : G_("comparison always true "
6832                            "due to limited range of data type"));
6833 	}
6834     }
6835 
6836   return ret;
6837 }
6838 
6839 
6840 /* Visit conditional statement STMT.  If we can determine which edge
6841    will be taken out of STMT's basic block, record it in
6842    *TAKEN_EDGE_P and return SSA_PROP_INTERESTING.  Otherwise, return
6843    SSA_PROP_VARYING.  */
6844 
6845 static enum ssa_prop_result
6846 vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
6847 {
6848   tree val;
6849   bool sop;
6850 
6851   *taken_edge_p = NULL;
6852 
6853   if (dump_file && (dump_flags & TDF_DETAILS))
6854     {
6855       tree use;
6856       ssa_op_iter i;
6857 
6858       fprintf (dump_file, "\nVisiting conditional with predicate: ");
6859       print_gimple_stmt (dump_file, stmt, 0, 0);
6860       fprintf (dump_file, "\nWith known ranges\n");
6861 
6862       FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
6863 	{
6864 	  fprintf (dump_file, "\t");
6865 	  print_generic_expr (dump_file, use, 0);
6866 	  fprintf (dump_file, ": ");
6867 	  dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
6868 	}
6869 
6870       fprintf (dump_file, "\n");
6871     }
6872 
6873   /* Compute the value of the predicate COND by checking the known
6874      ranges of each of its operands.
6875 
6876      Note that we cannot evaluate all the equivalent ranges here
6877      because those ranges may not yet be final and with the current
6878      propagation strategy, we cannot determine when the value ranges
6879      of the names in the equivalence set have changed.
6880 
6881      For instance, given the following code fragment
6882 
6883         i_5 = PHI <8, i_13>
6884 	...
6885      	i_14 = ASSERT_EXPR <i_5, i_5 != 0>
6886 	if (i_14 == 1)
6887 	  ...
6888 
6889      Assume that on the first visit to i_14, i_5 has the temporary
6890      range [8, 8] because the second argument to the PHI function is
6891      not yet executable.  We derive the range ~[0, 0] for i_14 and the
6892      equivalence set { i_5 }.  So, when we visit 'if (i_14 == 1)' for
6893      the first time, since i_14 is equivalent to the range [8, 8], we
6894      determine that the predicate is always false.
6895 
6896      On the next round of propagation, i_13 is determined to be
6897      VARYING, which causes i_5 to drop down to VARYING.  So, another
6898      visit to i_14 is scheduled.  In this second visit, we compute the
6899      exact same range and equivalence set for i_14, namely ~[0, 0] and
6900      { i_5 }.  But we did not have the previous range for i_5
6901      registered, so vrp_visit_assignment thinks that the range for
6902      i_14 has not changed.  Therefore, the predicate 'if (i_14 == 1)'
6903      is not visited again, which stops propagation from visiting
6904      statements in the THEN clause of that if().
6905 
6906      To properly fix this we would need to keep the previous range
6907      value for the names in the equivalence set.  This way we would've
6908      discovered that from one visit to the other i_5 changed from
6909      range [8, 8] to VR_VARYING.
6910 
6911      However, fixing this apparent limitation may not be worth the
6912      additional checking.  Testing on several code bases (GCC, DLV,
6913      MICO, TRAMP3D and SPEC2000) showed that doing this results in
6914      4 more predicates folded in SPEC.  */
6915   sop = false;
6916 
6917   val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
6918 						 gimple_cond_lhs (stmt),
6919 						 gimple_cond_rhs (stmt),
6920 						 false, &sop, NULL);
6921   if (val)
6922     {
6923       if (!sop)
6924 	*taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
6925       else
6926 	{
6927 	  if (dump_file && (dump_flags & TDF_DETAILS))
6928 	    fprintf (dump_file,
6929 		     "\nIgnoring predicate evaluation because "
6930 		     "it assumes that signed overflow is undefined");
6931 	  val = NULL_TREE;
6932 	}
6933     }
6934 
6935   if (dump_file && (dump_flags & TDF_DETAILS))
6936     {
6937       fprintf (dump_file, "\nPredicate evaluates to: ");
6938       if (val == NULL_TREE)
6939 	fprintf (dump_file, "DON'T KNOW\n");
6940       else
6941 	print_generic_stmt (dump_file, val, 0);
6942     }
6943 
6944   return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
6945 }
6946 
6947 /* Searches the case label vector of switch statement STMT for the index
6948    *IDX of the CASE_LABEL that includes the value VAL.  The search is
6949    restricted to the range [START_IDX, n - 1] where n is the number of labels.
6950 
6951    If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
6952    returned.
6953 
6954    If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
6955    it is placed in IDX and false is returned.
6956 
6957    If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
6958    returned. */
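
/* As an illustration, assume a hypothetical switch whose sorted labels
   (index 0 being the default label) are

	1: case 1:	2: case 5 ... 8:	3: case 10:

   Searching for VAL == 6 stores 2 in *IDX and returns true (6 lies in
   5 ... 8).  Searching for VAL == 9 stores 3 in *IDX (the next larger
   label) and returns false.  Searching for VAL == 11 stores n == 4 in
   *IDX and returns false.  */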
6959 
6960 static bool
6961 find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx)
6962 {
6963   size_t n = gimple_switch_num_labels (stmt);
6964   size_t low, high;
6965 
6966   /* Find case label for minimum of the value range or the next one.
6967      At each iteration we are searching in [low, high - 1]. */
6968 
6969   for (low = start_idx, high = n; high != low; )
6970     {
6971       tree t;
6972       int cmp;
6973       /* Note that i != high, so we never ask for n. */
6974       size_t i = (high + low) / 2;
6975       t = gimple_switch_label (stmt, i);
6976 
6977       /* Cache the result of comparing CASE_LOW and val.  */
6978       cmp = tree_int_cst_compare (CASE_LOW (t), val);
6979 
6980       if (cmp == 0)
6981 	{
6982 	  /* Ranges cannot be empty. */
6983 	  *idx = i;
6984 	  return true;
6985 	}
6986       else if (cmp > 0)
6987         high = i;
6988       else
6989 	{
6990 	  low = i + 1;
6991 	  if (CASE_HIGH (t) != NULL
6992 	      && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
6993 	    {
6994 	      *idx = i;
6995 	      return true;
6996 	    }
6997         }
6998     }
6999 
7000   *idx = high;
7001   return false;
7002 }
7003 
7004 /* Searches the case label vector of switch statement STMT for the range of
7005    CASE_LABELs that is used for values between MIN and MAX.  The first index
7006    is placed in MIN_IDX and the last index in MAX_IDX.  If the range of
7007    CASE_LABELs is empty then MAX_IDX < MIN_IDX.
7008    Returns true if the default label is not needed.  */
7009 
7010 static bool
7011 find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
7012 		       size_t *max_idx)
7013 {
7014   size_t i, j;
7015   bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
7016   bool max_take_default = !find_case_label_index (stmt, i, max, &j);
7017 
7018   if (i == j
7019       && min_take_default
7020       && max_take_default)
7021     {
7022       /* Only the default case label reached.
7023          Return an empty range. */
7024       *min_idx = 1;
7025       *max_idx = 0;
7026       return false;
7027     }
7028   else
7029     {
7030       bool take_default = min_take_default || max_take_default;
7031       tree low, high;
7032       size_t k;
7033 
7034       if (max_take_default)
7035 	j--;
7036 
7037       /* If the case label range is contiguous, we do not need
7038 	 the default case label.  Verify that.  */
7039       high = CASE_LOW (gimple_switch_label (stmt, i));
7040       if (CASE_HIGH (gimple_switch_label (stmt, i)))
7041 	high = CASE_HIGH (gimple_switch_label (stmt, i));
7042       for (k = i + 1; k <= j; ++k)
7043 	{
7044 	  low = CASE_LOW (gimple_switch_label (stmt, k));
7045 	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
7046 	    {
7047 	      take_default = true;
7048 	      break;
7049 	    }
7050 	  high = low;
7051 	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
7052 	    high = CASE_HIGH (gimple_switch_label (stmt, k));
7053 	}
7054 
7055       *min_idx = i;
7056       *max_idx = j;
7057       return !take_default;
7058     }
7059 }
7060 
7061 /* Searches the case label vector of switch statement STMT for the ranges of
7062    CASE_LABELs that are used in range VR.  The indices are placed in MIN_IDX1,
7063    MAX_IDX1, MIN_IDX2 and MAX_IDX2.  If the ranges of CASE_LABELs are empty
7064    then MAX_IDX1 < MIN_IDX1.  Returns true if the default label is not needed.  */
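
/* As a hypothetical example, for a switch with labels 1, 5 ... 8 and 10
   (default at index 0), an anti-range ~[5, 8] for the switch index
   selects the label ranges [1, 1] and [3, 3], returned through
   MIN_IDX1/MAX_IDX1 and MIN_IDX2/MAX_IDX2, and the function returns
   false because values such as 3 or 11 still reach the default label.  */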
7065 
7066 static bool
7067 find_case_label_ranges (gimple stmt, value_range_t *vr, size_t *min_idx1,
7068 			size_t *max_idx1, size_t *min_idx2,
7069 			size_t *max_idx2)
7070 {
7071   size_t i, j, k, l;
7072   unsigned int n = gimple_switch_num_labels (stmt);
7073   bool take_default;
7074   tree case_low, case_high;
7075   tree min = vr->min, max = vr->max;
7076 
7077   gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);
7078 
7079   take_default = !find_case_label_range (stmt, min, max, &i, &j);
7080 
7081   /* Set second range to empty.  */
7082   *min_idx2 = 1;
7083   *max_idx2 = 0;
7084 
7085   if (vr->type == VR_RANGE)
7086     {
7087       *min_idx1 = i;
7088       *max_idx1 = j;
7089       return !take_default;
7090     }
7091 
7092   /* Set first range to all case labels.  */
7093   *min_idx1 = 1;
7094   *max_idx1 = n - 1;
7095 
7096   if (i > j)
7097     return false;
7098 
7099   /* Make sure all the values of case labels [i, j] are contained in
7100      range [MIN, MAX].  */
7101   case_low = CASE_LOW (gimple_switch_label (stmt, i));
7102   case_high = CASE_HIGH (gimple_switch_label (stmt, j));
7103   if (tree_int_cst_compare (case_low, min) < 0)
7104     i += 1;
7105   if (case_high != NULL_TREE
7106       && tree_int_cst_compare (max, case_high) < 0)
7107     j -= 1;
7108 
7109   if (i > j)
7110     return false;
7111 
7112   /* If the range spans case labels [i, j], the corresponding anti-range spans
7113      the labels [1, i - 1] and [j + 1, n - 1].  */
7114   k = j + 1;
7115   l = n - 1;
7116   if (k > l)
7117     {
7118       k = 1;
7119       l = 0;
7120     }
7121 
7122   j = i - 1;
7123   i = 1;
7124   if (i > j)
7125     {
7126       i = k;
7127       j = l;
7128       k = 1;
7129       l = 0;
7130     }
7131 
7132   *min_idx1 = i;
7133   *max_idx1 = j;
7134   *min_idx2 = k;
7135   *max_idx2 = l;
7136   return false;
7137 }
7138 
7139 /* Visit switch statement STMT.  If we can determine which edge
7140    will be taken out of STMT's basic block, record it in
7141    *TAKEN_EDGE_P and return SSA_PROP_INTERESTING.  Otherwise, return
7142    SSA_PROP_VARYING.  */
7143 
7144 static enum ssa_prop_result
7145 vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
7146 {
7147   tree op, val;
7148   value_range_t *vr;
7149   size_t i = 0, j = 0, k, l;
7150   bool take_default;
7151 
7152   *taken_edge_p = NULL;
7153   op = gimple_switch_index (stmt);
7154   if (TREE_CODE (op) != SSA_NAME)
7155     return SSA_PROP_VARYING;
7156 
7157   vr = get_value_range (op);
7158   if (dump_file && (dump_flags & TDF_DETAILS))
7159     {
7160       fprintf (dump_file, "\nVisiting switch expression with operand ");
7161       print_generic_expr (dump_file, op, 0);
7162       fprintf (dump_file, " with known range ");
7163       dump_value_range (dump_file, vr);
7164       fprintf (dump_file, "\n");
7165     }
7166 
7167   if ((vr->type != VR_RANGE
7168        && vr->type != VR_ANTI_RANGE)
7169       || symbolic_range_p (vr))
7170     return SSA_PROP_VARYING;
7171 
7172   /* Find the single edge that is taken from the switch expression.  */
7173   take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
7174 
7175   /* Check if the range spans no CASE_LABEL.  If so, we only reach the
7176      default label.  */
7177   if (j < i)
7178     {
7179       gcc_assert (take_default);
7180       val = gimple_switch_default_label (stmt);
7181     }
7182   else
7183     {
7184       /* Check if labels with index i to j and maybe the default label
7185 	 are all reaching the same label.  */
7186 
7187       val = gimple_switch_label (stmt, i);
7188       if (take_default
7189 	  && CASE_LABEL (gimple_switch_default_label (stmt))
7190 	  != CASE_LABEL (val))
7191 	{
7192 	  if (dump_file && (dump_flags & TDF_DETAILS))
7193 	    fprintf (dump_file, "  not a single destination for this "
7194 		     "range\n");
7195           return SSA_PROP_VARYING;
7196 	}
7197       for (++i; i <= j; ++i)
7198         {
7199           if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
7200 	    {
7201 	      if (dump_file && (dump_flags & TDF_DETAILS))
7202 		fprintf (dump_file, "  not a single destination for this "
7203 			 "range\n");
7204 	      return SSA_PROP_VARYING;
7205 	    }
7206         }
7207       for (; k <= l; ++k)
7208         {
7209           if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
7210 	    {
7211 	      if (dump_file && (dump_flags & TDF_DETAILS))
7212 		fprintf (dump_file, "  not a single destination for this "
7213 			 "range\n");
7214 	      return SSA_PROP_VARYING;
7215 	    }
7216         }
7217     }
7218 
7219   *taken_edge_p = find_edge (gimple_bb (stmt),
7220 			     label_to_block (CASE_LABEL (val)));
7221 
7222   if (dump_file && (dump_flags & TDF_DETAILS))
7223     {
7224       fprintf (dump_file, "  will take edge to ");
7225       print_generic_stmt (dump_file, CASE_LABEL (val), 0);
7226     }
7227 
7228   return SSA_PROP_INTERESTING;
7229 }
7230 
7231 
7232 /* Evaluate statement STMT.  If the statement produces a useful range,
7233    return SSA_PROP_INTERESTING and record the SSA name with the
7234    interesting range into *OUTPUT_P.
7235 
7236    If STMT is a conditional branch and we can determine its truth
7237    value, the taken edge is recorded in *TAKEN_EDGE_P.
7238 
7239    If STMT produces a varying value, return SSA_PROP_VARYING.  */
7240 
7241 static enum ssa_prop_result
7242 vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
7243 {
7244   tree def;
7245   ssa_op_iter iter;
7246 
7247   if (dump_file && (dump_flags & TDF_DETAILS))
7248     {
7249       fprintf (dump_file, "\nVisiting statement:\n");
7250       print_gimple_stmt (dump_file, stmt, 0, dump_flags);
7251       fprintf (dump_file, "\n");
7252     }
7253 
7254   if (!stmt_interesting_for_vrp (stmt))
7255     gcc_assert (stmt_ends_bb_p (stmt));
7256   else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
7257     {
7258       /* In general, assignments with virtual operands are not useful
7259 	 for deriving ranges, with the obvious exception of calls to
7260 	 builtin functions.  */
7261       if ((is_gimple_call (stmt)
7262 	   && gimple_call_fndecl (stmt) != NULL_TREE
7263 	   && DECL_BUILT_IN (gimple_call_fndecl (stmt)))
7264 	  || !gimple_vuse (stmt))
7265 	return vrp_visit_assignment_or_call (stmt, output_p);
7266     }
7267   else if (gimple_code (stmt) == GIMPLE_COND)
7268     return vrp_visit_cond_stmt (stmt, taken_edge_p);
7269   else if (gimple_code (stmt) == GIMPLE_SWITCH)
7270     return vrp_visit_switch_stmt (stmt, taken_edge_p);
7271 
7272   /* All other statements produce nothing of interest for VRP, so mark
7273      their outputs varying and prevent further simulation.  */
7274   FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
7275     set_value_range_to_varying (get_value_range (def));
7276 
7277   return SSA_PROP_VARYING;
7278 }
7279 
7280 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
7281    { VR1TYPE, VR1MIN, VR1MAX } and store the result
7282    in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
7283    possible such range.  The resulting range is not canonicalized.  */
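
/* A few hypothetical examples of the resulting union:

	[1, 5]    union  [3, 10]    ->  [1, 10]
	~[0, 0]   union  [5, 10]    ->  ~[0, 0]
	[MIN, 3]  union  [10, MAX]  ->  ~[4, 9]

   Unioning a range with an anti-range over identical bounds gives up
   and produces VR_VARYING.  */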
7284 
7285 static void
7286 union_ranges (enum value_range_type *vr0type,
7287 	      tree *vr0min, tree *vr0max,
7288 	      enum value_range_type vr1type,
7289 	      tree vr1min, tree vr1max)
7290 {
7291   bool mineq = operand_equal_p (*vr0min, vr1min, 0);
7292   bool maxeq = operand_equal_p (*vr0max, vr1max, 0);
7293 
7294   /* [] is vr0, () is vr1 in the following classification comments.  */
7295   if (mineq && maxeq)
7296     {
7297       /* [(  )] */
7298       if (*vr0type == vr1type)
7299 	/* Nothing to do for equal ranges.  */
7300 	;
7301       else if ((*vr0type == VR_RANGE
7302 		&& vr1type == VR_ANTI_RANGE)
7303 	       || (*vr0type == VR_ANTI_RANGE
7304 		   && vr1type == VR_RANGE))
7305 	{
7306 	  /* For anti-range with range union the result is varying.  */
7307 	  goto give_up;
7308 	}
7309       else
7310 	gcc_unreachable ();
7311     }
7312   else if (operand_less_p (*vr0max, vr1min) == 1
7313 	   || operand_less_p (vr1max, *vr0min) == 1)
7314     {
7315       /* [ ] ( ) or ( ) [ ]
7316 	 If the ranges have an empty intersection, the result of the
7317 	 union operation is the anti-range, or, if both are anti-ranges,
7318 	 it covers everything.  */
7319       if (*vr0type == VR_ANTI_RANGE
7320 	  && vr1type == VR_ANTI_RANGE)
7321 	goto give_up;
7322       else if (*vr0type == VR_ANTI_RANGE
7323 	       && vr1type == VR_RANGE)
7324 	;
7325       else if (*vr0type == VR_RANGE
7326 	       && vr1type == VR_ANTI_RANGE)
7327 	{
7328 	  *vr0type = vr1type;
7329 	  *vr0min = vr1min;
7330 	  *vr0max = vr1max;
7331 	}
7332       else if (*vr0type == VR_RANGE
7333 	       && vr1type == VR_RANGE)
7334 	{
7335 	  /* The result is the convex hull of both ranges.  */
7336 	  if (operand_less_p (*vr0max, vr1min) == 1)
7337 	    {
7338 	      /* If the result can be an anti-range, create one.  */
7339 	      if (TREE_CODE (*vr0max) == INTEGER_CST
7340 		  && TREE_CODE (vr1min) == INTEGER_CST
7341 		  && vrp_val_is_min (*vr0min)
7342 		  && vrp_val_is_max (vr1max))
7343 		{
7344 		  tree min = int_const_binop (PLUS_EXPR,
7345 					      *vr0max, integer_one_node);
7346 		  tree max = int_const_binop (MINUS_EXPR,
7347 					      vr1min, integer_one_node);
7348 		  if (!operand_less_p (max, min))
7349 		    {
7350 		      *vr0type = VR_ANTI_RANGE;
7351 		      *vr0min = min;
7352 		      *vr0max = max;
7353 		    }
7354 		  else
7355 		    *vr0max = vr1max;
7356 		}
7357 	      else
7358 		*vr0max = vr1max;
7359 	    }
7360 	  else
7361 	    {
7362 	      /* If the result can be an anti-range, create one.  */
7363 	      if (TREE_CODE (vr1max) == INTEGER_CST
7364 		  && TREE_CODE (*vr0min) == INTEGER_CST
7365 		  && vrp_val_is_min (vr1min)
7366 		  && vrp_val_is_max (*vr0max))
7367 		{
7368 		  tree min = int_const_binop (PLUS_EXPR,
7369 					      vr1max, integer_one_node);
7370 		  tree max = int_const_binop (MINUS_EXPR,
7371 					      *vr0min, integer_one_node);
7372 		  if (!operand_less_p (max, min))
7373 		    {
7374 		      *vr0type = VR_ANTI_RANGE;
7375 		      *vr0min = min;
7376 		      *vr0max = max;
7377 		    }
7378 		  else
7379 		    *vr0min = vr1min;
7380 		}
7381 	      else
7382 		*vr0min = vr1min;
7383 	    }
7384 	}
7385       else
7386 	gcc_unreachable ();
7387     }
7388   else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
7389 	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
7390     {
7391       /* [ (  ) ] or [(  ) ] or [ (  )] */
7392       if (*vr0type == VR_RANGE
7393 	  && vr1type == VR_RANGE)
7394 	;
7395       else if (*vr0type == VR_ANTI_RANGE
7396 	       && vr1type == VR_ANTI_RANGE)
7397 	{
7398 	  *vr0type = vr1type;
7399 	  *vr0min = vr1min;
7400 	  *vr0max = vr1max;
7401 	}
7402       else if (*vr0type == VR_ANTI_RANGE
7403 	       && vr1type == VR_RANGE)
7404 	{
7405 	  /* Arbitrarily choose the right or left gap.  */
7406 	  if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
7407 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min, integer_one_node);
7408 	  else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
7409 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
7410 	  else
7411 	    goto give_up;
7412 	}
7413       else if (*vr0type == VR_RANGE
7414 	       && vr1type == VR_ANTI_RANGE)
7415 	/* The result covers everything.  */
7416 	goto give_up;
7417       else
7418 	gcc_unreachable ();
7419     }
7420   else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
7421 	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
7422     {
7423       /* ( [  ] ) or ([  ] ) or ( [  ]) */
7424       if (*vr0type == VR_RANGE
7425 	  && vr1type == VR_RANGE)
7426 	{
7427 	  *vr0type = vr1type;
7428 	  *vr0min = vr1min;
7429 	  *vr0max = vr1max;
7430 	}
7431       else if (*vr0type == VR_ANTI_RANGE
7432 	       && vr1type == VR_ANTI_RANGE)
7433 	;
7434       else if (*vr0type == VR_RANGE
7435 	       && vr1type == VR_ANTI_RANGE)
7436 	{
7437 	  *vr0type = VR_ANTI_RANGE;
7438 	  if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
7439 	    {
7440 	      *vr0max = int_const_binop (MINUS_EXPR, *vr0min, integer_one_node);
7441 	      *vr0min = vr1min;
7442 	    }
7443 	  else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
7444 	    {
7445 	      *vr0min = int_const_binop (PLUS_EXPR, *vr0max, integer_one_node);
7446 	      *vr0max = vr1max;
7447 	    }
7448 	  else
7449 	    goto give_up;
7450 	}
7451       else if (*vr0type == VR_ANTI_RANGE
7452 	       && vr1type == VR_RANGE)
7453 	/* The result covers everything.  */
7454 	goto give_up;
7455       else
7456 	gcc_unreachable ();
7457     }
7458   else if ((operand_less_p (vr1min, *vr0max) == 1
7459 	    || operand_equal_p (vr1min, *vr0max, 0))
7460 	   && operand_less_p (*vr0min, vr1min) == 1
7461 	   && operand_less_p (*vr0max, vr1max) == 1)
7462     {
7463       /* [  (  ]  ) or [   ](   ) */
7464       if (*vr0type == VR_RANGE
7465 	  && vr1type == VR_RANGE)
7466 	*vr0max = vr1max;
7467       else if (*vr0type == VR_ANTI_RANGE
7468 	       && vr1type == VR_ANTI_RANGE)
7469 	*vr0min = vr1min;
7470       else if (*vr0type == VR_ANTI_RANGE
7471 	       && vr1type == VR_RANGE)
7472 	{
7473 	  if (TREE_CODE (vr1min) == INTEGER_CST)
7474 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min, integer_one_node);
7475 	  else
7476 	    goto give_up;
7477 	}
7478       else if (*vr0type == VR_RANGE
7479 	       && vr1type == VR_ANTI_RANGE)
7480 	{
7481 	  if (TREE_CODE (*vr0max) == INTEGER_CST)
7482 	    {
7483 	      *vr0type = vr1type;
7484 	      *vr0min = int_const_binop (PLUS_EXPR, *vr0max, integer_one_node);
7485 	      *vr0max = vr1max;
7486 	    }
7487 	  else
7488 	    goto give_up;
7489 	}
7490       else
7491 	gcc_unreachable ();
7492     }
7493   else if ((operand_less_p (*vr0min, vr1max) == 1
7494 	    || operand_equal_p (*vr0min, vr1max, 0))
7495 	   && operand_less_p (vr1min, *vr0min) == 1
7496 	   && operand_less_p (vr1max, *vr0max) == 1)
7497     {
7498       /* (  [  )  ] or (   )[   ] */
7499       if (*vr0type == VR_RANGE
7500 	  && vr1type == VR_RANGE)
7501 	*vr0min = vr1min;
7502       else if (*vr0type == VR_ANTI_RANGE
7503 	       && vr1type == VR_ANTI_RANGE)
7504 	*vr0max = vr1max;
7505       else if (*vr0type == VR_ANTI_RANGE
7506 	       && vr1type == VR_RANGE)
7507 	{
7508 	  if (TREE_CODE (vr1max) == INTEGER_CST)
7509 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
7510 	  else
7511 	    goto give_up;
7512 	}
7513       else if (*vr0type == VR_RANGE
7514 	       && vr1type == VR_ANTI_RANGE)
7515 	{
7516 	  if (TREE_CODE (*vr0min) == INTEGER_CST)
7517 	    {
7518 	      *vr0type = vr1type;
7519 	      *vr0min = vr1min;
7520 	      *vr0max = int_const_binop (MINUS_EXPR, *vr0min, integer_one_node);
7521 	    }
7522 	  else
7523 	    goto give_up;
7524 	}
7525       else
7526 	gcc_unreachable ();
7527     }
7528   else
7529     goto give_up;
7530 
7531   return;
7532 
7533 give_up:
7534   *vr0type = VR_VARYING;
7535   *vr0min = NULL_TREE;
7536   *vr0max = NULL_TREE;
7537 }
7538 
7539 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
7540    { VR1TYPE, VR1MIN, VR1MAX } and store the result
7541    in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
7542    possible such range.  The resulting range is not canonicalized.  */
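
/* A few hypothetical examples of the resulting intersection:

	[1, 10]   intersect  [5, 20]   ->  [5, 10]
	[0, 100]  intersect  ~[0, 0]   ->  [1, 100]
	[1, 5]    intersect  [10, 20]  ->  VR_UNDEFINED (empty)

   The raw result is not canonicalized; vrp_intersect_ranges_1 below
   takes care of that.  */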
7543 
7544 static void
7545 intersect_ranges (enum value_range_type *vr0type,
7546 		  tree *vr0min, tree *vr0max,
7547 		  enum value_range_type vr1type,
7548 		  tree vr1min, tree vr1max)
7549 {
7550   bool mineq = operand_equal_p (*vr0min, vr1min, 0);
7551   bool maxeq = operand_equal_p (*vr0max, vr1max, 0);
7552 
7553   /* [] is vr0, () is vr1 in the following classification comments.  */
7554   if (mineq && maxeq)
7555     {
7556       /* [(  )] */
7557       if (*vr0type == vr1type)
7558 	/* Nothing to do for equal ranges.  */
7559 	;
7560       else if ((*vr0type == VR_RANGE
7561 		&& vr1type == VR_ANTI_RANGE)
7562 	       || (*vr0type == VR_ANTI_RANGE
7563 		   && vr1type == VR_RANGE))
7564 	{
7565 	  /* For anti-range with range intersection the result is empty.  */
7566 	  *vr0type = VR_UNDEFINED;
7567 	  *vr0min = NULL_TREE;
7568 	  *vr0max = NULL_TREE;
7569 	}
7570       else
7571 	gcc_unreachable ();
7572     }
7573   else if (operand_less_p (*vr0max, vr1min) == 1
7574 	   || operand_less_p (vr1max, *vr0min) == 1)
7575     {
7576       /* [ ] ( ) or ( ) [ ]
7577 	 If the ranges have an empty intersection, intersecting an
7578 	 anti-range with a range yields the range itself, while
7579 	 intersecting two ranges yields the empty range.  */
7580       if (*vr0type == VR_RANGE
7581 	  && vr1type == VR_ANTI_RANGE)
7582 	;
7583       else if (*vr0type == VR_ANTI_RANGE
7584 	       && vr1type == VR_RANGE)
7585 	{
7586 	  *vr0type = vr1type;
7587 	  *vr0min = vr1min;
7588 	  *vr0max = vr1max;
7589 	}
7590       else if (*vr0type == VR_RANGE
7591 	       && vr1type == VR_RANGE)
7592 	{
7593 	  *vr0type = VR_UNDEFINED;
7594 	  *vr0min = NULL_TREE;
7595 	  *vr0max = NULL_TREE;
7596 	}
7597       else if (*vr0type == VR_ANTI_RANGE
7598 	       && vr1type == VR_ANTI_RANGE)
7599 	{
7600 	  /* If the anti-ranges are adjacent to each other merge them.  */
7601 	  if (TREE_CODE (*vr0max) == INTEGER_CST
7602 	      && TREE_CODE (vr1min) == INTEGER_CST
7603 	      && operand_less_p (*vr0max, vr1min) == 1
7604 	      && integer_onep (int_const_binop (MINUS_EXPR,
7605 						vr1min, *vr0max)))
7606 	    *vr0max = vr1max;
7607 	  else if (TREE_CODE (vr1max) == INTEGER_CST
7608 		   && TREE_CODE (*vr0min) == INTEGER_CST
7609 		   && operand_less_p (vr1max, *vr0min) == 1
7610 		   && integer_onep (int_const_binop (MINUS_EXPR,
7611 						     *vr0min, vr1max)))
7612 	    *vr0min = vr1min;
7613 	  /* Else arbitrarily take VR0.  */
7614 	}
7615     }
7616   else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
7617 	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
7618     {
7619       /* [ (  ) ] or [(  ) ] or [ (  )] */
7620       if (*vr0type == VR_RANGE
7621 	  && vr1type == VR_RANGE)
7622 	{
7623 	  /* If both are ranges the result is the inner one.  */
7624 	  *vr0type = vr1type;
7625 	  *vr0min = vr1min;
7626 	  *vr0max = vr1max;
7627 	}
7628       else if (*vr0type == VR_RANGE
7629 	       && vr1type == VR_ANTI_RANGE)
7630 	{
7631 	  /* Choose the right gap if the left one is empty.  */
7632 	  if (mineq)
7633 	    {
7634 	      if (TREE_CODE (vr1max) == INTEGER_CST)
7635 		*vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
7636 	      else
7637 		*vr0min = vr1max;
7638 	    }
7639 	  /* Choose the left gap if the right one is empty.  */
7640 	  else if (maxeq)
7641 	    {
7642 	      if (TREE_CODE (vr1min) == INTEGER_CST)
7643 		*vr0max = int_const_binop (MINUS_EXPR, vr1min,
7644 					   integer_one_node);
7645 	      else
7646 		*vr0max = vr1min;
7647 	    }
7648 	  /* Choose the anti-range if the range is effectively varying.  */
7649 	  else if (vrp_val_is_min (*vr0min)
7650 		   && vrp_val_is_max (*vr0max))
7651 	    {
7652 	      *vr0type = vr1type;
7653 	      *vr0min = vr1min;
7654 	      *vr0max = vr1max;
7655 	    }
7656 	  /* Else choose the range.  */
7657 	}
7658       else if (*vr0type == VR_ANTI_RANGE
7659 	       && vr1type == VR_ANTI_RANGE)
7660 	/* If both are anti-ranges the result is the outer one.  */
7661 	;
7662       else if (*vr0type == VR_ANTI_RANGE
7663 	       && vr1type == VR_RANGE)
7664 	{
7665 	  /* The intersection is empty.  */
7666 	  *vr0type = VR_UNDEFINED;
7667 	  *vr0min = NULL_TREE;
7668 	  *vr0max = NULL_TREE;
7669 	}
7670       else
7671 	gcc_unreachable ();
7672     }
7673   else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
7674 	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
7675     {
7676       /* ( [  ] ) or ([  ] ) or ( [  ]) */
7677       if (*vr0type == VR_RANGE
7678 	  && vr1type == VR_RANGE)
7679 	/* Choose the inner range.  */
7680 	;
7681       else if (*vr0type == VR_ANTI_RANGE
7682 	       && vr1type == VR_RANGE)
7683 	{
7684 	  /* Choose the right gap if the left is empty.  */
7685 	  if (mineq)
7686 	    {
7687 	      *vr0type = VR_RANGE;
7688 	      if (TREE_CODE (*vr0max) == INTEGER_CST)
7689 		*vr0min = int_const_binop (PLUS_EXPR, *vr0max,
7690 					   integer_one_node);
7691 	      else
7692 		*vr0min = *vr0max;
7693 	      *vr0max = vr1max;
7694 	    }
7695 	  /* Choose the left gap if the right is empty.  */
7696 	  else if (maxeq)
7697 	    {
7698 	      *vr0type = VR_RANGE;
7699 	      if (TREE_CODE (*vr0min) == INTEGER_CST)
7700 		*vr0max = int_const_binop (MINUS_EXPR, *vr0min,
7701 					   integer_one_node);
7702 	      else
7703 		*vr0max = *vr0min;
7704 	      *vr0min = vr1min;
7705 	    }
7706 	  /* Choose the anti-range if the range is effectively varying.  */
7707 	  else if (vrp_val_is_min (vr1min)
7708 		   && vrp_val_is_max (vr1max))
7709 	    ;
7710 	  /* Else choose the range.  */
7711 	  else
7712 	    {
7713 	      *vr0type = vr1type;
7714 	      *vr0min = vr1min;
7715 	      *vr0max = vr1max;
7716 	    }
7717 	}
7718       else if (*vr0type == VR_ANTI_RANGE
7719 	       && vr1type == VR_ANTI_RANGE)
7720 	{
7721 	  /* If both are anti-ranges the result is the outer one.  */
7722 	  *vr0type = vr1type;
7723 	  *vr0min = vr1min;
7724 	  *vr0max = vr1max;
7725 	}
7726       else if (vr1type == VR_ANTI_RANGE
7727 	       && *vr0type == VR_RANGE)
7728 	{
7729 	  /* The intersection is empty.  */
7730 	  *vr0type = VR_UNDEFINED;
7731 	  *vr0min = NULL_TREE;
7732 	  *vr0max = NULL_TREE;
7733 	}
7734       else
7735 	gcc_unreachable ();
7736     }
7737   else if ((operand_less_p (vr1min, *vr0max) == 1
7738 	    || operand_equal_p (vr1min, *vr0max, 0))
7739 	   && operand_less_p (*vr0min, vr1min) == 1)
7740     {
7741       /* [  (  ]  ) or [  ](  ) */
7742       if (*vr0type == VR_ANTI_RANGE
7743 	  && vr1type == VR_ANTI_RANGE)
7744 	*vr0max = vr1max;
7745       else if (*vr0type == VR_RANGE
7746 	       && vr1type == VR_RANGE)
7747 	*vr0min = vr1min;
7748       else if (*vr0type == VR_RANGE
7749 	       && vr1type == VR_ANTI_RANGE)
7750 	{
7751 	  if (TREE_CODE (vr1min) == INTEGER_CST)
7752 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
7753 				       integer_one_node);
7754 	  else
7755 	    *vr0max = vr1min;
7756 	}
7757       else if (*vr0type == VR_ANTI_RANGE
7758 	       && vr1type == VR_RANGE)
7759 	{
7760 	  *vr0type = VR_RANGE;
7761 	  if (TREE_CODE (*vr0max) == INTEGER_CST)
7762 	    *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
7763 				       integer_one_node);
7764 	  else
7765 	    *vr0min = *vr0max;
7766 	  *vr0max = vr1max;
7767 	}
7768       else
7769 	gcc_unreachable ();
7770     }
7771   else if ((operand_less_p (*vr0min, vr1max) == 1
7772 	    || operand_equal_p (*vr0min, vr1max, 0))
7773 	   && operand_less_p (vr1min, *vr0min) == 1)
7774     {
7775       /* (  [  )  ] or (  )[  ] */
7776       if (*vr0type == VR_ANTI_RANGE
7777 	  && vr1type == VR_ANTI_RANGE)
7778 	*vr0min = vr1min;
7779       else if (*vr0type == VR_RANGE
7780 	       && vr1type == VR_RANGE)
7781 	*vr0max = vr1max;
7782       else if (*vr0type == VR_RANGE
7783 	       && vr1type == VR_ANTI_RANGE)
7784 	{
7785 	  if (TREE_CODE (vr1max) == INTEGER_CST)
7786 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
7787 				       integer_one_node);
7788 	  else
7789 	    *vr0min = vr1max;
7790 	}
7791       else if (*vr0type == VR_ANTI_RANGE
7792 	       && vr1type == VR_RANGE)
7793 	{
7794 	  *vr0type = VR_RANGE;
7795 	  if (TREE_CODE (*vr0min) == INTEGER_CST)
7796 	    *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
7797 				       integer_one_node);
7798 	  else
7799 	    *vr0max = *vr0min;
7800 	  *vr0min = vr1min;
7801 	}
7802       else
7803 	gcc_unreachable ();
7804     }
7805 
7806   /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as the
7807      result for the intersection.  That is always a conservatively
7808      correct estimate.  */
7809 
7810   return;
7811 }
7812 
7813 
7814 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
7815    in *VR0.  This may not be the smallest possible such range.  */
7816 
7817 static void
7818 vrp_intersect_ranges_1 (value_range_t *vr0, value_range_t *vr1)
7819 {
7820   value_range_t saved;
7821 
7822   /* If either range is VR_VARYING the other one wins.  */
7823   if (vr1->type == VR_VARYING)
7824     return;
7825   if (vr0->type == VR_VARYING)
7826     {
7827       copy_value_range (vr0, vr1);
7828       return;
7829     }
7830 
7831   /* When either range is VR_UNDEFINED the resulting range is
7832      VR_UNDEFINED, too.  */
7833   if (vr0->type == VR_UNDEFINED)
7834     return;
7835   if (vr1->type == VR_UNDEFINED)
7836     {
7837       set_value_range_to_undefined (vr0);
7838       return;
7839     }
7840 
7841   /* Save the original vr0 so we can return it as conservative intersection
7842      result when our worker turns things to varying.  */
7843   saved = *vr0;
7844   intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
7845 		    vr1->type, vr1->min, vr1->max);
7846   /* Make sure to canonicalize the result though as the inversion of a
7847      VR_RANGE can still be a VR_RANGE.  */
7848   set_and_canonicalize_value_range (vr0, vr0->type,
7849 				    vr0->min, vr0->max, vr0->equiv);
7850   /* If that failed, use the saved original VR0.  */
7851   if (vr0->type == VR_VARYING)
7852     {
7853       *vr0 = saved;
7854       return;
7855     }
7856   /* If the result is VR_UNDEFINED there is no need to mess with
7857      the equivalencies.  */
7858   if (vr0->type == VR_UNDEFINED)
7859     return;
7860 
7861   /* The resulting set of equivalences for range intersection is the union of
7862      the two sets.  */
7863   if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
7864     bitmap_ior_into (vr0->equiv, vr1->equiv);
7865   else if (vr1->equiv && !vr0->equiv)
7866     bitmap_copy (vr0->equiv, vr1->equiv);
7867 }
7868 
7869 static void
7870 vrp_intersect_ranges (value_range_t *vr0, value_range_t *vr1)
7871 {
7872   if (dump_file && (dump_flags & TDF_DETAILS))
7873     {
7874       fprintf (dump_file, "Intersecting\n  ");
7875       dump_value_range (dump_file, vr0);
7876       fprintf (dump_file, "\nand\n  ");
7877       dump_value_range (dump_file, vr1);
7878       fprintf (dump_file, "\n");
7879     }
7880   vrp_intersect_ranges_1 (vr0, vr1);
7881   if (dump_file && (dump_flags & TDF_DETAILS))
7882     {
7883       fprintf (dump_file, "to\n  ");
7884       dump_value_range (dump_file, vr0);
7885       fprintf (dump_file, "\n");
7886     }
7887 }
7888 
7889 /* Meet operation for value ranges.  Given two value ranges VR0 and
7890    VR1, store in VR0 a range that contains both VR0 and VR1.  This
7891    may not be the smallest possible such range.  */
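
/* For instance, with hypothetical incoming PHI argument ranges,
   meeting [0, 10] with [20, 30] yields the conservative range
   [0, 30], while meeting ~[0, 0] with [5, 8] keeps ~[0, 0] and thus
   preserves the fact that the value is known to be nonzero.  */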
7892 
7893 static void
7894 vrp_meet_1 (value_range_t *vr0, value_range_t *vr1)
7895 {
7896   value_range_t saved;
7897 
7898   if (vr0->type == VR_UNDEFINED)
7899     {
7900       set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
7901       return;
7902     }
7903 
7904   if (vr1->type == VR_UNDEFINED)
7905     {
7906       /* VR0 already has the resulting range.  */
7907       return;
7908     }
7909 
7910   if (vr0->type == VR_VARYING)
7911     {
7912       /* Nothing to do.  VR0 already has the resulting range.  */
7913       return;
7914     }
7915 
7916   if (vr1->type == VR_VARYING)
7917     {
7918       set_value_range_to_varying (vr0);
7919       return;
7920     }
7921 
7922   saved = *vr0;
7923   union_ranges (&vr0->type, &vr0->min, &vr0->max,
7924 		vr1->type, vr1->min, vr1->max);
7925   if (vr0->type == VR_VARYING)
7926     {
7927       /* Failed to find an efficient meet.  Before giving up and setting
7928 	 the result to VARYING, see if we can at least derive a useful
7929 	 anti-range.  FIXME, all this nonsense about distinguishing
7930 	 anti-ranges from ranges is necessary because of the odd
7931 	 semantics of range_includes_zero_p and friends.  */
7932       if (((saved.type == VR_RANGE
7933 	    && range_includes_zero_p (saved.min, saved.max) == 0)
7934 	   || (saved.type == VR_ANTI_RANGE
7935 	       && range_includes_zero_p (saved.min, saved.max) == 1))
7936 	  && ((vr1->type == VR_RANGE
7937 	       && range_includes_zero_p (vr1->min, vr1->max) == 0)
7938 	      || (vr1->type == VR_ANTI_RANGE
7939 		  && range_includes_zero_p (vr1->min, vr1->max) == 1)))
7940 	{
7941 	  set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
7942 
7943 	  /* Since this meet operation did not result from the meeting of
7944 	     two equivalent names, VR0 cannot have any equivalences.  */
7945 	  if (vr0->equiv)
7946 	    bitmap_clear (vr0->equiv);
7947 	  return;
7948 	}
7949 
7950       set_value_range_to_varying (vr0);
7951       return;
7952     }
7953   set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
7954 				    vr0->equiv);
7955   if (vr0->type == VR_VARYING)
7956     return;
7957 
7958   /* The resulting set of equivalences is always the intersection of
7959      the two sets.  */
7960   if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
7961     bitmap_and_into (vr0->equiv, vr1->equiv);
7962   else if (vr0->equiv && !vr1->equiv)
7963     bitmap_clear (vr0->equiv);
7964 }
7965 
7966 static void
7967 vrp_meet (value_range_t *vr0, value_range_t *vr1)
7968 {
7969   if (dump_file && (dump_flags & TDF_DETAILS))
7970     {
7971       fprintf (dump_file, "Meeting\n  ");
7972       dump_value_range (dump_file, vr0);
7973       fprintf (dump_file, "\nand\n  ");
7974       dump_value_range (dump_file, vr1);
7975       fprintf (dump_file, "\n");
7976     }
7977   vrp_meet_1 (vr0, vr1);
7978   if (dump_file && (dump_flags & TDF_DETAILS))
7979     {
7980       fprintf (dump_file, "to\n  ");
7981       dump_value_range (dump_file, vr0);
7982       fprintf (dump_file, "\n");
7983     }
7984 }
7985 
7986 
7987 /* Visit all arguments for PHI node PHI that flow through executable
7988    edges.  If a valid value range can be derived from all the incoming
7989    value ranges, set a new range for the LHS of PHI.  */
7990 
7991 static enum ssa_prop_result
7992 vrp_visit_phi_node (gimple phi)
7993 {
7994   size_t i;
7995   tree lhs = PHI_RESULT (phi);
7996   value_range_t *lhs_vr = get_value_range (lhs);
7997   value_range_t vr_result = VR_INITIALIZER;
7998   bool first = true;
7999   int edges, old_edges;
8000   struct loop *l;
8001 
8002   if (dump_file && (dump_flags & TDF_DETAILS))
8003     {
8004       fprintf (dump_file, "\nVisiting PHI node: ");
8005       print_gimple_stmt (dump_file, phi, 0, dump_flags);
8006     }
8007 
8008   edges = 0;
8009   for (i = 0; i < gimple_phi_num_args (phi); i++)
8010     {
8011       edge e = gimple_phi_arg_edge (phi, i);
8012 
8013       if (dump_file && (dump_flags & TDF_DETAILS))
8014 	{
8015 	  fprintf (dump_file,
8016 	      "\n    Argument #%d (%d -> %d %sexecutable)\n",
8017 	      (int) i, e->src->index, e->dest->index,
8018 	      (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
8019 	}
8020 
8021       if (e->flags & EDGE_EXECUTABLE)
8022 	{
8023 	  tree arg = PHI_ARG_DEF (phi, i);
8024 	  value_range_t vr_arg;
8025 
8026 	  ++edges;
8027 
8028 	  if (TREE_CODE (arg) == SSA_NAME)
8029 	    {
8030 	      vr_arg = *(get_value_range (arg));
8031 	      /* Do not allow equivalences or symbolic ranges to leak in from
8032 		 backedges.  That creates invalid equivalencies.
8033 		 See PR53465 and PR54767.  */
8034 	      if (e->flags & EDGE_DFS_BACK
8035 		  && (vr_arg.type == VR_RANGE
8036 		      || vr_arg.type == VR_ANTI_RANGE))
8037 		{
8038 		  vr_arg.equiv = NULL;
8039 		  if (symbolic_range_p (&vr_arg))
8040 		    {
8041 		      vr_arg.type = VR_VARYING;
8042 		      vr_arg.min = NULL_TREE;
8043 		      vr_arg.max = NULL_TREE;
8044 		    }
8045 		}
8046 	    }
8047 	  else
8048 	    {
8049 	      if (is_overflow_infinity (arg))
8050 		{
8051 		  arg = copy_node (arg);
8052 		  TREE_OVERFLOW (arg) = 0;
8053 		}
8054 
8055 	      vr_arg.type = VR_RANGE;
8056 	      vr_arg.min = arg;
8057 	      vr_arg.max = arg;
8058 	      vr_arg.equiv = NULL;
8059 	    }
8060 
8061 	  if (dump_file && (dump_flags & TDF_DETAILS))
8062 	    {
8063 	      fprintf (dump_file, "\t");
8064 	      print_generic_expr (dump_file, arg, dump_flags);
8065 	      fprintf (dump_file, "\n\tValue: ");
8066 	      dump_value_range (dump_file, &vr_arg);
8067 	      fprintf (dump_file, "\n");
8068 	    }
8069 
8070 	  if (first)
8071 	    copy_value_range (&vr_result, &vr_arg);
8072 	  else
8073 	    vrp_meet (&vr_result, &vr_arg);
8074 	  first = false;
8075 
8076 	  if (vr_result.type == VR_VARYING)
8077 	    break;
8078 	}
8079     }
8080 
8081   if (vr_result.type == VR_VARYING)
8082     goto varying;
8083   else if (vr_result.type == VR_UNDEFINED)
8084     goto update_range;
8085 
8086   old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
8087   vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
8088 
8089   /* To prevent infinite iterations in the algorithm, derive ranges
8090      when the new value is slightly bigger or smaller than the
8091      previous one.  We don't do this if we have seen a new executable
8092      edge; this helps us avoid an overflow infinity for conditionals
8093      which are not in a loop.  If the old value-range was VR_UNDEFINED
8094      use the updated range and iterate one more time.  */
8095   if (edges > 0
8096       && gimple_phi_num_args (phi) > 1
8097       && edges == old_edges
8098       && lhs_vr->type != VR_UNDEFINED)
8099     {
8100       int cmp_min = compare_values (lhs_vr->min, vr_result.min);
8101       int cmp_max = compare_values (lhs_vr->max, vr_result.max);
8102 
8103       /* For a non-VR_RANGE result or for pointer types, fall back to
8104 	 varying if the range changed.  */
8105       if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
8106 	   || POINTER_TYPE_P (TREE_TYPE (lhs)))
8107 	  && (cmp_min != 0 || cmp_max != 0))
8108 	goto varying;
8109 
8110       /* If the new minimum is smaller or larger than the previous
8111 	 one, go all the way to -INF.  In the first case, to avoid
8112 	 iterating millions of times to reach -INF, and in the
8113 	 other case to avoid infinite bouncing between different
8114 	 minimums.  */
8115       if (cmp_min > 0 || cmp_min < 0)
8116 	{
8117 	  if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
8118 	      || !vrp_var_may_overflow (lhs, phi))
8119 	    vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
8120 	  else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
8121 	    vr_result.min =
8122 		negative_overflow_infinity (TREE_TYPE (vr_result.min));
8123 	}
8124 
8125       /* Similarly, if the new maximum is smaller or larger than
8126 	 the previous one, go all the way to +INF.  */
8127       if (cmp_max < 0 || cmp_max > 0)
8128 	{
8129 	  if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
8130 	      || !vrp_var_may_overflow (lhs, phi))
8131 	    vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
8132 	  else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
8133 	    vr_result.max =
8134 		positive_overflow_infinity (TREE_TYPE (vr_result.max));
8135 	}
8136 
8137       /* If we dropped either bound to +-INF and this is a loop PHI
8138 	 node, SCEV may know more about its value range.  */
8139       if ((cmp_min > 0 || cmp_min < 0
8140 	   || cmp_max < 0 || cmp_max > 0)
8141 	  && current_loops
8142 	  && (l = loop_containing_stmt (phi))
8143 	  && l->header == gimple_bb (phi))
8144 	adjust_range_with_scev (&vr_result, l, phi, lhs);
8145 
8146       /* If we will end up with a (-INF, +INF) range, set it to
8147 	 VARYING.  Same if the previous max value was invalid for
8148 	 the type and we end up with vr_result.min > vr_result.max.  */
8149       if ((vrp_val_is_max (vr_result.max)
8150 	   && vrp_val_is_min (vr_result.min))
8151 	  || compare_values (vr_result.min,
8152 			     vr_result.max) > 0)
8153 	goto varying;
8154     }
8155 
8156   /* If the new range is different from the previous value, keep
8157      iterating.  */
8158 update_range:
8159   if (update_value_range (lhs, &vr_result))
8160     {
8161       if (dump_file && (dump_flags & TDF_DETAILS))
8162 	{
8163 	  fprintf (dump_file, "Found new range for ");
8164 	  print_generic_expr (dump_file, lhs, 0);
8165 	  fprintf (dump_file, ": ");
8166 	  dump_value_range (dump_file, &vr_result);
8167 	  fprintf (dump_file, "\n\n");
8168 	}
8169 
8170       return SSA_PROP_INTERESTING;
8171     }
8172 
8173   /* Nothing changed, don't add outgoing edges.  */
8174   return SSA_PROP_NOT_INTERESTING;
8175 
8176   /* No match found.  Set the LHS to VARYING.  */
8177 varying:
8178   set_value_range_to_varying (lhs_vr);
8179   return SSA_PROP_VARYING;
8180 }
8181 
8182 /* Simplify boolean operations if the source is known
8183    to be already a boolean.  */
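
/* For example, with hypothetical GIMPLE where a_2 and b_3 are known
   to be in [0, 1]:

	_1 = a_2 != 0;		becomes		_1 = a_2;
	_1 = a_2 == 0;		becomes		_1 = a_2 ^ 1;
	_1 = a_2 != b_3;	becomes		_1 = a_2 ^ b_3;

   A == B with two SSA names is left alone, since there is no
   BIT_XNOR_EXPR to express it in a single statement.  */
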
8184 static bool
8185 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
8186 {
8187   enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
8188   tree lhs, op0, op1;
8189   bool need_conversion;
8190 
8191   /* We handle only !=/== case here.  */
8192   gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
8193 
8194   op0 = gimple_assign_rhs1 (stmt);
8195   if (!op_with_boolean_value_range_p (op0))
8196     return false;
8197 
8198   op1 = gimple_assign_rhs2 (stmt);
8199   if (!op_with_boolean_value_range_p (op1))
8200     return false;
8201 
8202   /* Reduce number of cases to handle to NE_EXPR.  As there is no
8203      BIT_XNOR_EXPR we cannot replace A == B with a single statement.  */
8204   if (rhs_code == EQ_EXPR)
8205     {
8206       if (TREE_CODE (op1) == INTEGER_CST)
8207 	op1 = int_const_binop (BIT_XOR_EXPR, op1, integer_one_node);
8208       else
8209 	return false;
8210     }
8211 
8212   lhs = gimple_assign_lhs (stmt);
8213   need_conversion
8214     = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
8215 
8216   /* Make sure to not sign-extend a 1-bit 1 when converting the result.  */
8217   if (need_conversion
8218       && !TYPE_UNSIGNED (TREE_TYPE (op0))
8219       && TYPE_PRECISION (TREE_TYPE (op0)) == 1
8220       && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
8221     return false;
8222 
8223   /* For A != 0 we can substitute A itself.  */
8224   if (integer_zerop (op1))
8225     gimple_assign_set_rhs_with_ops (gsi,
8226 				    need_conversion
8227 				    ? NOP_EXPR : TREE_CODE (op0),
8228 				    op0, NULL_TREE);
8229   /* For A != B we substitute A ^ B, either with a conversion...  */
8230   else if (need_conversion)
8231     {
8232       tree tem = make_ssa_name (TREE_TYPE (op0), NULL);
8233       gimple newop = gimple_build_assign_with_ops (BIT_XOR_EXPR, tem, op0, op1);
8234       gsi_insert_before (gsi, newop, GSI_SAME_STMT);
8235       gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem, NULL_TREE);
8236     }
8237   /* ... or without one.  */
8238   else
8239     gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
8240   update_stmt (gsi_stmt (*gsi));
8241 
8242   return true;
8243 }
8244 
8245 /* Simplify a division or modulo operator to a right shift or
8246    bitwise AND if the first operand is unsigned or known to be
8247    nonnegative and the second operand is an exact power of two.  */
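
/* For example, with a hypothetical x_1 that is unsigned or known to
   be nonnegative:

	_2 = x_1 / 8;		becomes		_2 = x_1 >> 3;
	_2 = x_1 % 8;		becomes		_2 = x_1 & 7;  */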
8248 
8249 static bool
8250 simplify_div_or_mod_using_ranges (gimple stmt)
8251 {
8252   enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
8253   tree val = NULL;
8254   tree op0 = gimple_assign_rhs1 (stmt);
8255   tree op1 = gimple_assign_rhs2 (stmt);
8256   value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));
8257 
8258   if (TYPE_UNSIGNED (TREE_TYPE (op0)))
8259     {
8260       val = integer_one_node;
8261     }
8262   else
8263     {
8264       bool sop = false;
8265 
8266       val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
8267 
8268       if (val
8269 	  && sop
8270 	  && integer_onep (val)
8271 	  && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
8272 	{
8273 	  location_t location;
8274 
8275 	  if (!gimple_has_location (stmt))
8276 	    location = input_location;
8277 	  else
8278 	    location = gimple_location (stmt);
8279 	  warning_at (location, OPT_Wstrict_overflow,
8280 		      "assuming signed overflow does not occur when "
8281 		      "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
8282 	}
8283     }
8284 
8285   if (val && integer_onep (val))
8286     {
8287       tree t;
8288 
8289       if (rhs_code == TRUNC_DIV_EXPR)
8290 	{
8291 	  t = build_int_cst (integer_type_node, tree_log2 (op1));
8292 	  gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
8293 	  gimple_assign_set_rhs1 (stmt, op0);
8294 	  gimple_assign_set_rhs2 (stmt, t);
8295 	}
8296       else
8297 	{
8298 	  t = build_int_cst (TREE_TYPE (op1), 1);
8299 	  t = int_const_binop (MINUS_EXPR, op1, t);
8300 	  t = fold_convert (TREE_TYPE (op0), t);
8301 
8302 	  gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
8303 	  gimple_assign_set_rhs1 (stmt, op0);
8304 	  gimple_assign_set_rhs2 (stmt, t);
8305 	}
8306 
8307       update_stmt (stmt);
8308       return true;
8309     }
8310 
8311   return false;
8312 }
8313 
8314 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
8315    ABS_EXPR.  If the operand is <= 0, then simplify the
8316    ABS_EXPR into a NEGATE_EXPR.  */
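/* For example, assuming x_1 is a signed SSA name with range [-10, 0],

     y_2 = ABS_EXPR <x_1>;   becomes   y_2 = -x_1;

   and with a range of [0, 10] it becomes simply y_2 = x_1
   (x_1 and y_2 being illustrative names).  */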
8317 
8318 static bool
8319 simplify_abs_using_ranges (gimple stmt)
8320 {
8321   tree val = NULL;
8322   tree op = gimple_assign_rhs1 (stmt);
8323   tree type = TREE_TYPE (op);
8324   value_range_t *vr = get_value_range (op);
8325 
8326   if (TYPE_UNSIGNED (type))
8327     {
8328       val = integer_zero_node;
8329     }
8330   else if (vr)
8331     {
8332       bool sop = false;
8333 
8334       val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
8335       if (!val)
8336 	{
8337 	  sop = false;
8338 	  val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
8339 					  &sop);
8340 
8341 	  if (val)
8342 	    {
8343 	      if (integer_zerop (val))
8344 		val = integer_one_node;
8345 	      else if (integer_onep (val))
8346 		val = integer_zero_node;
8347 	    }
8348 	}
8349 
8350       if (val
8351 	  && (integer_onep (val) || integer_zerop (val)))
8352 	{
8353 	  if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
8354 	    {
8355 	      location_t location;
8356 
8357 	      if (!gimple_has_location (stmt))
8358 		location = input_location;
8359 	      else
8360 		location = gimple_location (stmt);
8361 	      warning_at (location, OPT_Wstrict_overflow,
8362 			  "assuming signed overflow does not occur when "
8363 			  "simplifying %<abs (X)%> to %<X%> or %<-X%>");
8364 	    }
8365 
8366 	  gimple_assign_set_rhs1 (stmt, op);
8367 	  if (integer_onep (val))
8368 	    gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
8369 	  else
8370 	    gimple_assign_set_rhs_code (stmt, SSA_NAME);
8371 	  update_stmt (stmt);
8372 	  return true;
8373 	}
8374     }
8375 
8376   return false;
8377 }
8378 
8379 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
8380    If all the bits that are being cleared by & are already
8381    known to be zero from VR, or all the bits that are being
8382    set by | are already known to be one from VR, the bit
8383    operation is redundant.  */
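/* For example, assuming x_1 has the range [0, 15], every bit above
   bit 3 is known to be zero, so

     y_2 = x_1 & 255;   simplifies to   y_2 = x_1;

   and assuming x_1 has the range [8, 15], bit 3 is known to be set, so

     y_2 = x_1 | 8;     simplifies to   y_2 = x_1;  */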
8384 
8385 static bool
8386 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
8387 {
8388   tree op0 = gimple_assign_rhs1 (stmt);
8389   tree op1 = gimple_assign_rhs2 (stmt);
8390   tree op = NULL_TREE;
8391   value_range_t vr0 = VR_INITIALIZER;
8392   value_range_t vr1 = VR_INITIALIZER;
8393   double_int may_be_nonzero0, may_be_nonzero1;
8394   double_int must_be_nonzero0, must_be_nonzero1;
8395   double_int mask;
8396 
8397   if (TREE_CODE (op0) == SSA_NAME)
8398     vr0 = *(get_value_range (op0));
8399   else if (is_gimple_min_invariant (op0))
8400     set_value_range_to_value (&vr0, op0, NULL);
8401   else
8402     return false;
8403 
8404   if (TREE_CODE (op1) == SSA_NAME)
8405     vr1 = *(get_value_range (op1));
8406   else if (is_gimple_min_invariant (op1))
8407     set_value_range_to_value (&vr1, op1, NULL);
8408   else
8409     return false;
8410 
8411   if (!zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, &must_be_nonzero0))
8412     return false;
8413   if (!zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, &must_be_nonzero1))
8414     return false;
8415 
8416   switch (gimple_assign_rhs_code (stmt))
8417     {
8418     case BIT_AND_EXPR:
8419       mask = may_be_nonzero0.and_not (must_be_nonzero1);
8420       if (mask.is_zero ())
8421 	{
8422 	  op = op0;
8423 	  break;
8424 	}
8425       mask = may_be_nonzero1.and_not (must_be_nonzero0);
8426       if (mask.is_zero ())
8427 	{
8428 	  op = op1;
8429 	  break;
8430 	}
8431       break;
8432     case BIT_IOR_EXPR:
8433       mask = may_be_nonzero0.and_not (must_be_nonzero1);
8434       if (mask.is_zero ())
8435 	{
8436 	  op = op1;
8437 	  break;
8438 	}
8439       mask = may_be_nonzero1.and_not (must_be_nonzero0);
8440       if (mask.is_zero ())
8441 	{
8442 	  op = op0;
8443 	  break;
8444 	}
8445       break;
8446     default:
8447       gcc_unreachable ();
8448     }
8449 
8450   if (op == NULL_TREE)
8451     return false;
8452 
8453   gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op, NULL);
8454   update_stmt (gsi_stmt (*gsi));
8455   return true;
8456 }
8457 
8458 /* We are comparing trees OP0 and OP1 using COND_CODE.  OP0 has
8459    a known value range VR.
8460 
8461    If there is one and only one value which will satisfy the
8462    conditional, then return that value.  Else return NULL.  */
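/* For example, if the illustrative SSA name x_1 has the range [0, 10],
   the comparison x_1 <= 0 can only be satisfied by the single value 0,
   which is what this function returns for that case.  */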
8463 
8464 static tree
8465 test_for_singularity (enum tree_code cond_code, tree op0,
8466 		      tree op1, value_range_t *vr)
8467 {
8468   tree min = NULL;
8469   tree max = NULL;
8470 
8471   /* Extract the minimum and maximum values which satisfy the
8472      conditional as it was written.  */
8473   if (cond_code == LE_EXPR || cond_code == LT_EXPR)
8474     {
8475       /* This should not be negative infinity; there is no overflow
8476 	 here.  */
8477       min = TYPE_MIN_VALUE (TREE_TYPE (op0));
8478 
8479       max = op1;
8480       if (cond_code == LT_EXPR && !is_overflow_infinity (max))
8481 	{
8482 	  tree one = build_int_cst (TREE_TYPE (op0), 1);
8483 	  max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
8484 	  if (EXPR_P (max))
8485 	    TREE_NO_WARNING (max) = 1;
8486 	}
8487     }
8488   else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
8489     {
8490       /* This should not be positive infinity; there is no overflow
8491 	 here.  */
8492       max = TYPE_MAX_VALUE (TREE_TYPE (op0));
8493 
8494       min = op1;
8495       if (cond_code == GT_EXPR && !is_overflow_infinity (min))
8496 	{
8497 	  tree one = build_int_cst (TREE_TYPE (op0), 1);
8498 	  min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
8499 	  if (EXPR_P (min))
8500 	    TREE_NO_WARNING (min) = 1;
8501 	}
8502     }
8503 
8504   /* Now refine the minimum and maximum values using any
8505      value range information we have for op0.  */
8506   if (min && max)
8507     {
8508       if (compare_values (vr->min, min) == 1)
8509 	min = vr->min;
8510       if (compare_values (vr->max, max) == -1)
8511 	max = vr->max;
8512 
8513       /* If the new min/max values have converged to a single value,
8514 	 then there is only one value which can satisfy the condition,
8515 	 return that value.  */
8516       if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
8517 	return min;
8518     }
8519   return NULL;
8520 }
8521 
8522 /* Simplify a conditional using a relational operator to an equality
8523    test if the range information indicates only one value can satisfy
8524    the original conditional.  */
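/* For example, assuming x_1 has the range [0, 10],

     if (x_1 <= 0)   is rewritten as   if (x_1 == 0)
     if (x_1 > 0)    is rewritten as   if (x_1 != 0)

   where x_1 is an illustrative SSA name.  */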
8525 
8526 static bool
8527 simplify_cond_using_ranges (gimple stmt)
8528 {
8529   tree op0 = gimple_cond_lhs (stmt);
8530   tree op1 = gimple_cond_rhs (stmt);
8531   enum tree_code cond_code = gimple_cond_code (stmt);
8532 
8533   if (cond_code != NE_EXPR
8534       && cond_code != EQ_EXPR
8535       && TREE_CODE (op0) == SSA_NAME
8536       && INTEGRAL_TYPE_P (TREE_TYPE (op0))
8537       && is_gimple_min_invariant (op1))
8538     {
8539       value_range_t *vr = get_value_range (op0);
8540 
8541       /* If we have range information for OP0, then we might be
8542 	 able to simplify this conditional. */
8543       if (vr->type == VR_RANGE)
8544 	{
8545 	  tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
8546 
8547 	  if (new_tree)
8548 	    {
8549 	      if (dump_file)
8550 		{
8551 		  fprintf (dump_file, "Simplified relational ");
8552 		  print_gimple_stmt (dump_file, stmt, 0, 0);
8553 		  fprintf (dump_file, " into ");
8554 		}
8555 
8556 	      gimple_cond_set_code (stmt, EQ_EXPR);
8557 	      gimple_cond_set_lhs (stmt, op0);
8558 	      gimple_cond_set_rhs (stmt, new_tree);
8559 
8560 	      update_stmt (stmt);
8561 
8562 	      if (dump_file)
8563 		{
8564 		  print_gimple_stmt (dump_file, stmt, 0, 0);
8565 		  fprintf (dump_file, "\n");
8566 		}
8567 
8568 	      return true;
8569 	    }
8570 
8571 	  /* Try again after inverting the condition.  We only deal
8572 	     with integral types here, so no need to worry about
8573 	     issues with inverting FP comparisons.  */
8574 	  cond_code = invert_tree_comparison (cond_code, false);
8575 	  new_tree = test_for_singularity (cond_code, op0, op1, vr);
8576 
8577 	  if (new_tree)
8578 	    {
8579 	      if (dump_file)
8580 		{
8581 		  fprintf (dump_file, "Simplified relational ");
8582 		  print_gimple_stmt (dump_file, stmt, 0, 0);
8583 		  fprintf (dump_file, " into ");
8584 		}
8585 
8586 	      gimple_cond_set_code (stmt, NE_EXPR);
8587 	      gimple_cond_set_lhs (stmt, op0);
8588 	      gimple_cond_set_rhs (stmt, new_tree);
8589 
8590 	      update_stmt (stmt);
8591 
8592 	      if (dump_file)
8593 		{
8594 		  print_gimple_stmt (dump_file, stmt, 0, 0);
8595 		  fprintf (dump_file, "\n");
8596 		}
8597 
8598 	      return true;
8599 	    }
8600 	}
8601     }
8602 
8603   return false;
8604 }
8605 
8606 /* Simplify a switch statement using the value range of the switch
8607    argument.  */
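/* For example, if the switch index is an SSA name known to be in
   [3, 5], case labels entirely outside that range can never be
   reached; their outgoing edges are queued for removal and the case
   label vector of the switch is shrunk accordingly.  */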
8608 
8609 static bool
8610 simplify_switch_using_ranges (gimple stmt)
8611 {
8612   tree op = gimple_switch_index (stmt);
8613   value_range_t *vr;
8614   bool take_default;
8615   edge e;
8616   edge_iterator ei;
8617   size_t i = 0, j = 0, n, n2;
8618   tree vec2;
8619   switch_update su;
8620   size_t k = 1, l = 0;
8621 
8622   if (TREE_CODE (op) == SSA_NAME)
8623     {
8624       vr = get_value_range (op);
8625 
8626       /* We can only handle integer ranges.  */
8627       if ((vr->type != VR_RANGE
8628 	   && vr->type != VR_ANTI_RANGE)
8629 	  || symbolic_range_p (vr))
8630 	return false;
8631 
8632       /* Find case label for min/max of the value range.  */
8633       take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
8634     }
8635   else if (TREE_CODE (op) == INTEGER_CST)
8636     {
8637       take_default = !find_case_label_index (stmt, 1, op, &i);
8638       if (take_default)
8639 	{
8640 	  i = 1;
8641 	  j = 0;
8642 	}
8643       else
8644 	{
8645 	  j = i;
8646 	}
8647     }
8648   else
8649     return false;
8650 
8651   n = gimple_switch_num_labels (stmt);
8652 
8653   /* Bail out if all edges are taken anyway; nothing can be pruned.  */
8654   if (i == 1
8655       && j == n - 1
8656       && take_default)
8657     return false;
8658 
8659   /* Build a new vector of taken case labels.  */
8660   vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
8661   n2 = 0;
8662 
8663   /* Add the default edge, if necessary.  */
8664   if (take_default)
8665     TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
8666 
8667   for (; i <= j; ++i, ++n2)
8668     TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
8669 
8670   for (; k <= l; ++k, ++n2)
8671     TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);
8672 
8673   /* Mark needed edges.  */
8674   for (i = 0; i < n2; ++i)
8675     {
8676       e = find_edge (gimple_bb (stmt),
8677 		     label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
8678       e->aux = (void *)-1;
8679     }
8680 
8681   /* Queue unneeded edges for later removal.  */
8682   FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
8683     {
8684       if (e->aux == (void *)-1)
8685 	{
8686 	  e->aux = NULL;
8687 	  continue;
8688 	}
8689 
8690       if (dump_file && (dump_flags & TDF_DETAILS))
8691 	{
8692 	  fprintf (dump_file, "removing unreachable case label\n");
8693 	}
8694       to_remove_edges.safe_push (e);
8695       e->flags &= ~EDGE_EXECUTABLE;
8696     }
8697 
8698   /* And queue an update for the stmt.  */
8699   su.stmt = stmt;
8700   su.vec = vec2;
8701   to_update_switch_stmts.safe_push (su);
8702   return false;
8703 }
8704 
8705 /* Simplify an integral conversion from an SSA name in STMT.  */
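/* For example, given the illustrative chain

     s_2 = (short int) c_1;
     i_3 = (int) s_2;

   where c_1 is known to be in [0, 100], dropping the intermediate
   conversion cannot change the result, so the second statement is
   rewritten as i_3 = (int) c_1.  */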
8706 
8707 static bool
8708 simplify_conversion_using_ranges (gimple stmt)
8709 {
8710   tree innerop, middleop, finaltype;
8711   gimple def_stmt;
8712   value_range_t *innervr;
8713   bool inner_unsigned_p, middle_unsigned_p, final_unsigned_p;
8714   unsigned inner_prec, middle_prec, final_prec;
8715   double_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
8716 
8717   finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
8718   if (!INTEGRAL_TYPE_P (finaltype))
8719     return false;
8720   middleop = gimple_assign_rhs1 (stmt);
8721   def_stmt = SSA_NAME_DEF_STMT (middleop);
8722   if (!is_gimple_assign (def_stmt)
8723       || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
8724     return false;
8725   innerop = gimple_assign_rhs1 (def_stmt);
8726   if (TREE_CODE (innerop) != SSA_NAME)
8727     return false;
8728 
8729   /* Get the value-range of the inner operand.  */
8730   innervr = get_value_range (innerop);
8731   if (innervr->type != VR_RANGE
8732       || TREE_CODE (innervr->min) != INTEGER_CST
8733       || TREE_CODE (innervr->max) != INTEGER_CST)
8734     return false;
8735 
8736   /* Simulate the conversion chain to check whether the result stays
8737      the same when the middle conversion is removed.  */
8738   innermin = tree_to_double_int (innervr->min);
8739   innermax = tree_to_double_int (innervr->max);
8740 
8741   inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
8742   middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
8743   final_prec = TYPE_PRECISION (finaltype);
8744 
8745   /* If the first conversion is not injective, the second must not
8746      be widening.  */
8747   if ((innermax - innermin).ugt (double_int::mask (middle_prec))
8748       && middle_prec < final_prec)
8749     return false;
8750   /* We also want a value in the middle of the range so that we can track
8751      the effect that narrowing conversions with sign change have.  */
8752   inner_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (innerop));
8753   if (inner_unsigned_p)
8754     innermed = double_int::mask (inner_prec).lrshift (1, inner_prec);
8755   else
8756     innermed = double_int_zero;
8757   if (innermin.cmp (innermed, inner_unsigned_p) >= 0
8758       || innermed.cmp (innermax, inner_unsigned_p) >= 0)
8759     innermed = innermin;
8760 
8761   middle_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (middleop));
8762   middlemin = innermin.ext (middle_prec, middle_unsigned_p);
8763   middlemed = innermed.ext (middle_prec, middle_unsigned_p);
8764   middlemax = innermax.ext (middle_prec, middle_unsigned_p);
8765 
8766   /* Require that the final conversion applied to both the original
8767      and the intermediate range produces the same result.  */
8768   final_unsigned_p = TYPE_UNSIGNED (finaltype);
8769   if (middlemin.ext (final_prec, final_unsigned_p)
8770 	 != innermin.ext (final_prec, final_unsigned_p)
8771       || middlemed.ext (final_prec, final_unsigned_p)
8772 	 != innermed.ext (final_prec, final_unsigned_p)
8773       || middlemax.ext (final_prec, final_unsigned_p)
8774 	 != innermax.ext (final_prec, final_unsigned_p))
8775     return false;
8776 
8777   gimple_assign_set_rhs1 (stmt, innerop);
8778   update_stmt (stmt);
8779   return true;
8780 }
8781 
8782 /* Return whether the value range *VR fits in an integer type specified
8783    by PRECISION and UNSIGNED_P.  */
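/* For instance, the range [0, 200] of a 32-bit unsigned variable fits
   in an 8-bit unsigned type, but not in an 8-bit signed one, because
   the value 200 does not survive the conversion to the latter.  */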
8784 
8785 static bool
8786 range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p)
8787 {
8788   tree src_type;
8789   unsigned src_precision;
8790   double_int tem;
8791 
8792   /* We can only handle integral and pointer types.  */
8793   src_type = TREE_TYPE (vr->min);
8794   if (!INTEGRAL_TYPE_P (src_type)
8795       && !POINTER_TYPE_P (src_type))
8796     return false;
8797 
8798   /* An extension is fine unless VR is signed and UNSIGNED_P is true;
8799      an identity transform (same precision, same signedness) is fine too.  */
8800   src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
8801   if ((src_precision < precision
8802        && !(unsigned_p && !TYPE_UNSIGNED (src_type)))
8803       || (src_precision == precision
8804 	  && TYPE_UNSIGNED (src_type) == unsigned_p))
8805     return true;
8806 
8807   /* Now we can only handle ranges with constant bounds.  */
8808   if (vr->type != VR_RANGE
8809       || TREE_CODE (vr->min) != INTEGER_CST
8810       || TREE_CODE (vr->max) != INTEGER_CST)
8811     return false;
8812 
8813   /* For sign changes, the MSB of the double_int has to be clear.
8814      An unsigned value with its MSB set cannot be represented by
8815      a signed double_int, while a negative value cannot be represented
8816      by an unsigned double_int.  */
8817   if (TYPE_UNSIGNED (src_type) != unsigned_p
8818       && (TREE_INT_CST_HIGH (vr->min) | TREE_INT_CST_HIGH (vr->max)) < 0)
8819     return false;
8820 
8821   /* Then we can perform the conversion on both ends and compare
8822      the result for equality.  */
8823   tem = tree_to_double_int (vr->min).ext (precision, unsigned_p);
8824   if (tree_to_double_int (vr->min) != tem)
8825     return false;
8826   tem = tree_to_double_int (vr->max).ext (precision, unsigned_p);
8827   if (tree_to_double_int (vr->max) != tem)
8828     return false;
8829 
8830   return true;
8831 }
8832 
8833 /* Simplify a conversion from integral SSA name to float in STMT.  */
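/* For example, assuming u_1 is an unsigned int known to be in
   [0, 1000], a statement d_2 = (double) u_1 may be rewritten to go
   through a signed temporary of the same or a narrower precision,
   which many targets can convert to floating point more directly
   (u_1 and d_2 are illustrative names).  */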
8834 
8835 static bool
8836 simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
8837 {
8838   tree rhs1 = gimple_assign_rhs1 (stmt);
8839   value_range_t *vr = get_value_range (rhs1);
8840   enum machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
8841   enum machine_mode mode;
8842   tree tem;
8843   gimple conv;
8844 
8845   /* We can only handle constant ranges.  */
8846   if (vr->type != VR_RANGE
8847       || TREE_CODE (vr->min) != INTEGER_CST
8848       || TREE_CODE (vr->max) != INTEGER_CST)
8849     return false;
8850 
8851   /* First check if we can use a signed type in place of an unsigned.  */
8852   if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
8853       && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
8854 	  != CODE_FOR_nothing)
8855       && range_fits_type_p (vr, GET_MODE_PRECISION
8856 			          (TYPE_MODE (TREE_TYPE (rhs1))), 0))
8857     mode = TYPE_MODE (TREE_TYPE (rhs1));
8858   /* If we can do the conversion in the current input mode, do nothing.  */
8859   else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
8860 			TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
8861     return false;
8862   /* Otherwise search for a mode we can use, starting from the narrowest
8863      integer mode available.  */
8864   else
8865     {
8866       mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
8867       do
8868 	{
8869 	  /* If we cannot do a signed conversion to float from mode,
8870 	     or if the value-range does not fit in the signed type,
8871 	     try with a wider mode.  */
8872 	  if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
8873 	      && range_fits_type_p (vr, GET_MODE_PRECISION (mode), 0))
8874 	    break;
8875 
8876 	  mode = GET_MODE_WIDER_MODE (mode);
8877 	  /* But do not widen the input.  Instead leave that to the
8878 	     optabs expansion code.  */
8879 	  if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
8880 	    return false;
8881 	}
8882       while (mode != VOIDmode);
8883       if (mode == VOIDmode)
8884 	return false;
8885     }
8886 
8887   /* It works; insert a truncation or sign change before the
8888      float conversion.  */
8889   tem = make_ssa_name (build_nonstandard_integer_type
8890 			  (GET_MODE_PRECISION (mode), 0), NULL);
8891   conv = gimple_build_assign_with_ops (NOP_EXPR, tem, rhs1, NULL_TREE);
8892   gsi_insert_before (gsi, conv, GSI_SAME_STMT);
8893   gimple_assign_set_rhs1 (stmt, tem);
8894   update_stmt (stmt);
8895 
8896   return true;
8897 }
8898 
8899 /* Simplify STMT using ranges if possible.  */
8900 
8901 static bool
8902 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
8903 {
8904   gimple stmt = gsi_stmt (*gsi);
8905   if (is_gimple_assign (stmt))
8906     {
8907       enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
8908       tree rhs1 = gimple_assign_rhs1 (stmt);
8909 
8910       switch (rhs_code)
8911 	{
8912 	case EQ_EXPR:
8913 	case NE_EXPR:
8914           /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
8915 	     if the RHS is zero or one and the LHS is known to be a boolean
8916 	     value.  */
8917 	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
8918 	    return simplify_truth_ops_using_ranges (gsi, stmt);
8919 	  break;
8920 
8921 	/* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
8922 	   and BIT_AND_EXPR respectively if the first operand is greater
8923 	   than zero and the second operand is an exact power of two.  */
8924 	case TRUNC_DIV_EXPR:
8925 	case TRUNC_MOD_EXPR:
8926 	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
8927 	      && integer_pow2p (gimple_assign_rhs2 (stmt)))
8928 	    return simplify_div_or_mod_using_ranges (stmt);
8929 	  break;
8930 
8931 	/* Transform ABS (X) into X or -X as appropriate.  */
8932 	case ABS_EXPR:
8933 	  if (TREE_CODE (rhs1) == SSA_NAME
8934 	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
8935 	    return simplify_abs_using_ranges (stmt);
8936 	  break;
8937 
8938 	case BIT_AND_EXPR:
8939 	case BIT_IOR_EXPR:
8940 	  /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
8941 	     if all the bits being cleared are already cleared or
8942 	     all the bits being set are already set.  */
8943 	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
8944 	    return simplify_bit_ops_using_ranges (gsi, stmt);
8945 	  break;
8946 
8947 	CASE_CONVERT:
8948 	  if (TREE_CODE (rhs1) == SSA_NAME
8949 	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
8950 	    return simplify_conversion_using_ranges (stmt);
8951 	  break;
8952 
8953 	case FLOAT_EXPR:
8954 	  if (TREE_CODE (rhs1) == SSA_NAME
8955 	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
8956 	    return simplify_float_conversion_using_ranges (gsi, stmt);
8957 	  break;
8958 
8959 	default:
8960 	  break;
8961 	}
8962     }
8963   else if (gimple_code (stmt) == GIMPLE_COND)
8964     return simplify_cond_using_ranges (stmt);
8965   else if (gimple_code (stmt) == GIMPLE_SWITCH)
8966     return simplify_switch_using_ranges (stmt);
8967 
8968   return false;
8969 }
8970 
8971 /* If the statement pointed by SI has a predicate whose value can be
8972    computed using the value range information computed by VRP, compute
8973    its value and return true.  Otherwise, return false.  */
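/* For example, with the illustrative SSA name x_1 known to be in
   [0, 5], the predicate in

     if (x_1 > 10)

   always evaluates to false and the condition is folded to that
   constant result.  */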
8974 
8975 static bool
8976 fold_predicate_in (gimple_stmt_iterator *si)
8977 {
8978   bool assignment_p = false;
8979   tree val;
8980   gimple stmt = gsi_stmt (*si);
8981 
8982   if (is_gimple_assign (stmt)
8983       && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
8984     {
8985       assignment_p = true;
8986       val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
8987 				      gimple_assign_rhs1 (stmt),
8988 				      gimple_assign_rhs2 (stmt),
8989 				      stmt);
8990     }
8991   else if (gimple_code (stmt) == GIMPLE_COND)
8992     val = vrp_evaluate_conditional (gimple_cond_code (stmt),
8993 				    gimple_cond_lhs (stmt),
8994 				    gimple_cond_rhs (stmt),
8995 				    stmt);
8996   else
8997     return false;
8998 
8999   if (val)
9000     {
9001       if (assignment_p)
9002         val = fold_convert (gimple_expr_type (stmt), val);
9003 
9004       if (dump_file)
9005 	{
9006 	  fprintf (dump_file, "Folding predicate ");
9007 	  print_gimple_expr (dump_file, stmt, 0, 0);
9008 	  fprintf (dump_file, " to ");
9009 	  print_generic_expr (dump_file, val, 0);
9010 	  fprintf (dump_file, "\n");
9011 	}
9012 
9013       if (is_gimple_assign (stmt))
9014 	gimple_assign_set_rhs_from_tree (si, val);
9015       else
9016 	{
9017 	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
9018 	  if (integer_zerop (val))
9019 	    gimple_cond_make_false (stmt);
9020 	  else if (integer_onep (val))
9021 	    gimple_cond_make_true (stmt);
9022 	  else
9023 	    gcc_unreachable ();
9024 	}
9025 
9026       return true;
9027     }
9028 
9029   return false;
9030 }
9031 
9032 /* Callback for substitute_and_fold folding the stmt at *SI.  */
9033 
9034 static bool
9035 vrp_fold_stmt (gimple_stmt_iterator *si)
9036 {
9037   if (fold_predicate_in (si))
9038     return true;
9039 
9040   return simplify_stmt_using_ranges (si);
9041 }
9042 
9043 /* Stack of dest,src equivalency pairs that need to be restored after
9044    each attempt to thread a block's incoming edge to an outgoing edge.
9045 
9046    A NULL entry is used to mark the end of pairs which need to be
9047    restored.  */
9048 static vec<tree> equiv_stack;
9049 
9050 /* A trivial wrapper so that we can present the generic jump threading
9051    code with a simple API for simplifying statements.  STMT is the
9052    statement we want to simplify, WITHIN_STMT provides the location
9053    for any overflow warnings.  */
9054 
9055 static tree
9056 simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
9057 {
9058   /* We only use VRP information to simplify conditionals.  This is
9059      overly conservative, but it's unclear if doing more would be
9060      worth the compile time cost.  */
9061   if (gimple_code (stmt) != GIMPLE_COND)
9062     return NULL;
9063 
9064   return vrp_evaluate_conditional (gimple_cond_code (stmt),
9065 				   gimple_cond_lhs (stmt),
9066 				   gimple_cond_rhs (stmt), within_stmt);
9067 }
9068 
9069 /* Blocks which have more than one predecessor and more than
9070    one successor present jump threading opportunities, i.e.,
9071    when the block is reached from a specific predecessor, we
9072    may be able to determine which of the outgoing edges will
9073    be traversed.  When this optimization applies, we are able
9074    to avoid conditionals at runtime and we may expose secondary
9075    optimization opportunities.
9076 
9077    This routine is effectively a driver for the generic jump
9078    threading code.  It basically just presents the generic code
9079    with edges that may be suitable for jump threading.
9080 
9081    Unlike DOM, we do not iterate VRP if jump threading was successful.
9082    While iterating may expose new opportunities for VRP, it is expected
9083    those opportunities would be very limited and the compile time cost
9084    to expose those opportunities would be significant.
9085 
9086    As jump threading opportunities are discovered, they are registered
9087    for later realization.  */
9088 
9089 static void
9090 identify_jump_threads (void)
9091 {
9092   basic_block bb;
9093   gimple dummy;
9094   int i;
9095   edge e;
9096 
9097   /* Ugh.  When substituting values earlier in this pass we can
9098      wipe the dominance information.  So rebuild the dominator
9099      information as we need it within the jump threading code.  */
9100   calculate_dominance_info (CDI_DOMINATORS);
9101 
9102   /* We do not allow VRP information to be used for jump threading
9103      across a back edge in the CFG.  Otherwise it becomes too
9104      difficult to avoid eliminating loop exit tests.  Of course
9105      EDGE_DFS_BACK is not accurate at this time so we have to
9106      recompute it.  */
9107   mark_dfs_back_edges ();
9108 
9109   /* Do not thread across edges we are about to remove.  Just marking
9110      them as EDGE_DFS_BACK will do.  */
9111   FOR_EACH_VEC_ELT (to_remove_edges, i, e)
9112     e->flags |= EDGE_DFS_BACK;
9113 
9114   /* Allocate our unwinder stack to unwind any temporary equivalences
9115      that might be recorded.  */
9116   equiv_stack.create (20);
9117 
9118   /* To avoid lots of silly node creation, we create a single
9119      conditional and just modify it in-place when attempting to
9120      thread jumps.  */
9121   dummy = gimple_build_cond (EQ_EXPR,
9122 			     integer_zero_node, integer_zero_node,
9123 			     NULL, NULL);
9124 
9125   /* Walk through all the blocks finding those which present a
9126      potential jump threading opportunity.  We could set this up
9127      as a dominator walker and record data during the walk, but
9128      I doubt it's worth the effort for the classes of jump
9129      threading opportunities we are trying to identify at this
9130      point in compilation.  */
9131   FOR_EACH_BB (bb)
9132     {
9133       gimple last;
9134 
9135       /* If the generic jump threading code does not find this block
9136 	 interesting, then there is nothing to do.  */
9137       if (! potentially_threadable_block (bb))
9138 	continue;
9139 
9140       /* We only care about blocks ending in a COND_EXPR.  While there
9141 	 may be some value in handling SWITCH_EXPR here, I doubt it's
9142 	 terribly important.  */
9143       last = gsi_stmt (gsi_last_bb (bb));
9144 
9145       /* We're basically looking for a switch or any kind of conditional with
9146 	 integral or pointer type arguments.  Note the type of the second
9147 	 argument will be the same as the first argument, so no need to
9148 	 check it explicitly.  */
9149       if (gimple_code (last) == GIMPLE_SWITCH
9150 	  || (gimple_code (last) == GIMPLE_COND
9151       	      && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
9152 	      && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
9153 		  || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
9154 	      && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
9155 		  || is_gimple_min_invariant (gimple_cond_rhs (last)))))
9156 	{
9157 	  edge_iterator ei;
9158 
9159 	  /* We've got a block with multiple predecessors and multiple
9160 	     successors which also ends in a suitable conditional or
9161 	     switch statement.  For each predecessor, see if we can thread
9162 	     it to a specific successor.  */
9163 	  FOR_EACH_EDGE (e, ei, bb->preds)
9164 	    {
9165 	      /* Do not thread across back edges or abnormal edges
9166 		 in the CFG.  */
9167 	      if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
9168 		continue;
9169 
9170 	      thread_across_edge (dummy, e, true, &equiv_stack,
9171 				  simplify_stmt_for_jump_threading);
9172 	    }
9173 	}
9174     }
9175 
9176   /* We do not actually update the CFG or SSA graphs at this point as
9177      ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
9178      handle ASSERT_EXPRs gracefully.  */
9179 }
9180 
9181 /* We identified all the jump threading opportunities earlier, but could
9182    not transform the CFG at that time.  This routine transforms the
9183    CFG and arranges for the dominator tree to be rebuilt if necessary.
9184 
9185    Note the SSA graph update will occur during the normal TODO
9186    processing by the pass manager.  */
9187 static void
9188 finalize_jump_threads (void)
9189 {
9190   thread_through_all_blocks (false);
9191   equiv_stack.release ();
9192 }
9193 
9194 
9195 /* Traverse all the blocks folding conditionals with known ranges.  */
9196 
9197 static void
9198 vrp_finalize (void)
9199 {
9200   size_t i;
9201 
9202   values_propagated = true;
9203 
9204   if (dump_file)
9205     {
9206       fprintf (dump_file, "\nValue ranges after VRP:\n\n");
9207       dump_all_value_ranges (dump_file);
9208       fprintf (dump_file, "\n");
9209     }
9210 
9211   substitute_and_fold (op_with_constant_singleton_value_range,
9212 		       vrp_fold_stmt, false);
9213 
9214   if (warn_array_bounds && first_pass_instance)
9215     check_all_array_refs ();
9216 
9217   /* We must identify jump threading opportunities before we release
9218      the data structures built by VRP.  */
9219   identify_jump_threads ();
9220 
9221   /* Free allocated memory.  */
9222   for (i = 0; i < num_vr_values; i++)
9223     if (vr_value[i])
9224       {
9225 	BITMAP_FREE (vr_value[i]->equiv);
9226 	free (vr_value[i]);
9227       }
9228 
9229   free (vr_value);
9230   free (vr_phi_edge_counts);
9231 
9232   /* So that we can distinguish between VRP data being available
9233      and not available.  */
9234   vr_value = NULL;
9235   vr_phi_edge_counts = NULL;
9236 }
9237 
9238 
9239 /* Main entry point to VRP (Value Range Propagation).  This pass is
9240    loosely based on J. R. C. Patterson, ``Accurate Static Branch
9241    Prediction by Value Range Propagation,'' in SIGPLAN Conference on
9242    Programming Language Design and Implementation, pp. 67-78, 1995.
9243    Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
9244 
9245    This is essentially an SSA-CCP pass modified to deal with ranges
9246    instead of constants.
9247 
9248    While propagating ranges, we may find that two or more SSA name
9249    have equivalent, though distinct ranges.  For instance,
9250 
9251      1	x_9 = p_3->a;
9252      2	p_4 = ASSERT_EXPR <p_3, p_3 != 0>
9253      3	if (p_4 == q_2)
9254      4	  p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
9255      5	endif
9256      6	if (q_2)
9257 
9258    In the code above, pointer p_5 has range [q_2, q_2], but from the
9259    code we can also determine that p_5 cannot be NULL and, if q_2 had
9260    a non-varying range, p_5's range should also be compatible with it.
9261 
9262    These equivalences are created by two expressions: ASSERT_EXPR and
9263    copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
9264    result of another assertion, then we can use the fact that p_5 and
9265    p_4 are equivalent when evaluating p_5's range.
9266 
9267    Together with value ranges, we also propagate these equivalences
9268    between names so that we can take advantage of information from
9269    multiple ranges when doing final replacement.  Note that this
9270    equivalency relation is transitive but not symmetric.
9271 
9272    In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
9273    cannot assert that q_2 is equivalent to p_5 because q_2 may be used
9274    in contexts where that assertion does not hold (e.g., in line 6).
9275 
9276    TODO, the main difference between this pass and Patterson's is that
9277    we do not propagate edge probabilities.  We only compute whether
9278    edges can be taken or not.  That is, instead of having a spectrum
9279    of jump probabilities between 0 and 1, we only deal with 0, 1 and
9280    DON'T KNOW.  In the future, it may be worthwhile to propagate
9281    probabilities to aid branch prediction.  */
9282 
9283 static unsigned int
9284 execute_vrp (void)
9285 {
9286   int i;
9287   edge e;
9288   switch_update *su;
9289 
9290   loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
9291   rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
9292   scev_initialize ();
9293 
9294   /* ???  This ends up using stale EDGE_DFS_BACK for liveness computation.
9295      Inserting assertions may split edges which will invalidate
9296      EDGE_DFS_BACK.  */
9297   insert_range_assertions ();
9298 
9299   to_remove_edges.create (10);
9300   to_update_switch_stmts.create (5);
9301   threadedge_initialize_values ();
9302 
9303   /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
9304   mark_dfs_back_edges ();
9305 
9306   vrp_initialize ();
9307   ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
9308   vrp_finalize ();
9309 
9310   free_numbers_of_iterations_estimates ();
9311 
9312   /* ASSERT_EXPRs must be removed before finalizing jump threads
9313      as finalizing jump threads calls the CFG cleanup code which
9314      does not properly handle ASSERT_EXPRs.  */
9315   remove_range_assertions ();
9316 
9317   /* If we exposed any new variables, go ahead and put them into
9318      SSA form now, before we handle jump threading.  This simplifies
9319      interactions between rewriting of _DECL nodes into SSA form
9320      and rewriting SSA_NAME nodes into SSA form after block
9321      duplication and CFG manipulation.  */
9322   update_ssa (TODO_update_ssa);
9323 
9324   finalize_jump_threads ();
9325 
9326   /* Remove dead edges from SWITCH_EXPR optimization.  This leaves the
9327      CFG in a broken state and requires a cfg_cleanup run.  */
9328   FOR_EACH_VEC_ELT (to_remove_edges, i, e)
9329     remove_edge (e);
9330   /* Update SWITCH_EXPR case label vector.  */
9331   FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
9332     {
9333       size_t j;
9334       size_t n = TREE_VEC_LENGTH (su->vec);
9335       tree label;
9336       gimple_switch_set_num_labels (su->stmt, n);
9337       for (j = 0; j < n; j++)
9338 	gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
9339       /* As we may have replaced the default label with a regular one,
9340 	 make sure to make it a real default label again.  This ensures
9341 	 optimal expansion.  */
9342       label = gimple_switch_label (su->stmt, 0);
9343       CASE_LOW (label) = NULL_TREE;
9344       CASE_HIGH (label) = NULL_TREE;
9345     }
9346 
9347   if (to_remove_edges.length () > 0)
9348     free_dominance_info (CDI_DOMINATORS);
9349 
9350   to_remove_edges.release ();
9351   to_update_switch_stmts.release ();
9352   threadedge_finalize_values ();
9353 
9354   scev_finalize ();
9355   loop_optimizer_finalize ();
9356   return 0;
9357 }
9358 
9359 static bool
9360 gate_vrp (void)
9361 {
9362   return flag_tree_vrp != 0;
9363 }
9364 
9365 struct gimple_opt_pass pass_vrp =
9366 {
9367  {
9368   GIMPLE_PASS,
9369   "vrp",				/* name */
9370   OPTGROUP_NONE,                        /* optinfo_flags */
9371   gate_vrp,				/* gate */
9372   execute_vrp,				/* execute */
9373   NULL,					/* sub */
9374   NULL,					/* next */
9375   0,					/* static_pass_number */
9376   TV_TREE_VRP,				/* tv_id */
9377   PROP_ssa,				/* properties_required */
9378   0,					/* properties_provided */
9379   0,					/* properties_destroyed */
9380   0,					/* todo_flags_start */
9381   TODO_cleanup_cfg
9382     | TODO_update_ssa
9383     | TODO_verify_ssa
9384     | TODO_verify_flow
9385     | TODO_ggc_collect			/* todo_flags_finish */
9386  }
9387 };
9388