/* Support routines for Value Range Propagation (VRP).
   Copyright (C) 2005-2019 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "cfganal.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "intl.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-propagate.h"
#include "tree-chrec.h"
#include "tree-ssa-threadupdate.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "omp-general.h"
#include "target.h"
#include "case-cfn-macros.h"
#include "params.h"
#include "alloc-pool.h"
#include "domwalk.h"
#include "tree-cfgcleanup.h"
#include "stringpool.h"
#include "attribs.h"
#include "vr-values.h"
#include "builtins.h"
#include "wide-int-range.h"

/* Set of SSA names found live during the RPO traversal of the function
   for still active basic-blocks.  */
static sbitmap *live;

void
value_range_base::set (enum value_range_kind kind, tree min, tree max)
{
  m_kind = kind;
  m_min = min;
  m_max = max;
  if (flag_checking)
    check ();
}

void
value_range::set_equiv (bitmap equiv)
{
  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.

     All equivalence bitmaps are allocated from the same obstack.  So
     we can use the obstack associated with EQUIV to allocate vr->equiv.  */
  if (m_equiv == NULL
      && equiv != NULL)
    m_equiv = BITMAP_ALLOC (equiv->obstack);

  if (equiv != m_equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
	bitmap_copy (m_equiv, equiv);
      else
	bitmap_clear (m_equiv);
    }
}

/* Initialize value_range.  */

void
value_range::set (enum value_range_kind kind, tree min, tree max,
		  bitmap equiv)
{
  value_range_base::set (kind, min, max);
  set_equiv (equiv);
  if (flag_checking)
    check ();
}

value_range_base::value_range_base (value_range_kind kind, tree min, tree max)
{
  set (kind, min, max);
}

value_range::value_range (value_range_kind kind, tree min, tree max,
			  bitmap equiv)
{
  m_equiv = NULL;
  set (kind, min, max, equiv);
}

value_range::value_range (const value_range_base &other)
{
  m_equiv = NULL;
  set (other.kind (), other.min (), other.max (), NULL);
}

/* Like set, but keep the equivalences in place.  */

void
value_range::update (value_range_kind kind, tree min, tree max)
{
  set (kind, min, max,
       (kind != VR_UNDEFINED && kind != VR_VARYING) ? m_equiv : NULL);
}

/* Copy value_range in FROM into THIS while avoiding bitmap sharing.

   Note: The code that avoids the bitmap sharing looks at the existing
   this->m_equiv, so this function cannot be used to initialize an
   object.  Use the constructors for initialization.  */

void
value_range::deep_copy (const value_range *from)
{
  set (from->m_kind, from->min (), from->max (), from->m_equiv);
}

void
value_range::move (value_range *from)
{
  set (from->m_kind, from->min (), from->max ());
  m_equiv = from->m_equiv;
  from->m_equiv = NULL;
}

/* Check the validity of the range.  */

void
value_range_base::check ()
{
  switch (m_kind)
    {
    case VR_RANGE:
    case VR_ANTI_RANGE:
      {
	int cmp;

	gcc_assert (m_min && m_max);

	gcc_assert (!TREE_OVERFLOW_P (m_min) && !TREE_OVERFLOW_P (m_max));

	/* Creating ~[-MIN, +MAX] is stupid because that would be
	   the empty set.  */
	if (INTEGRAL_TYPE_P (TREE_TYPE (m_min)) && m_kind == VR_ANTI_RANGE)
	  gcc_assert (!vrp_val_is_min (m_min) || !vrp_val_is_max (m_max));

	cmp = compare_values (m_min, m_max);
	gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
	break;
      }
    case VR_UNDEFINED:
    case VR_VARYING:
      gcc_assert (!min () && !max ());
      break;
    default:
      gcc_unreachable ();
    }
}

void
value_range::check ()
{
  value_range_base::check ();
  switch (m_kind)
    {
    case VR_UNDEFINED:
    case VR_VARYING:
      gcc_assert (!m_equiv || bitmap_empty_p (m_equiv));
    default:;
    }
}

/* Equality operator.  We purposely do not overload ==, to avoid
   confusion with the equivalence bitmap in the derived value_range
   class.  */

bool
value_range_base::equal_p (const value_range_base &other) const
{
  return (m_kind == other.m_kind
	  && vrp_operand_equal_p (m_min, other.m_min)
	  && vrp_operand_equal_p (m_max, other.m_max));
}

/* Returns TRUE if THIS == OTHER.  Ignores the equivalence bitmap if
   IGNORE_EQUIVS is TRUE.  */

bool
value_range::equal_p (const value_range &other, bool ignore_equivs) const
{
  return (value_range_base::equal_p (other)
	  && (ignore_equivs
	      || vrp_bitmap_equal_p (m_equiv, other.m_equiv)));
}

/* Return TRUE if this is a symbolic range.  */

bool
value_range_base::symbolic_p () const
{
  return (!varying_p ()
	  && !undefined_p ()
	  && (!is_gimple_min_invariant (m_min)
	      || !is_gimple_min_invariant (m_max)));
}

/* NOTE: This is not the inverse of symbolic_p because the range
   could also be varying or undefined.  Ideally they should be inverses
   of each other, with varying only applying to symbolics.  Varying of
   constants would be represented as [-MIN, +MAX].  */

bool
value_range_base::constant_p () const
{
  return (!varying_p ()
	  && !undefined_p ()
	  && TREE_CODE (m_min) == INTEGER_CST
	  && TREE_CODE (m_max) == INTEGER_CST);
}

void
value_range_base::set_undefined ()
{
  set (VR_UNDEFINED, NULL, NULL);
}

void
value_range::set_undefined ()
{
  set (VR_UNDEFINED, NULL, NULL, NULL);
}

void
value_range_base::set_varying ()
{
  set (VR_VARYING, NULL, NULL);
}

void
value_range::set_varying ()
{
  set (VR_VARYING, NULL, NULL, NULL);
}

/* Return TRUE if it is possible that range contains VAL.  */

bool
value_range_base::may_contain_p (tree val) const
{
  if (varying_p ())
    return true;

  if (undefined_p ())
    return true;

  if (m_kind == VR_ANTI_RANGE)
    {
      int res = value_inside_range (val, min (), max ());
      return res == 0 || res == -2;
    }
  return value_inside_range (val, min (), max ()) != 0;
}
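
/* Illustrative sketch for exposition (the helper below is hypothetical
   and nothing in VRP calls it): an anti-range ~[0, 0] may contain any
   value except zero, so value_inside_range returning 0 ("not inside
   [0, 0]") means the value may be contained.  */

DEBUG_FUNCTION void
debug_may_contain_p_example ()
{
  tree zero = build_int_cst (integer_type_node, 0);
  value_range_base vr (VR_ANTI_RANGE, zero, zero);
  gcc_assert (vr.may_contain_p (build_int_cst (integer_type_node, 5)));
  gcc_assert (!vr.may_contain_p (zero));
}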

void
value_range::equiv_clear ()
{
  if (m_equiv)
    bitmap_clear (m_equiv);
}

/* Add VAR and VAR's equivalence set (VAR_VR) to the equivalence
   bitmap.  If no equivalence table has been created, OBSTACK is the
   obstack to use (NULL for the default obstack).

   This is the central point where equivalence processing can be
   turned on/off.  */

void
value_range::equiv_add (const_tree var,
			const value_range *var_vr,
			bitmap_obstack *obstack)
{
  if (!m_equiv)
    m_equiv = BITMAP_ALLOC (obstack);
  unsigned ver = SSA_NAME_VERSION (var);
  bitmap_set_bit (m_equiv, ver);
  if (var_vr && var_vr->m_equiv)
    bitmap_ior_into (m_equiv, var_vr->m_equiv);
}

/* If range is a singleton, place it in RESULT and return TRUE.
   Note: A singleton can be any gimple invariant, not just constants.
   So, [&x, &x] counts as a singleton.  */

bool
value_range_base::singleton_p (tree *result) const
{
  if (m_kind == VR_RANGE
      && vrp_operand_equal_p (min (), max ())
      && is_gimple_min_invariant (min ()))
    {
      if (result)
	*result = min ();
      return true;
    }
  return false;
}
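
/* Illustrative sketch (hypothetical debug helper, not used anywhere):
   [5, 5] is a singleton and singleton_p extracts its value.  */

DEBUG_FUNCTION void
debug_singleton_p_example ()
{
  tree five = build_int_cst (integer_type_node, 5);
  value_range_base vr (VR_RANGE, five, five);
  tree val = NULL_TREE;
  gcc_assert (vr.singleton_p (&val));
  gcc_assert (tree_int_cst_equal (val, five));
}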

tree
value_range_base::type () const
{
  /* Types are only valid for VR_RANGE and VR_ANTI_RANGE, which are
     known to have non-zero min/max.  */
  gcc_assert (min ());
  return TREE_TYPE (min ());
}

void
value_range_base::dump (FILE *file) const
{
  if (undefined_p ())
    fprintf (file, "UNDEFINED");
  else if (m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE)
    {
      tree ttype = type ();

      print_generic_expr (file, ttype);
      fprintf (file, " ");

      fprintf (file, "%s[", (m_kind == VR_ANTI_RANGE) ? "~" : "");

      if (INTEGRAL_TYPE_P (ttype)
	  && !TYPE_UNSIGNED (ttype)
	  && vrp_val_is_min (min ())
	  && TYPE_PRECISION (ttype) != 1)
	fprintf (file, "-INF");
      else
	print_generic_expr (file, min ());

      fprintf (file, ", ");

      if (INTEGRAL_TYPE_P (ttype)
	  && vrp_val_is_max (max ())
	  && TYPE_PRECISION (ttype) != 1)
	fprintf (file, "+INF");
      else
	print_generic_expr (file, max ());

      fprintf (file, "]");
    }
  else if (varying_p ())
    fprintf (file, "VARYING");
  else
    gcc_unreachable ();
}

void
value_range::dump (FILE *file) const
{
  value_range_base::dump (file);
  if ((m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE)
      && m_equiv)
    {
      bitmap_iterator bi;
      unsigned i, c = 0;

      fprintf (file, "  EQUIVALENCES: { ");

      EXECUTE_IF_SET_IN_BITMAP (m_equiv, 0, i, bi)
	{
	  print_generic_expr (file, ssa_name (i));
	  fprintf (file, " ");
	  c++;
	}

      fprintf (file, "} (%u elements)", c);
    }
}

void
dump_value_range (FILE *file, const value_range *vr)
{
  if (!vr)
    fprintf (file, "[]");
  else
    vr->dump (file);
}

void
dump_value_range (FILE *file, const value_range_base *vr)
{
  if (!vr)
    fprintf (file, "[]");
  else
    vr->dump (file);
}

DEBUG_FUNCTION void
debug (const value_range_base *vr)
{
  dump_value_range (stderr, vr);
}

DEBUG_FUNCTION void
debug (const value_range_base &vr)
{
  dump_value_range (stderr, &vr);
}

DEBUG_FUNCTION void
debug (const value_range *vr)
{
  dump_value_range (stderr, vr);
}

DEBUG_FUNCTION void
debug (const value_range &vr)
{
  dump_value_range (stderr, &vr);
}

/* Return true if the SSA name NAME is live on the edge E.  */

static bool
live_on_edge (edge e, tree name)
{
  return (live[e->dest->index]
	  && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
}

/* Location information for ASSERT_EXPRs.  Each instance of this
   structure describes an ASSERT_EXPR for an SSA name.  Since a single
   SSA name may have more than one assertion associated with it, these
   locations are kept in a linked list attached to the corresponding
   SSA name.  */
struct assert_locus
{
  /* Basic block where the assertion would be inserted.  */
  basic_block bb;

  /* Some assertions need to be inserted on an edge (e.g., assertions
     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
  edge e;

  /* Pointer to the statement that generated this assertion.  */
  gimple_stmt_iterator si;

  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
  enum tree_code comp_code;

  /* Value being compared against.  */
  tree val;

  /* Expression to compare.  */
  tree expr;

  /* Next node in the linked list.  */
  assert_locus *next;
};

/* If bit I is present, it means that SSA name N_i has a list of
   assertions that should be inserted in the IL.  */
static bitmap need_assert_for;

/* Array of location lists describing where to insert assertions.
   ASSERTS_FOR[I] holds a list of ASSERT_LOCUS_T nodes that describe
   where ASSERT_EXPRs for SSA name N_I should be inserted.  */
static assert_locus **asserts_for;

/* Return the maximum value for TYPE.  */

tree
vrp_val_max (const_tree type)
{
  if (!INTEGRAL_TYPE_P (type))
    return NULL_TREE;

  return TYPE_MAX_VALUE (type);
}

/* Return the minimum value for TYPE.  */

tree
vrp_val_min (const_tree type)
{
  if (!INTEGRAL_TYPE_P (type))
    return NULL_TREE;

  return TYPE_MIN_VALUE (type);
}

/* Return whether VAL is equal to the maximum value of its type.
   We can't do a simple equality comparison with TYPE_MAX_VALUE because
   C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
   is not == to the integer constant with the same value in the type.  */

bool
vrp_val_is_max (const_tree val)
{
  tree type_max = vrp_val_max (TREE_TYPE (val));
  return (val == type_max
	  || (type_max != NULL_TREE
	      && operand_equal_p (val, type_max, 0)));
}

/* Return whether VAL is equal to the minimum value of its type.  */

bool
vrp_val_is_min (const_tree val)
{
  tree type_min = vrp_val_min (TREE_TYPE (val));
  return (val == type_min
	  || (type_min != NULL_TREE
	      && operand_equal_p (val, type_min, 0)));
}

/* VR_TYPE describes a range with minimum value *MIN and maximum
   value *MAX.  Restrict the range to the set of values that have
   no bits set outside NONZERO_BITS.  Update *MIN and *MAX and
   return the new range type.

   SGN gives the sign of the values described by the range.  */

enum value_range_kind
intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
				   wide_int *min, wide_int *max,
				   const wide_int &nonzero_bits,
				   signop sgn)
{
  if (vr_type == VR_ANTI_RANGE)
    {
      /* The VR_ANTI_RANGE is equivalent to the union of the ranges
	 A: [-INF, *MIN) and B: (*MAX, +INF].  First use NONZERO_BITS
	 to create an inclusive upper bound for A and an inclusive lower
	 bound for B.  */
      wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
      wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);

      /* If the calculation of A_MAX wrapped, A is effectively empty
	 and A_MAX is the highest value that satisfies NONZERO_BITS.
	 Likewise if the calculation of B_MIN wrapped, B is effectively
	 empty and B_MIN is the lowest value that satisfies NONZERO_BITS.  */
      bool a_empty = wi::ge_p (a_max, *min, sgn);
      bool b_empty = wi::le_p (b_min, *max, sgn);

      /* If both A and B are empty, there are no valid values.  */
      if (a_empty && b_empty)
	return VR_UNDEFINED;

      /* If exactly one of A or B is empty, return a VR_RANGE for the
	 other one.  */
      if (a_empty || b_empty)
	{
	  *min = b_min;
	  *max = a_max;
	  gcc_checking_assert (wi::le_p (*min, *max, sgn));
	  return VR_RANGE;
	}

      /* Update the VR_ANTI_RANGE bounds.  */
      *min = a_max + 1;
      *max = b_min - 1;
      gcc_checking_assert (wi::le_p (*min, *max, sgn));

      /* Now check whether the excluded range includes any values that
	 satisfy NONZERO_BITS.  If not, switch to a full VR_RANGE.  */
      if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
	{
	  unsigned int precision = min->get_precision ();
	  *min = wi::min_value (precision, sgn);
	  *max = wi::max_value (precision, sgn);
	  vr_type = VR_RANGE;
	}
    }
  if (vr_type == VR_RANGE)
    {
      *max = wi::round_down_for_mask (*max, nonzero_bits);

      /* Check that the range contains at least one valid value.  */
      if (wi::gt_p (*min, *max, sgn))
	return VR_UNDEFINED;

      *min = wi::round_up_for_mask (*min, nonzero_bits);
      gcc_checking_assert (wi::le_p (*min, *max, sgn));
    }
  return vr_type;
}
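
/* Illustrative sketch (hypothetical debug helper, not called from the
   pass): intersect the signed range [0, 100] with NONZERO_BITS 0x3c,
   i.e. only bits 2..5 may be set.  The upper bound rounds down to the
   largest value composed solely of mask bits (60) and the lower bound
   rounds up to 0, so the result is [0, 60].  */

DEBUG_FUNCTION void
debug_intersect_nonzero_bits_example ()
{
  unsigned int prec = TYPE_PRECISION (integer_type_node);
  wide_int min = wi::shwi (0, prec);
  wide_int max = wi::shwi (100, prec);
  wide_int nonzero_bits = wi::shwi (0x3c, prec);
  enum value_range_kind kind
    = intersect_range_with_nonzero_bits (VR_RANGE, &min, &max,
					 nonzero_bits, SIGNED);
  gcc_assert (kind == VR_RANGE);
  gcc_assert (min == wi::shwi (0, prec) && max == wi::shwi (60, prec));
}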


/* Set value range to the canonical form of {VRTYPE, MIN, MAX, EQUIV}.
   This means adjusting VRTYPE, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */

void
value_range_base::set_and_canonicalize (enum value_range_kind kind,
					tree min, tree max)
{
  /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
  if (kind == VR_UNDEFINED)
    {
      set_undefined ();
      return;
    }
  else if (kind == VR_VARYING)
    {
      set_varying ();
      return;
    }

  /* Nothing to canonicalize for symbolic ranges.  */
  if (TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      set (kind, min, max);
      return;
    }

  /* If min and max are in the wrong order, swap them and adjust
     the VR kind accordingly.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one, tmp;

      /* For one bit precision if max < min, then the swapped
	 range covers all values, so for VR_RANGE it is varying and
	 for VR_ANTI_RANGE empty range, so drop to varying as well.  */
      if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
	{
	  set_varying ();
	  return;
	}

      one = build_int_cst (TREE_TYPE (min), 1);
      tmp = int_const_binop (PLUS_EXPR, max, one);
      max = int_const_binop (MINUS_EXPR, min, one);
      min = tmp;

      /* There's one corner case, if we had [C+1, C] before we now have
	 that again.  But this represents an empty value range, so drop
	 to varying in this case.  */
      if (tree_int_cst_lt (max, min))
	{
	  set_varying ();
	  return;
	}

      kind = kind == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (kind == VR_ANTI_RANGE)
    {
      /* For -fstrict-enums we may receive out-of-range ranges so consider
	 values < -INF and values > INF as -INF/INF as well.  */
      tree type = TREE_TYPE (min);
      bool is_min = (INTEGRAL_TYPE_P (type)
		     && tree_int_cst_compare (min, TYPE_MIN_VALUE (type)) <= 0);
      bool is_max = (INTEGRAL_TYPE_P (type)
		     && tree_int_cst_compare (max, TYPE_MAX_VALUE (type)) >= 0);

      if (is_min && is_max)
	{
	  /* We cannot deal with empty ranges, drop to varying.
	     ???  This could be VR_UNDEFINED instead.  */
	  set_varying ();
	  return;
	}
      else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
	       && (is_min || is_max))
	{
	  /* Non-empty boolean ranges can always be represented
	     as a singleton range.  */
	  if (is_min)
	    min = max = vrp_val_max (TREE_TYPE (min));
	  else
	    min = max = vrp_val_min (TREE_TYPE (min));
	  kind = VR_RANGE;
	}
      else if (is_min
	       /* As a special exception preserve non-null ranges.  */
	       && !(TYPE_UNSIGNED (TREE_TYPE (min))
		    && integer_zerop (max)))
	{
	  tree one = build_int_cst (TREE_TYPE (max), 1);
	  min = int_const_binop (PLUS_EXPR, max, one);
	  max = vrp_val_max (TREE_TYPE (max));
	  kind = VR_RANGE;
	}
      else if (is_max)
	{
	  tree one = build_int_cst (TREE_TYPE (min), 1);
	  max = int_const_binop (MINUS_EXPR, min, one);
	  min = vrp_val_min (TREE_TYPE (min));
	  kind = VR_RANGE;
	}
    }

  /* Do not drop [-INF(OVF), +INF(OVF)] to varying.  (OVF) has to be sticky
     to make sure VRP iteration terminates, otherwise we can get into
     oscillations.  */

  set (kind, min, max);
}
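
/* Illustrative sketch (hypothetical debug helper): canonicalizing the
   wrapped int range [10, 5].  Since max < min, the bounds are swapped
   around the excluded gap and the kind is flipped, yielding the
   anti-range ~[6, 9], i.e. [INT_MIN, 5] U [10, INT_MAX].  */

DEBUG_FUNCTION void
debug_set_and_canonicalize_example ()
{
  value_range_base vr;
  vr.set_and_canonicalize (VR_RANGE,
			   build_int_cst (integer_type_node, 10),
			   build_int_cst (integer_type_node, 5));
  vr.dump (stderr);		/* Prints: int ~[6, 9].  */
  fprintf (stderr, "\n");
}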

void
value_range::set_and_canonicalize (enum value_range_kind kind,
				   tree min, tree max, bitmap equiv)
{
  value_range_base::set_and_canonicalize (kind, min, max);
  if (this->kind () == VR_RANGE || this->kind () == VR_ANTI_RANGE)
    set_equiv (equiv);
  else
    equiv_clear ();
}

void
value_range_base::set (tree val)
{
  gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val));
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  set (VR_RANGE, val, val);
}

void
value_range::set (tree val)
{
  gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val));
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  set (VR_RANGE, val, val, NULL);
}

/* Set value range VR to a non-NULL range of type TYPE.  */

void
value_range_base::set_nonnull (tree type)
{
  tree zero = build_int_cst (type, 0);
  set (VR_ANTI_RANGE, zero, zero);
}

void
value_range::set_nonnull (tree type)
{
  tree zero = build_int_cst (type, 0);
  set (VR_ANTI_RANGE, zero, zero, NULL);
}

/* Set value range VR to a NULL range of type TYPE.  */

void
value_range_base::set_null (tree type)
{
  set (build_int_cst (type, 0));
}

void
value_range::set_null (tree type)
{
  set (build_int_cst (type, 0));
}

/* Return true, if VAL1 and VAL2 are equal values for VRP purposes.  */

bool
vrp_operand_equal_p (const_tree val1, const_tree val2)
{
  if (val1 == val2)
    return true;
  if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
    return false;
  return true;
}

/* Return true, if the bitmaps B1 and B2 are equal.  */

bool
vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
{
  return (b1 == b2
	  || ((!b1 || bitmap_empty_p (b1))
	      && (!b2 || bitmap_empty_p (b2)))
	  || (b1 && b2
	      && bitmap_equal_p (b1, b2)));
}

/* Return true if VR is [0, 0].  */

static inline bool
range_is_null (const value_range_base *vr)
{
  return vr->zero_p ();
}

static inline bool
range_is_nonnull (const value_range_base *vr)
{
  return (vr->kind () == VR_ANTI_RANGE
	  && vr->min () == vr->max ()
	  && integer_zerop (vr->min ()));
}

/* Return true if max and min of VR are INTEGER_CST.  It need not be
   a singleton.  */

bool
range_int_cst_p (const value_range_base *vr)
{
  return (vr->kind () == VR_RANGE
	  && TREE_CODE (vr->min ()) == INTEGER_CST
	  && TREE_CODE (vr->max ()) == INTEGER_CST);
}

/* Return true if VR is an INTEGER_CST singleton.  */

bool
range_int_cst_singleton_p (const value_range_base *vr)
{
  return (range_int_cst_p (vr)
	  && tree_int_cst_equal (vr->min (), vr->max ()));
}

/* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
   otherwise.  We only handle additive operations and set NEG to true if the
   symbol is negated and INV to the invariant part, if any.  */

tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
  bool neg_;
  tree inv_;

  *inv = NULL_TREE;
  *neg = false;

  if (TREE_CODE (t) == PLUS_EXPR
      || TREE_CODE (t) == POINTER_PLUS_EXPR
      || TREE_CODE (t) == MINUS_EXPR)
    {
      if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
	{
	  neg_ = (TREE_CODE (t) == MINUS_EXPR);
	  inv_ = TREE_OPERAND (t, 0);
	  t = TREE_OPERAND (t, 1);
	}
      else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
	{
	  neg_ = false;
	  inv_ = TREE_OPERAND (t, 1);
	  t = TREE_OPERAND (t, 0);
	}
      else
	return NULL_TREE;
    }
  else
    {
      neg_ = false;
      inv_ = NULL_TREE;
    }

  if (TREE_CODE (t) == NEGATE_EXPR)
    {
      t = TREE_OPERAND (t, 0);
      neg_ = !neg_;
    }

  if (TREE_CODE (t) != SSA_NAME)
    return NULL_TREE;

  if (inv_ && TREE_OVERFLOW_P (inv_))
    inv_ = drop_tree_overflow (inv_);

  *neg = neg_;
  *inv = inv_;
  return t;
}
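
/* Illustrative sketch (hypothetical debug helper): given some SSA_NAME
   NAME of integral type, "3 - NAME" decomposes into the symbol NAME,
   negated, with invariant part 3.  */

DEBUG_FUNCTION void
debug_get_single_symbol_example (tree name)
{
  tree three = build_int_cst (TREE_TYPE (name), 3);
  tree expr = build2 (MINUS_EXPR, TREE_TYPE (name), three, name);
  bool neg;
  tree inv;
  tree sym = get_single_symbol (expr, &neg, &inv);
  gcc_assert (sym == name);
  gcc_assert (neg);
  gcc_assert (tree_int_cst_equal (inv, three));
}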

/* The reverse operation: build a symbolic expression with TYPE
   from symbol SYM, negated according to NEG, and invariant INV.  */

static tree
build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
{
  const bool pointer_p = POINTER_TYPE_P (type);
  tree t = sym;

  if (neg)
    t = build1 (NEGATE_EXPR, type, t);

  if (integer_zerop (inv))
    return t;

  return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
}

/* Return
   1 if VAL < VAL2
   0 if !(VAL < VAL2)
   -2 if those are incomparable.  */
int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    return tree_int_cst_lt (val, val2);
  else
    {
      tree tcmp;

      fold_defer_overflow_warnings ();

      tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);

      fold_undefer_and_ignore_overflow_warnings ();

      if (!tcmp
	  || TREE_CODE (tcmp) != INTEGER_CST)
	return -2;

      if (!integer_zerop (tcmp))
	return 1;
    }

  return 0;
}

/* Compare two values VAL1 and VAL2.  Return

	-2 if VAL1 and VAL2 cannot be compared at compile-time,
	-1 if VAL1 < VAL2,
	 0 if VAL1 == VAL2,
	+1 if VAL1 > VAL2, and
	+2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */

int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
	      == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  val2 = fold_convert (TREE_TYPE (val1), val2);
  STRIP_USELESS_TYPE_CONVERSION (val2);

  const bool overflow_undefined
    = INTEGRAL_TYPE_P (TREE_TYPE (val1))
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
  tree inv1, inv2;
  bool neg1, neg2;
  tree sym1 = get_single_symbol (val1, &neg1, &inv1);
  tree sym2 = get_single_symbol (val2, &neg2, &inv2);

  /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
     accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
  if (sym1 && sym2)
    {
      /* Both values must use the same name with the same sign.  */
      if (sym1 != sym2 || neg1 != neg2)
	return -2;

      /* [-]NAME + CST == [-]NAME + CST.  */
      if (inv1 == inv2)
	return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!overflow_undefined)
	return -2;

      if (strict_overflow_p != NULL
	  /* Symbolic range building sets TREE_NO_WARNING to declare
	     that overflow doesn't happen.  */
	  && (!inv1 || !TREE_NO_WARNING (val1))
	  && (!inv2 || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      if (!inv1)
	inv1 = build_int_cst (TREE_TYPE (val1), 0);
      if (!inv2)
	inv2 = build_int_cst (TREE_TYPE (val2), 0);

      return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
		      TYPE_SIGN (TREE_TYPE (val1)));
    }

  const bool cst1 = is_gimple_min_invariant (val1);
  const bool cst2 = is_gimple_min_invariant (val2);

  /* If one is of the form '[-]NAME + CST' and the other is constant, then
     it might be possible to say something depending on the constants.  */
  if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
    {
      if (!overflow_undefined)
	return -2;

      if (strict_overflow_p != NULL
	  /* Symbolic range building sets TREE_NO_WARNING to declare
	     that overflow doesn't happen.  */
	  && (!sym1 || !TREE_NO_WARNING (val1))
	  && (!sym2 || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
      tree cst = cst1 ? val1 : val2;
      tree inv = cst1 ? inv2 : inv1;

      /* Compute the difference between the constants.  If it overflows or
	 underflows, this means that we can trivially compare the NAME with
	 it and, consequently, the two values with each other.  */
      wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
      if (wi::cmp (0, wi::to_wide (inv), sgn)
	  != wi::cmp (diff, wi::to_wide (cst), sgn))
	{
	  const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
	  return cst1 ? res : -res;
	}

      return -2;
    }

  /* We cannot say anything more for non-constants.  */
  if (!cst1 || !cst2)
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	return -2;

      if (TREE_CODE (val1) == INTEGER_CST
	  && TREE_CODE (val2) == INTEGER_CST)
	return tree_int_cst_compare (val1, val2);

      if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
	{
	  if (known_eq (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return 0;
	  if (known_lt (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return -1;
	  if (known_gt (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return 1;
	}

      return -2;
    }
  else
    {
      tree t;

      /* First see if VAL1 and VAL2 are not the same.  */
      if (val1 == val2 || operand_equal_p (val1, val2, 0))
	return 0;

      /* If VAL1 is a lower address than VAL2, return -1.  */
      if (operand_less_p (val1, val2) == 1)
	return -1;

      /* If VAL1 is a higher address than VAL2, return +1.  */
      if (operand_less_p (val2, val1) == 1)
	return 1;

      /* If VAL1 is different than VAL2, return +2.
	 For integer constants we either have already returned -1 or 1
	 or they are equivalent.  We still might succeed in proving
	 something about non-trivial operands.  */
      if (TREE_CODE (val1) != INTEGER_CST
	  || TREE_CODE (val2) != INTEGER_CST)
	{
	  t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
	  if (t && integer_onep (t))
	    return 2;
	}

      return -2;
    }
}
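
/* Illustrative sketch (hypothetical debug helper): the result codes on
   integer constants.  Symbolic operands such as "x + 1" versus "x + 3"
   are compared through their invariants once the common symbol is
   stripped, provided signed overflow is undefined.  */

DEBUG_FUNCTION void
debug_compare_values_example ()
{
  tree five = build_int_cst (integer_type_node, 5);
  tree seven = build_int_cst (integer_type_node, 7);
  gcc_assert (compare_values (five, seven) == -1);
  gcc_assert (compare_values (seven, five) == 1);
  gcc_assert (compare_values (five, five) == 0);
}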

/* Compare values like compare_values_warnv.  */

int
compare_values (tree val1, tree val2)
{
  bool sop;
  return compare_values_warnv (val1, val2, &sop);
}


/* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
	  0 if VAL is not inside [MIN, MAX],
	 -2 if we cannot tell either way.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

int
value_inside_range (tree val, tree min, tree max)
{
  int cmp1, cmp2;

  cmp1 = operand_less_p (val, min);
  if (cmp1 == -2)
    return -2;
  if (cmp1 == 1)
    return 0;

  cmp2 = operand_less_p (max, val);
  if (cmp2 == -2)
    return -2;

  return !cmp2;
}
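
/* Illustrative sketch (hypothetical debug helper): 5 is inside [1, 10]
   (result 1), while 10 is not inside [1, 5] (result 0).  */

DEBUG_FUNCTION void
debug_value_inside_range_example ()
{
  tree one = build_int_cst (integer_type_node, 1);
  tree five = build_int_cst (integer_type_node, 5);
  tree ten = build_int_cst (integer_type_node, 10);
  gcc_assert (value_inside_range (five, one, ten) == 1);
  gcc_assert (value_inside_range (ten, one, five) == 0);
}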


/* Return TRUE if *VR includes the value X.  */

bool
range_includes_p (const value_range_base *vr, HOST_WIDE_INT x)
{
  if (vr->varying_p () || vr->undefined_p ())
    return true;
  return vr->may_contain_p (build_int_cst (vr->type (), x));
}

/* If *VR has a value range that is a single constant value return that,
   otherwise return NULL_TREE.

   ?? This actually returns TRUE for [&x, &x], so perhaps "constant"
   is not the best name.  */

tree
value_range_constant_singleton (const value_range_base *vr)
{
  tree result = NULL;
  if (vr->singleton_p (&result))
    return result;
  return NULL;
}

/* Value range wrapper for wide_int_range_set_zero_nonzero_bits.

   Compute MAY_BE_NONZERO and MUST_BE_NONZERO bit masks for range in VR.

   Return TRUE if VR was a constant range and we were able to compute
   the bit masks.  */

bool
vrp_set_zero_nonzero_bits (const tree expr_type,
			   const value_range_base *vr,
			   wide_int *may_be_nonzero,
			   wide_int *must_be_nonzero)
{
  if (!range_int_cst_p (vr))
    {
      *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
      *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
      return false;
    }
  wide_int_range_set_zero_nonzero_bits (TYPE_SIGN (expr_type),
					wi::to_wide (vr->min ()),
					wi::to_wide (vr->max ()),
					*may_be_nonzero, *must_be_nonzero);
  return true;
}
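
/* Illustrative sketch (hypothetical debug helper): for the range
   [4, 7] every value has bit 2 set and no value has a bit above bit 2,
   so MUST_BE_NONZERO is 0b100 and MAY_BE_NONZERO is 0b111.  */

DEBUG_FUNCTION void
debug_zero_nonzero_bits_example ()
{
  value_range_base vr (VR_RANGE,
		       build_int_cst (integer_type_node, 4),
		       build_int_cst (integer_type_node, 7));
  wide_int may_be_nonzero, must_be_nonzero;
  gcc_assert (vrp_set_zero_nonzero_bits (integer_type_node, &vr,
					 &may_be_nonzero,
					 &must_be_nonzero));
  unsigned int prec = TYPE_PRECISION (integer_type_node);
  gcc_assert (may_be_nonzero == wi::shwi (7, prec));
  gcc_assert (must_be_nonzero == wi::shwi (4, prec));
}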

/* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
   so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
   false otherwise.  If *AR can be represented with a single range
   *VR1 will be VR_UNDEFINED.  */

static bool
ranges_from_anti_range (const value_range_base *ar,
			value_range_base *vr0, value_range_base *vr1)
{
  tree type = ar->type ();

  vr0->set_undefined ();
  vr1->set_undefined ();

  /* As a future improvement, we could handle ~[0, A] as: [-INF, -1] U
     [A+1, +INF].  Not sure if this helps in practice, though.  */

  if (ar->kind () != VR_ANTI_RANGE
      || TREE_CODE (ar->min ()) != INTEGER_CST
      || TREE_CODE (ar->max ()) != INTEGER_CST
      || !vrp_val_min (type)
      || !vrp_val_max (type))
    return false;

  if (tree_int_cst_lt (vrp_val_min (type), ar->min ()))
    vr0->set (VR_RANGE,
	      vrp_val_min (type),
	      wide_int_to_tree (type, wi::to_wide (ar->min ()) - 1));
  if (tree_int_cst_lt (ar->max (), vrp_val_max (type)))
    vr1->set (VR_RANGE,
	      wide_int_to_tree (type, wi::to_wide (ar->max ()) + 1),
	      vrp_val_max (type));
  if (vr0->undefined_p ())
    {
      *vr0 = *vr1;
      vr1->set_undefined ();
    }

  return !vr0->undefined_p ();
}
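
/* Illustrative sketch (hypothetical debug helper): the anti-range
   ~[5, 10] in int splits into [INT_MIN, 4] and [11, INT_MAX].  */

DEBUG_FUNCTION void
debug_ranges_from_anti_range_example ()
{
  value_range_base ar (VR_ANTI_RANGE,
		       build_int_cst (integer_type_node, 5),
		       build_int_cst (integer_type_node, 10));
  value_range_base vr0, vr1;
  gcc_assert (ranges_from_anti_range (&ar, &vr0, &vr1));
  vr0.dump (stderr);		/* Prints: int [-INF, 4].  */
  fprintf (stderr, " U ");
  vr1.dump (stderr);		/* Prints: int [11, +INF].  */
  fprintf (stderr, "\n");
}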

/* Extract the components of a value range into a pair of wide ints in
   [WMIN, WMAX].

   If the value range is anything but a VR_*RANGE of constants, the
   resulting wide ints are set to [-MIN, +MAX] for the type.  */

static void inline
extract_range_into_wide_ints (const value_range_base *vr,
			      signop sign, unsigned prec,
			      wide_int &wmin, wide_int &wmax)
{
  gcc_assert (vr->kind () != VR_ANTI_RANGE || vr->symbolic_p ());
  if (range_int_cst_p (vr))
    {
      wmin = wi::to_wide (vr->min ());
      wmax = wi::to_wide (vr->max ());
    }
  else
    {
      wmin = wi::min_value (prec, sign);
      wmax = wi::max_value (prec, sign);
    }
}

/* Value range wrapper for wide_int_range_multiplicative_op:

     *VR = *VR0 .CODE. *VR1.  */

static void
extract_range_from_multiplicative_op (value_range_base *vr,
				      enum tree_code code,
				      const value_range_base *vr0,
				      const value_range_base *vr1)
{
  gcc_assert (code == MULT_EXPR
	      || code == TRUNC_DIV_EXPR
	      || code == FLOOR_DIV_EXPR
	      || code == CEIL_DIV_EXPR
	      || code == EXACT_DIV_EXPR
	      || code == ROUND_DIV_EXPR
	      || code == RSHIFT_EXPR
	      || code == LSHIFT_EXPR);
  gcc_assert (vr0->kind () == VR_RANGE
	      && vr0->kind () == vr1->kind ());

  tree type = vr0->type ();
  wide_int res_lb, res_ub;
  wide_int vr0_lb = wi::to_wide (vr0->min ());
  wide_int vr0_ub = wi::to_wide (vr0->max ());
  wide_int vr1_lb = wi::to_wide (vr1->min ());
  wide_int vr1_ub = wi::to_wide (vr1->max ());
  bool overflow_undefined = TYPE_OVERFLOW_UNDEFINED (type);
  unsigned prec = TYPE_PRECISION (type);

  if (wide_int_range_multiplicative_op (res_lb, res_ub,
					code, TYPE_SIGN (type), prec,
					vr0_lb, vr0_ub, vr1_lb, vr1_ub,
					overflow_undefined))
    vr->set_and_canonicalize (VR_RANGE,
			      wide_int_to_tree (type, res_lb),
			      wide_int_to_tree (type, res_ub));
  else
    vr->set_varying ();
}
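
/* Illustrative sketch (hypothetical debug helper): multiplying the
   ranges [2, 3] and [10, 11] applies MULT_EXPR to the bounds, giving
   [20, 33].  */

DEBUG_FUNCTION void
debug_multiplicative_op_example ()
{
  value_range_base vr0 (VR_RANGE,
			build_int_cst (integer_type_node, 2),
			build_int_cst (integer_type_node, 3));
  value_range_base vr1 (VR_RANGE,
			build_int_cst (integer_type_node, 10),
			build_int_cst (integer_type_node, 11));
  value_range_base res;
  extract_range_from_multiplicative_op (&res, MULT_EXPR, &vr0, &vr1);
  res.dump (stderr);		/* Prints: int [20, 33].  */
  fprintf (stderr, "\n");
}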

/* If BOUND will include a symbolic bound, adjust it accordingly,
   otherwise leave it as is.

   CODE is the original operation that combined the bounds (PLUS_EXPR
   or MINUS_EXPR).

   TYPE is the type of the original operation.

   SYM_OPn is the symbolic for OPn if it has a symbolic.

   NEG_OPn is TRUE if the OPn was negated.  */

static void
adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
		       tree sym_op0, tree sym_op1,
		       bool neg_op0, bool neg_op1)
{
  bool minus_p = (code == MINUS_EXPR);
  /* If the result bound is constant, we're done; otherwise, build the
     symbolic lower bound.  */
  if (sym_op0 == sym_op1)
    ;
  else if (sym_op0)
    bound = build_symbolic_expr (type, sym_op0,
				 neg_op0, bound);
  else if (sym_op1)
    {
      /* We may not negate if that might introduce
	 undefined overflow.  */
      if (!minus_p
	  || neg_op1
	  || TYPE_OVERFLOW_WRAPS (type))
	bound = build_symbolic_expr (type, sym_op1,
				     neg_op1 ^ minus_p, bound);
      else
	bound = NULL_TREE;
    }
}

/* Combine OP0 and OP1, which are two parts of a bound, into one wide
   int bound according to CODE.  CODE is the operation combining the
   bound (either a PLUS_EXPR or a MINUS_EXPR).

   TYPE is the type of the combine operation.

   WI is the wide int to store the result.

   OVF is set to OVF_UNDERFLOW if an underflow occurred, to
   OVF_OVERFLOW if an overflow occurred, or to OVF_NONE if neither
   occurred.  */

static void
combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
	       tree type, tree op0, tree op1)
{
  bool minus_p = (code == MINUS_EXPR);
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  /* Combine the bounds, if any.  */
  if (op0 && op1)
    {
      if (minus_p)
	wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
      else
	wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
    }
  else if (op0)
    wi = wi::to_wide (op0);
  else if (op1)
    {
      if (minus_p)
	wi = wi::neg (wi::to_wide (op1), &ovf);
      else
	wi = wi::to_wide (op1);
    }
  else
    wi = wi::shwi (0, prec);
}

/* Given a range in [WMIN, WMAX], adjust it for possible overflow and
   put the result in VR.

   TYPE is the type of the range.

   MIN_OVF and MAX_OVF indicate what type of overflow, if any,
   occurred while originally calculating WMIN or WMAX.  OVF_UNDERFLOW
   indicates underflow, OVF_OVERFLOW indicates overflow, and OVF_NONE
   indicates neither.  */

static void
set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max,
			       tree type,
			       const wide_int &wmin, const wide_int &wmax,
			       wi::overflow_type min_ovf,
			       wi::overflow_type max_ovf)
{
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  /* For one bit precision if max < min, then the swapped
     range covers all values.  */
  if (prec == 1 && wi::lt_p (wmax, wmin, sgn))
    {
      kind = VR_VARYING;
      return;
    }

  if (TYPE_OVERFLOW_WRAPS (type))
    {
      /* If overflow wraps, truncate the values and adjust the
	 range kind and bounds appropriately.  */
      wide_int tmin = wide_int::from (wmin, prec, sgn);
      wide_int tmax = wide_int::from (wmax, prec, sgn);
      if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
	{
	  /* If the limits are swapped, we wrapped around and cover
	     the entire range.  We have a similar check at the end of
	     extract_range_from_binary_expr.  */
	  if (wi::gt_p (tmin, tmax, sgn))
	    kind = VR_VARYING;
	  else
	    {
	      kind = VR_RANGE;
	      /* No overflow or both overflow or underflow.  The
		 range kind stays VR_RANGE.  */
	      min = wide_int_to_tree (type, tmin);
	      max = wide_int_to_tree (type, tmax);
	    }
	  return;
	}
      else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
	       || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
	{
	  /* Min underflow or max overflow.  The range kind
	     changes to VR_ANTI_RANGE.  */
	  bool covers = false;
	  wide_int tem = tmin;
	  tmin = tmax + 1;
	  if (wi::cmp (tmin, tmax, sgn) < 0)
	    covers = true;
	  tmax = tem - 1;
	  if (wi::cmp (tmax, tem, sgn) > 0)
	    covers = true;
	  /* If the anti-range would cover nothing, drop to varying.
	     Likewise if the anti-range bounds are outside of the
	     type's values.  */
	  if (covers || wi::cmp (tmin, tmax, sgn) > 0)
	    {
	      kind = VR_VARYING;
	      return;
	    }
	  kind = VR_ANTI_RANGE;
	  min = wide_int_to_tree (type, tmin);
	  max = wide_int_to_tree (type, tmax);
	  return;
	}
      else
	{
	  /* Other underflow and/or overflow, drop to VR_VARYING.  */
	  kind = VR_VARYING;
	  return;
	}
    }
  else
    {
      /* If overflow does not wrap, saturate to the type's min/max
	 value.  */
      wide_int type_min = wi::min_value (prec, sgn);
      wide_int type_max = wi::max_value (prec, sgn);
      kind = VR_RANGE;
      if (min_ovf == wi::OVF_UNDERFLOW)
	min = wide_int_to_tree (type, type_min);
      else if (min_ovf == wi::OVF_OVERFLOW)
	min = wide_int_to_tree (type, type_max);
      else
	min = wide_int_to_tree (type, wmin);

      if (max_ovf == wi::OVF_UNDERFLOW)
	max = wide_int_to_tree (type, type_min);
      else if (max_ovf == wi::OVF_OVERFLOW)
	max = wide_int_to_tree (type, type_max);
      else
	max = wide_int_to_tree (type, wmax);
    }
}
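
/* Illustrative sketch (hypothetical debug helper): computing
   [250, 250] + [0, 10] in unsigned char.  combine_bound wraps
   250 + 10 to 4 and reports the overflow; since only the upper bound
   overflowed, set_value_range_with_overflow produces the anti-range
   ~[5, 249], i.e. [250, 255] U [0, 4].  */

DEBUG_FUNCTION void
debug_overflow_wrap_example ()
{
  tree type = unsigned_char_type_node;
  tree op0 = build_int_cst (type, 250);
  tree min_op1 = build_int_cst (type, 0);
  tree max_op1 = build_int_cst (type, 10);
  wide_int wmin, wmax;
  wi::overflow_type min_ovf = wi::OVF_NONE, max_ovf = wi::OVF_NONE;
  combine_bound (PLUS_EXPR, wmin, min_ovf, type, op0, min_op1);
  combine_bound (PLUS_EXPR, wmax, max_ovf, type, op0, max_op1);
  value_range_kind kind = VR_RANGE;
  tree min = NULL_TREE, max = NULL_TREE;
  set_value_range_with_overflow (kind, min, max, type,
				 wmin, wmax, min_ovf, max_ovf);
  gcc_assert (kind == VR_ANTI_RANGE);
  gcc_assert (tree_int_cst_equal (min, build_int_cst (type, 5)));
  gcc_assert (tree_int_cst_equal (max, build_int_cst (type, 249)));
}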
1517 
1518 /* Extract range information from a binary operation CODE based on
1519    the ranges of each of its operands *VR0 and *VR1 with resulting
1520    type EXPR_TYPE.  The resulting range is stored in *VR.  */
1521 
1522 void
1523 extract_range_from_binary_expr (value_range_base *vr,
1524 				enum tree_code code, tree expr_type,
1525 				const value_range_base *vr0_,
1526 				const value_range_base *vr1_)
1527 {
1528   signop sign = TYPE_SIGN (expr_type);
1529   unsigned int prec = TYPE_PRECISION (expr_type);
1530   value_range_base vr0 = *vr0_, vr1 = *vr1_;
1531   value_range_base vrtem0, vrtem1;
1532   enum value_range_kind type;
1533   tree min = NULL_TREE, max = NULL_TREE;
1534   int cmp;
1535 
1536   if (!INTEGRAL_TYPE_P (expr_type)
1537       && !POINTER_TYPE_P (expr_type))
1538     {
1539       vr->set_varying ();
1540       return;
1541     }
1542 
1543   /* Not all binary expressions can be applied to ranges in a
1544      meaningful way.  Handle only arithmetic operations.  */
1545   if (code != PLUS_EXPR
1546       && code != MINUS_EXPR
1547       && code != POINTER_PLUS_EXPR
1548       && code != MULT_EXPR
1549       && code != TRUNC_DIV_EXPR
1550       && code != FLOOR_DIV_EXPR
1551       && code != CEIL_DIV_EXPR
1552       && code != EXACT_DIV_EXPR
1553       && code != ROUND_DIV_EXPR
1554       && code != TRUNC_MOD_EXPR
1555       && code != RSHIFT_EXPR
1556       && code != LSHIFT_EXPR
1557       && code != MIN_EXPR
1558       && code != MAX_EXPR
1559       && code != BIT_AND_EXPR
1560       && code != BIT_IOR_EXPR
1561       && code != BIT_XOR_EXPR)
1562     {
1563       vr->set_varying ();
1564       return;
1565     }
1566 
1567   /* If both ranges are UNDEFINED, so is the result.  */
1568   if (vr0.undefined_p () && vr1.undefined_p ())
1569     {
1570       vr->set_undefined ();
1571       return;
1572     }
1573   /* If one of the ranges is UNDEFINED drop it to VARYING for the following
1574      code.  At some point we may want to special-case operations that
1575      have UNDEFINED result for all or some value-ranges of the not UNDEFINED
1576      operand.  */
1577   else if (vr0.undefined_p ())
1578     vr0.set_varying ();
1579   else if (vr1.undefined_p ())
1580     vr1.set_varying ();
1581 
1582   /* We get imprecise results from ranges_from_anti_range when
1583      code is EXACT_DIV_EXPR.  We could mask out bits in the resulting
1584      range, but then we also need to hack up vrp_union.  It's just
1585      easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR.  */
1586   if (code == EXACT_DIV_EXPR && range_is_nonnull (&vr0))
1587     {
1588       vr->set_nonnull (expr_type);
1589       return;
1590     }
1591 
1592   /* Now canonicalize anti-ranges to ranges when they are not symbolic
1593      and express ~[] op X as ([]' op X) U ([]'' op X).  */
1594   if (vr0.kind () == VR_ANTI_RANGE
1595       && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
1596     {
1597       extract_range_from_binary_expr (vr, code, expr_type, &vrtem0, vr1_);
1598       if (!vrtem1.undefined_p ())
1599 	{
1600 	  value_range_base vrres;
1601 	  extract_range_from_binary_expr (&vrres, code, expr_type,
1602 					  &vrtem1, vr1_);
1603 	  vr->union_ (&vrres);
1604 	}
1605       return;
1606     }
1607   /* Likewise for X op ~[].  */
1608   if (vr1.kind () == VR_ANTI_RANGE
1609       && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
1610     {
1611       extract_range_from_binary_expr (vr, code, expr_type, vr0_, &vrtem0);
1612       if (!vrtem1.undefined_p ())
1613 	{
1614 	  value_range_base vrres;
1615 	  extract_range_from_binary_expr (&vrres, code, expr_type,
1616 					  vr0_, &vrtem1);
1617 	  vr->union_ (&vrres);
1618 	}
1619       return;
1620     }
1621 
1622   /* The type of the resulting value range defaults to VR0.TYPE.  */
1623   type = vr0.kind ();
1624 
1625   /* Refuse to operate on VARYING ranges, ranges of different kinds
1626      and symbolic ranges.  As an exception, we allow BIT_{AND,IOR}
1627      because we may be able to derive a useful range even if one of
1628      the operands is VR_VARYING or symbolic range.  Similarly for
1629      divisions, MIN/MAX and PLUS/MINUS.
1630 
1631      TODO, we may be able to derive anti-ranges in some cases.  */
1632   if (code != BIT_AND_EXPR
1633       && code != BIT_IOR_EXPR
1634       && code != TRUNC_DIV_EXPR
1635       && code != FLOOR_DIV_EXPR
1636       && code != CEIL_DIV_EXPR
1637       && code != EXACT_DIV_EXPR
1638       && code != ROUND_DIV_EXPR
1639       && code != TRUNC_MOD_EXPR
1640       && code != MIN_EXPR
1641       && code != MAX_EXPR
1642       && code != PLUS_EXPR
1643       && code != MINUS_EXPR
1644       && code != RSHIFT_EXPR
1645       && code != POINTER_PLUS_EXPR
1646       && (vr0.varying_p ()
1647 	  || vr1.varying_p ()
1648 	  || vr0.kind () != vr1.kind ()
1649 	  || vr0.symbolic_p ()
1650 	  || vr1.symbolic_p ()))
1651     {
1652       vr->set_varying ();
1653       return;
1654     }
1655 
1656   /* Now evaluate the expression to determine the new range.  */
1657   if (POINTER_TYPE_P (expr_type))
1658     {
1659       if (code == MIN_EXPR || code == MAX_EXPR)
1660 	{
1661 	  /* For MIN/MAX expressions with pointers, we only care about
1662 	     nullness, if both are non null, then the result is nonnull.
1663 	     If both are null, then the result is null. Otherwise they
1664 	     are varying.  */
1665 	  if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
1666 	    vr->set_nonnull (expr_type);
1667 	  else if (range_is_null (&vr0) && range_is_null (&vr1))
1668 	    vr->set_null (expr_type);
1669 	  else
1670 	    vr->set_varying ();
1671 	}
1672       else if (code == POINTER_PLUS_EXPR)
1673 	{
1674 	  /* For pointer types, we are really only interested in asserting
1675 	     whether the expression evaluates to non-NULL.
1676 	     With -fno-delete-null-pointer-checks we need to be more
1677 	     conservative.  As some object might reside at address 0,
1678 	     then some offset could be added to it and the same offset
1679 	     subtracted again and the result would be NULL.
1680 	     E.g.
1681 	     static int a[12]; where &a[0] is NULL and
1682 	     ptr = &a[6];
1683 	     ptr -= 6;
1684 	     ptr will be NULL here, even when there is POINTER_PLUS_EXPR
1685 	     where the first range doesn't include zero and the second one
1686 	     doesn't either.  As the second operand is sizetype (unsigned),
1687 	     consider all ranges where the MSB could be set as possible
1688 	     subtractions where the result might be NULL.  */
1689 	  if ((!range_includes_zero_p (&vr0)
1690 	       || !range_includes_zero_p (&vr1))
1691 	      && !TYPE_OVERFLOW_WRAPS (expr_type)
1692 	      && (flag_delete_null_pointer_checks
1693 		  || (range_int_cst_p (&vr1)
1694 		      && !tree_int_cst_sign_bit (vr1.max ()))))
1695 	    vr->set_nonnull (expr_type);
1696 	  else if (range_is_null (&vr0) && range_is_null (&vr1))
1697 	    vr->set_null (expr_type);
1698 	  else
1699 	    vr->set_varying ();
1700 	}
1701       else if (code == BIT_AND_EXPR)
1702 	{
1703 	  /* For pointer types, we are really only interested in asserting
1704 	     whether the expression evaluates to non-NULL.  */
1705 	  if (range_is_null (&vr0) || range_is_null (&vr1))
1706 	    vr->set_null (expr_type);
1707 	  else
1708 	    vr->set_varying ();
1709 	}
1710       else
1711 	vr->set_varying ();
1712 
1713       return;
1714     }
1715 
1716   /* For integer ranges, apply the operation to each end of the
1717      range and see what we end up with.  */
1718   if (code == PLUS_EXPR || code == MINUS_EXPR)
1719     {
1720       /* This will normalize things such that calculating
1721 	 [0,0] - VR_VARYING is not dropped to varying, but is
1722 	 calculated as [MIN+1, MAX].  */
1723       if (vr0.varying_p ())
1724 	vr0.set (VR_RANGE, vrp_val_min (expr_type), vrp_val_max (expr_type));
1725       if (vr1.varying_p ())
1726 	vr1.set (VR_RANGE, vrp_val_min (expr_type), vrp_val_max (expr_type));
1727 
1728       const bool minus_p = (code == MINUS_EXPR);
1729       tree min_op0 = vr0.min ();
1730       tree min_op1 = minus_p ? vr1.max () : vr1.min ();
1731       tree max_op0 = vr0.max ();
1732       tree max_op1 = minus_p ? vr1.min () : vr1.max ();
1733       tree sym_min_op0 = NULL_TREE;
1734       tree sym_min_op1 = NULL_TREE;
1735       tree sym_max_op0 = NULL_TREE;
1736       tree sym_max_op1 = NULL_TREE;
1737       bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;
1738 
1739       neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;
1740 
1741       /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
1742 	 single-symbolic ranges, try to compute the precise resulting range,
1743 	 but only if we know that this resulting range will also be constant
1744 	 or single-symbolic.  */
1745       if (vr0.kind () == VR_RANGE && vr1.kind () == VR_RANGE
1746 	  && (TREE_CODE (min_op0) == INTEGER_CST
1747 	      || (sym_min_op0
1748 		  = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
1749 	  && (TREE_CODE (min_op1) == INTEGER_CST
1750 	      || (sym_min_op1
1751 		  = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
1752 	  && (!(sym_min_op0 && sym_min_op1)
1753 	      || (sym_min_op0 == sym_min_op1
1754 		  && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
1755 	  && (TREE_CODE (max_op0) == INTEGER_CST
1756 	      || (sym_max_op0
1757 		  = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
1758 	  && (TREE_CODE (max_op1) == INTEGER_CST
1759 	      || (sym_max_op1
1760 		  = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
1761 	  && (!(sym_max_op0 && sym_max_op1)
1762 	      || (sym_max_op0 == sym_max_op1
1763 		  && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
1764 	{
1765 	  wide_int wmin, wmax;
1766 	  wi::overflow_type min_ovf = wi::OVF_NONE;
1767 	  wi::overflow_type max_ovf = wi::OVF_NONE;
1768 
1769 	  /* Build the bounds.  */
1770 	  combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
1771 	  combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);
1772 
1773 	  /* If the resulting range will be symbolic, we need to eliminate any
1774 	     explicit or implicit overflow introduced in the above computation
1775 	     because compare_values could make an incorrect use of it.  That's
1776 	     why we require one of the ranges to be a singleton.  */
1777 	  if ((sym_min_op0 != sym_min_op1 || sym_max_op0 != sym_max_op1)
1778 	      && ((bool)min_ovf || (bool)max_ovf
1779 		  || (min_op0 != max_op0 && min_op1 != max_op1)))
1780 	    {
1781 	      vr->set_varying ();
1782 	      return;
1783 	    }
1784 
1785 	  /* Adjust the range for possible overflow.  */
1786 	  set_value_range_with_overflow (type, min, max, expr_type,
1787 					 wmin, wmax, min_ovf, max_ovf);
1788 	  if (type == VR_VARYING)
1789 	    {
1790 	      vr->set_varying ();
1791 	      return;
1792 	    }
1793 
1794 	  /* Build the symbolic bounds if needed.  */
1795 	  adjust_symbolic_bound (min, code, expr_type,
1796 				 sym_min_op0, sym_min_op1,
1797 				 neg_min_op0, neg_min_op1);
1798 	  adjust_symbolic_bound (max, code, expr_type,
1799 				 sym_max_op0, sym_max_op1,
1800 				 neg_max_op0, neg_max_op1);
1801 	}
1802       else
1803 	{
1804 	  /* For other cases, for example if we have a PLUS_EXPR with two
1805 	     VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
1806 	     to compute a precise range for such a case.
1807 	     ???  General even mixed range kind operations can be expressed
1808 	     by for example transforming ~[3, 5] + [1, 2] to range-only
1809 	     operations and a union primitive:
1810 	       [-INF, 2] + [1, 2]  U  [6, +INF] + [1, 2]
1811 	           [-INF+1, 4]     U    [7, +INF(OVF)]
1812 	     though usually the union is not exactly representable with
1813 	     a single range or anti-range as the above is
1814 		 [-INF+1, +INF(OVF)] intersected with ~[5, 6]
1815 	     but one could use a scheme similar to equivalences for this. */
1816 	  vr->set_varying ();
1817 	  return;
1818 	}
1819     }
1820   else if (code == MIN_EXPR
1821 	   || code == MAX_EXPR)
1822     {
1823       wide_int wmin, wmax;
1824       wide_int vr0_min, vr0_max;
1825       wide_int vr1_min, vr1_max;
1826       extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
1827       extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
1828       if (wide_int_range_min_max (wmin, wmax, code, sign, prec,
1829 				  vr0_min, vr0_max, vr1_min, vr1_max))
1830 	vr->set (VR_RANGE, wide_int_to_tree (expr_type, wmin),
1831 		 wide_int_to_tree (expr_type, wmax));
1832       else
1833 	vr->set_varying ();
1834       return;
1835     }
1836   else if (code == MULT_EXPR)
1837     {
1838       if (!range_int_cst_p (&vr0)
1839 	  || !range_int_cst_p (&vr1))
1840 	{
1841 	  vr->set_varying ();
1842 	  return;
1843 	}
1844       extract_range_from_multiplicative_op (vr, code, &vr0, &vr1);
1845       return;
1846     }
1847   else if (code == RSHIFT_EXPR
1848 	   || code == LSHIFT_EXPR)
1849     {
1850       if (range_int_cst_p (&vr1)
1851 	  && !wide_int_range_shift_undefined_p
1852 		(TYPE_SIGN (TREE_TYPE (vr1.min ())),
1853 		 prec,
1854 		 wi::to_wide (vr1.min ()),
1855 		 wi::to_wide (vr1.max ())))
1856 	{
1857 	  if (code == RSHIFT_EXPR)
1858 	    {
1859 	      /* Even if vr0 is VARYING or otherwise not usable, we can derive
1860 		 useful ranges just from the shift count.  E.g.
1861 		 x >> 63 for signed 64-bit x is always [-1, 0].  */
1862 	      if (vr0.kind () != VR_RANGE || vr0.symbolic_p ())
1863 		vr0.set (VR_RANGE, vrp_val_min (expr_type),
1864 			 vrp_val_max (expr_type));
1865 	      extract_range_from_multiplicative_op (vr, code, &vr0, &vr1);
1866 	      return;
1867 	    }
1868 	  else if (code == LSHIFT_EXPR
1869 		   && range_int_cst_p (&vr0))
1870 	    {
1871 	      wide_int res_lb, res_ub;
1872 	      if (wide_int_range_lshift (res_lb, res_ub, sign, prec,
1873 					 wi::to_wide (vr0.min ()),
1874 					 wi::to_wide (vr0.max ()),
1875 					 wi::to_wide (vr1.min ()),
1876 					 wi::to_wide (vr1.max ()),
1877 					 TYPE_OVERFLOW_UNDEFINED (expr_type)))
1878 		{
1879 		  min = wide_int_to_tree (expr_type, res_lb);
1880 		  max = wide_int_to_tree (expr_type, res_ub);
1881 		  vr->set_and_canonicalize (VR_RANGE, min, max);
1882 		  return;
1883 		}
1884 	    }
1885 	}
1886       vr->set_varying ();
1887       return;
1888     }
1889   else if (code == TRUNC_DIV_EXPR
1890 	   || code == FLOOR_DIV_EXPR
1891 	   || code == CEIL_DIV_EXPR
1892 	   || code == EXACT_DIV_EXPR
1893 	   || code == ROUND_DIV_EXPR)
1894     {
1895       wide_int dividend_min, dividend_max, divisor_min, divisor_max;
1896       wide_int wmin, wmax, extra_min, extra_max;
1897       bool extra_range_p;
1898 
1899       /* Special case explicit division by zero as undefined.  */
1900       if (range_is_null (&vr1))
1901 	{
1902 	  vr->set_undefined ();
1903 	  return;
1904 	}
1905 
1906       /* First, normalize ranges into constants we can handle.  Note
1907 	 that VR_ANTI_RANGE's of constants were already normalized
1908 	 before arriving here.
1909 
1910 	 NOTE: As a future improvement, we may be able to do better
1911 	 with mixed symbolic (anti-)ranges like [0, A].  See note in
1912 	 ranges_from_anti_range.  */
1913       extract_range_into_wide_ints (&vr0, sign, prec,
1914 				    dividend_min, dividend_max);
1915       extract_range_into_wide_ints (&vr1, sign, prec,
1916 				    divisor_min, divisor_max);
1917       if (!wide_int_range_div (wmin, wmax, code, sign, prec,
1918 			       dividend_min, dividend_max,
1919 			       divisor_min, divisor_max,
1920 			       TYPE_OVERFLOW_UNDEFINED (expr_type),
1921 			       extra_range_p, extra_min, extra_max))
1922 	{
1923 	  vr->set_varying ();
1924 	  return;
1925 	}
1926       vr->set (VR_RANGE, wide_int_to_tree (expr_type, wmin),
1927 	       wide_int_to_tree (expr_type, wmax));
1928       if (extra_range_p)
1929 	{
1930 	  value_range_base
1931 	    extra_range (VR_RANGE, wide_int_to_tree (expr_type, extra_min),
1932 			 wide_int_to_tree (expr_type, extra_max));
1933 	  vr->union_ (&extra_range);
1934 	}
1935       return;
1936     }
1937   else if (code == TRUNC_MOD_EXPR)
1938     {
1939       if (range_is_null (&vr1))
1940 	{
1941 	  vr->set_undefined ();
1942 	  return;
1943 	}
1944       wide_int wmin, wmax, tmp;
1945       wide_int vr0_min, vr0_max, vr1_min, vr1_max;
1946       extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
1947       extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
1948       wide_int_range_trunc_mod (wmin, wmax, sign, prec,
1949 				vr0_min, vr0_max, vr1_min, vr1_max);
1950       min = wide_int_to_tree (expr_type, wmin);
1951       max = wide_int_to_tree (expr_type, wmax);
1952       vr->set (VR_RANGE, min, max);
1953       return;
1954     }
1955   else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
1956     {
1957       wide_int may_be_nonzero0, may_be_nonzero1;
1958       wide_int must_be_nonzero0, must_be_nonzero1;
1959       wide_int wmin, wmax;
1960       wide_int vr0_min, vr0_max, vr1_min, vr1_max;
1961       vrp_set_zero_nonzero_bits (expr_type, &vr0,
1962 				 &may_be_nonzero0, &must_be_nonzero0);
1963       vrp_set_zero_nonzero_bits (expr_type, &vr1,
1964 				 &may_be_nonzero1, &must_be_nonzero1);
1965       extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
1966       extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
1967       if (code == BIT_AND_EXPR)
1968 	{
1969 	  if (wide_int_range_bit_and (wmin, wmax, sign, prec,
1970 				      vr0_min, vr0_max,
1971 				      vr1_min, vr1_max,
1972 				      must_be_nonzero0,
1973 				      may_be_nonzero0,
1974 				      must_be_nonzero1,
1975 				      may_be_nonzero1))
1976 	    {
1977 	      min = wide_int_to_tree (expr_type, wmin);
1978 	      max = wide_int_to_tree (expr_type, wmax);
1979 	      vr->set (VR_RANGE, min, max);
1980 	    }
1981 	  else
1982 	    vr->set_varying ();
1983 	  return;
1984 	}
1985       else if (code == BIT_IOR_EXPR)
1986 	{
1987 	  if (wide_int_range_bit_ior (wmin, wmax, sign,
1988 				      vr0_min, vr0_max,
1989 				      vr1_min, vr1_max,
1990 				      must_be_nonzero0,
1991 				      may_be_nonzero0,
1992 				      must_be_nonzero1,
1993 				      may_be_nonzero1))
1994 	    {
1995 	      min = wide_int_to_tree (expr_type, wmin);
1996 	      max = wide_int_to_tree (expr_type, wmax);
1997 	      vr->set (VR_RANGE, min, max);
1998 	    }
1999 	  else
2000 	    vr->set_varying ();
2001 	  return;
2002 	}
2003       else if (code == BIT_XOR_EXPR)
2004 	{
2005 	  if (wide_int_range_bit_xor (wmin, wmax, sign, prec,
2006 				      must_be_nonzero0,
2007 				      may_be_nonzero0,
2008 				      must_be_nonzero1,
2009 				      may_be_nonzero1))
2010 	    {
2011 	      min = wide_int_to_tree (expr_type, wmin);
2012 	      max = wide_int_to_tree (expr_type, wmax);
2013 	      vr->set (VR_RANGE, min, max);
2014 	    }
2015 	  else
2016 	    vr->set_varying ();
2017 	  return;
2018 	}
2019     }
2020   else
2021     gcc_unreachable ();
2022 
2023   /* If either MIN or MAX overflowed, then set the resulting range to
2024      VARYING.  */
2025   if (min == NULL_TREE
2026       || TREE_OVERFLOW_P (min)
2027       || max == NULL_TREE
2028       || TREE_OVERFLOW_P (max))
2029     {
2030       vr->set_varying ();
2031       return;
2032     }
2033 
2034   /* We punt for [-INF, +INF].
2035      We learn nothing when we have INF on both sides.
2036      Note that we do accept [-INF, -INF] and [+INF, +INF].  */
2037   if (vrp_val_is_min (min) && vrp_val_is_max (max))
2038     {
2039       vr->set_varying ();
2040       return;
2041     }
2042 
2043   cmp = compare_values (min, max);
2044   if (cmp == -2 || cmp == 1)
2045     {
2046       /* If the new range has its limits swapped around (MIN > MAX),
2047 		 then the operation caused one of them to wrap around; mark
2048 	 the new range VARYING.  */
2049       vr->set_varying ();
2050     }
2051   else
2052     vr->set (type, min, max);
2053 }
2054 
2055 /* Extract range information from a unary operation CODE based on
2056    the range of its operand *VR0 of type OP0_TYPE, with resulting type TYPE.
2057    The resulting range is stored in *VR.  */
2058 
2059 void
2060 extract_range_from_unary_expr (value_range_base *vr,
2061 			       enum tree_code code, tree type,
2062 			       const value_range_base *vr0_, tree op0_type)
2063 {
2064   signop sign = TYPE_SIGN (type);
2065   unsigned int prec = TYPE_PRECISION (type);
2066   value_range_base vr0 = *vr0_;
2067   value_range_base vrtem0, vrtem1;
2068 
2069   /* VRP only operates on integral and pointer types.  */
2070   if (!(INTEGRAL_TYPE_P (op0_type)
2071 	|| POINTER_TYPE_P (op0_type))
2072       || !(INTEGRAL_TYPE_P (type)
2073 	   || POINTER_TYPE_P (type)))
2074     {
2075       vr->set_varying ();
2076       return;
2077     }
2078 
2079   /* If VR0 is UNDEFINED, so is the result.  */
2080   if (vr0.undefined_p ())
2081     {
2082       vr->set_undefined ();
2083       return;
2084     }
2085 
2086   /* Handle operations that we express in terms of others.  */
2087   if (code == PAREN_EXPR)
2088     {
2089       /* PAREN_EXPR is a simple copy.  */
2090       *vr = vr0;
2091       return;
2092     }
2093   else if (code == NEGATE_EXPR)
2094     {
2095       /* -X is simply 0 - X, so re-use existing code that also handles
2096          anti-ranges fine.  */
2097       value_range_base zero;
2098       zero.set (build_int_cst (type, 0));
2099       extract_range_from_binary_expr (vr, MINUS_EXPR, type, &zero, &vr0);
2100       return;
2101     }
2102   else if (code == BIT_NOT_EXPR)
2103     {
2104       /* ~X is simply -1 - X, so re-use existing code that also handles
2105          anti-ranges fine.  */
2106       value_range_base minusone;
2107       minusone.set (build_int_cst (type, -1));
2108       extract_range_from_binary_expr (vr, MINUS_EXPR, type, &minusone, &vr0);
2109       return;
2110     }
2111 
2112   /* Now canonicalize anti-ranges to ranges when they are not symbolic
2113      and express op ~[]  as (op []') U (op []'').  */
2114   if (vr0.kind () == VR_ANTI_RANGE
2115       && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
2116     {
2117       extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type);
2118       if (!vrtem1.undefined_p ())
2119 	{
2120 	  value_range_base vrres;
2121 	  extract_range_from_unary_expr (&vrres, code, type,
2122 					 &vrtem1, op0_type);
2123 	  vr->union_ (&vrres);
2124 	}
2125       return;
2126     }
2127 
2128   if (CONVERT_EXPR_CODE_P (code))
2129     {
2130       tree inner_type = op0_type;
2131       tree outer_type = type;
2132 
2133       /* If the expression involves a pointer, we are only interested in
2134 	 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]).
2135 
2136 	 This may lose precision when converting (char *)~[0,2] to
2137 	 int, because we'll forget that the pointer can also not be 1
2138 	 or 2.  In practice we don't care, as this is some idiot
2139 	 storing a magic constant to a pointer.  */
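      /* E.g. a pointer known to be non-null, ~[0, 0], converted to
	 another pointer type stays ~[0, 0]; only the null/non-null
	 distinction survives the conversion.  */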
2140       if (POINTER_TYPE_P (type) || POINTER_TYPE_P (op0_type))
2141 	{
2142 	  if (!range_includes_zero_p (&vr0))
2143 	    vr->set_nonnull (type);
2144 	  else if (range_is_null (&vr0))
2145 	    vr->set_null (type);
2146 	  else
2147 	    vr->set_varying ();
2148 	  return;
2149 	}
2150 
2151       /* The POINTER_TYPE_P code above will have dealt with all
2152 	 pointer anti-ranges.  Any remaining anti-ranges at this point
2153 	 will be integer conversions from SSA names that will be
2154 	 normalized into VARYING.  For instance: ~[x_55, x_55].  */
2155       gcc_assert (vr0.kind () != VR_ANTI_RANGE
2156 		  || TREE_CODE (vr0.min ()) != INTEGER_CST);
2157 
2158       /* NOTES: Previously we were returning VARYING for all symbolics, but
2159 	 we can do better by treating them as [TYPE_MIN, TYPE_MAX].  For
2160 	 example, converting [SYM, SYM] from INT to LONG UNSIGNED,
2161 	 we can return: ~[0x80000000, 0xffffffff7fffffff].
2162 
2163 	 We were also failing to convert ~[0,0] from char* to unsigned,
2164 	 instead choosing to return VR_VARYING.  Now we return ~[0,0].  */
2165       wide_int vr0_min, vr0_max, wmin, wmax;
2166       signop inner_sign = TYPE_SIGN (inner_type);
2167       signop outer_sign = TYPE_SIGN (outer_type);
2168       unsigned inner_prec = TYPE_PRECISION (inner_type);
2169       unsigned outer_prec = TYPE_PRECISION (outer_type);
2170       extract_range_into_wide_ints (&vr0, inner_sign, inner_prec,
2171 				    vr0_min, vr0_max);
2172       if (wide_int_range_convert (wmin, wmax,
2173 				  inner_sign, inner_prec,
2174 				  outer_sign, outer_prec,
2175 				  vr0_min, vr0_max))
2176 	{
2177 	  tree min = wide_int_to_tree (outer_type, wmin);
2178 	  tree max = wide_int_to_tree (outer_type, wmax);
2179 	  vr->set_and_canonicalize (VR_RANGE, min, max);
2180 	}
2181       else
2182 	vr->set_varying ();
2183       return;
2184     }
2185   else if (code == ABS_EXPR)
2186     {
2187       wide_int wmin, wmax;
2188       wide_int vr0_min, vr0_max;
2189       extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
2190       if (wide_int_range_abs (wmin, wmax, sign, prec, vr0_min, vr0_max,
2191 			      TYPE_OVERFLOW_UNDEFINED (type)))
2192 	vr->set (VR_RANGE, wide_int_to_tree (type, wmin),
2193 		 wide_int_to_tree (type, wmax));
2194       else
2195 	vr->set_varying ();
2196       return;
2197     }
2198   else if (code == ABSU_EXPR)
2199     {
2200       wide_int wmin, wmax;
2201       wide_int vr0_min, vr0_max;
2202       extract_range_into_wide_ints (&vr0, SIGNED, prec, vr0_min, vr0_max);
2203       wide_int_range_absu (wmin, wmax, prec, vr0_min, vr0_max);
2204       vr->set (VR_RANGE, wide_int_to_tree (type, wmin),
2205 	       wide_int_to_tree (type, wmax));
2206       return;
2207     }
2208 
2209   /* For unhandled operations fall back to varying.  */
2210   vr->set_varying ();
2211   return;
2212 }
2213 
2214 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
2215    create a new SSA name N and return the assertion assignment
2216    'N = ASSERT_EXPR <V, V OP W>'.  */
2217 
2218 static gimple *
2219 build_assert_expr_for (tree cond, tree v)
2220 {
2221   tree a;
2222   gassign *assertion;
2223 
2224   gcc_assert (TREE_CODE (v) == SSA_NAME
2225 	      && COMPARISON_CLASS_P (cond));
2226 
2227   a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
2228   assertion = gimple_build_assign (NULL_TREE, a);
2229 
2230      /* The new ASSERT_EXPR creates a new SSA name that replaces the
2231      operand of the ASSERT_EXPR.  Create it so the new name and the old one
2232      are registered in the replacement table so that we can fix the SSA web
2233      after adding all the ASSERT_EXPRs.  */
2234   tree new_def = create_new_def_for (v, assertion, NULL);
2235   /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
2236      given we have to be able to fully propagate those out to re-create
2237      valid SSA when removing the asserts.  */
2238   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
2239     SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
2240 
2241   return assertion;
2242 }
2243 
2244 
2245 /* Return true if STMT is a GIMPLE_COND predicate involving floating
2246    point values.  */
2247 
2248 static inline bool
2249 fp_predicate (gimple *stmt)
2250 {
2251   GIMPLE_CHECK (stmt, GIMPLE_COND);
2252 
2253   return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
2254 }
2255 
2256 /* If the range of values taken by OP can be inferred after STMT executes,
2257    return the comparison code (COMP_CODE_P) and value (VAL_P) that
2258    describes the inferred range.  Return true if a range could be
2259    inferred.  */
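/* For instance, after a dereference such as *p_1 = x_2, p_1 is known
   to be non-null, so we return the inferred range p_1 != 0, i.e.
   *COMP_CODE_P set to NE_EXPR and *VAL_P to zero.  */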
2260 
2261 bool
2262 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
2263 {
2264   *val_p = NULL_TREE;
2265   *comp_code_p = ERROR_MARK;
2266 
2267   /* Do not attempt to infer anything in names that flow through
2268      abnormal edges.  */
2269   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
2270     return false;
2271 
2272   /* If STMT is the last statement of a basic block with no normal
2273      successors, there is no point inferring anything about any of its
2274      operands.  We would not be able to find a proper insertion point
2275      for the assertion, anyway.  */
2276   if (stmt_ends_bb_p (stmt))
2277     {
2278       edge_iterator ei;
2279       edge e;
2280 
2281       FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
2282 	if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
2283 	  break;
2284       if (e == NULL)
2285 	return false;
2286     }
2287 
2288   if (infer_nonnull_range (stmt, op))
2289     {
2290       *val_p = build_int_cst (TREE_TYPE (op), 0);
2291       *comp_code_p = NE_EXPR;
2292       return true;
2293     }
2294 
2295   return false;
2296 }
2297 
2298 
2299 void dump_asserts_for (FILE *, tree);
2300 void debug_asserts_for (tree);
2301 void dump_all_asserts (FILE *);
2302 void debug_all_asserts (void);
2303 
2304 /* Dump all the registered assertions for NAME to FILE.  */
2305 
2306 void
2307 dump_asserts_for (FILE *file, tree name)
2308 {
2309   assert_locus *loc;
2310 
2311   fprintf (file, "Assertions to be inserted for ");
2312   print_generic_expr (file, name);
2313   fprintf (file, "\n");
2314 
2315   loc = asserts_for[SSA_NAME_VERSION (name)];
2316   while (loc)
2317     {
2318       fprintf (file, "\t");
2319       print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2320       fprintf (file, "\n\tBB #%d", loc->bb->index);
2321       if (loc->e)
2322 	{
2323 	  fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2324 	           loc->e->dest->index);
2325 	  dump_edge_info (file, loc->e, dump_flags, 0);
2326 	}
2327       fprintf (file, "\n\tPREDICATE: ");
2328       print_generic_expr (file, loc->expr);
2329       fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2330       print_generic_expr (file, loc->val);
2331       fprintf (file, "\n\n");
2332       loc = loc->next;
2333     }
2334 
2335   fprintf (file, "\n");
2336 }
2337 
2338 
2339 /* Dump all the registered assertions for NAME to stderr.  */
2340 
2341 DEBUG_FUNCTION void
2342 debug_asserts_for (tree name)
2343 {
2344   dump_asserts_for (stderr, name);
2345 }
2346 
2347 
2348 /* Dump all the registered assertions for all the names to FILE.  */
2349 
2350 void
2351 dump_all_asserts (FILE *file)
2352 {
2353   unsigned i;
2354   bitmap_iterator bi;
2355 
2356   fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2357   EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2358     dump_asserts_for (file, ssa_name (i));
2359   fprintf (file, "\n");
2360 }
2361 
2362 
2363 /* Dump all the registered assertions for all the names to stderr.  */
2364 
2365 DEBUG_FUNCTION void
2366 debug_all_asserts (void)
2367 {
2368   dump_all_asserts (stderr);
2369 }
2370 
2371 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS.  */
2372 
2373 static void
2374 add_assert_info (vec<assert_info> &asserts,
2375 		 tree name, tree expr, enum tree_code comp_code, tree val)
2376 {
2377   assert_info info;
2378   info.comp_code = comp_code;
2379   info.name = name;
2380   if (TREE_OVERFLOW_P (val))
2381     val = drop_tree_overflow (val);
2382   info.val = val;
2383   info.expr = expr;
2384   asserts.safe_push (info);
2385   if (dump_enabled_p ())
2386     dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
2387 		 "Adding assert for %T from %T %s %T\n",
2388 		 name, expr, op_symbol_code (comp_code), val);
2389 }
2390 
2391 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2392    'EXPR COMP_CODE VAL' at a location that dominates block BB or
2393    E->DEST, then register this location as a possible insertion point
2394    for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2395 
2396    BB, E and SI provide the exact insertion point for the new
2397    ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
2398    on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2399    BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2400    must not be NULL.  */
2401 
2402 static void
2403 register_new_assert_for (tree name, tree expr,
2404 			 enum tree_code comp_code,
2405 			 tree val,
2406 			 basic_block bb,
2407 			 edge e,
2408 			 gimple_stmt_iterator si)
2409 {
2410   assert_locus *n, *loc, *last_loc;
2411   basic_block dest_bb;
2412 
2413   gcc_checking_assert (bb == NULL || e == NULL);
2414 
2415   if (e == NULL)
2416     gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2417 			 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2418 
2419   /* Never build an assert comparing against an integer constant with
2420      TREE_OVERFLOW set.  This confuses our undefined overflow warning
2421      machinery.  */
2422   if (TREE_OVERFLOW_P (val))
2423     val = drop_tree_overflow (val);
2424 
2425   /* The new assertion A will be inserted at BB or E.  We need to
2426      determine if the new location is dominated by a previously
2427      registered location for A.  If we are doing an edge insertion,
2428      assume that A will be inserted at E->DEST.  Note that this is not
2429      necessarily true.
2430 
2431      If E is a critical edge, it will be split.  But even if E is
2432      split, the new block will dominate the same set of blocks that
2433      E->DEST dominates.
2434 
2435      The reverse, however, is not true: blocks dominated by E->DEST
2436      will not be dominated by the new block created to split E.  So,
2437      if the insertion location is on a critical edge, we will not use
2438      the new location to move another assertion previously registered
2439      at a block dominated by E->DEST.  */
2440   dest_bb = (bb) ? bb : e->dest;
2441 
2442   /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2443      VAL at a block dominating DEST_BB, then we don't need to insert a new
2444      one.  Similarly, if the same assertion already exists at a block
2445      dominated by DEST_BB and the new location is not on a critical
2446      edge, then update the existing location for the assertion (i.e.,
2447      move the assertion up in the dominance tree).
2448 
2449      Note, this is implemented as a simple linked list because there
2450      should not be more than a handful of assertions registered per
2451      name.  If this becomes a performance problem, a table hashed by
2452      COMP_CODE and VAL could be implemented.  */
2453   loc = asserts_for[SSA_NAME_VERSION (name)];
2454   last_loc = loc;
2455   while (loc)
2456     {
2457       if (loc->comp_code == comp_code
2458 	  && (loc->val == val
2459 	      || operand_equal_p (loc->val, val, 0))
2460 	  && (loc->expr == expr
2461 	      || operand_equal_p (loc->expr, expr, 0)))
2462 	{
2463 	  /* If E is not a critical edge and DEST_BB
2464 	     dominates the existing location for the assertion, move
2465 	     the assertion up in the dominance tree by updating its
2466 	     location information.  */
2467 	  if ((e == NULL || !EDGE_CRITICAL_P (e))
2468 	      && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2469 	    {
2470 	      loc->bb = dest_bb;
2471 	      loc->e = e;
2472 	      loc->si = si;
2473 	      return;
2474 	    }
2475 	}
2476 
2477       /* Update the last node of the list and move to the next one.  */
2478       last_loc = loc;
2479       loc = loc->next;
2480     }
2481 
2482   /* If we didn't find an assertion already registered for
2483      NAME COMP_CODE VAL, add a new one at the end of the list of
2484      assertions associated with NAME.  */
2485   n = XNEW (struct assert_locus);
2486   n->bb = dest_bb;
2487   n->e = e;
2488   n->si = si;
2489   n->comp_code = comp_code;
2490   n->val = val;
2491   n->expr = expr;
2492   n->next = NULL;
2493 
2494   if (last_loc)
2495     last_loc->next = n;
2496   else
2497     asserts_for[SSA_NAME_VERSION (name)] = n;
2498 
2499   bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2500 }
2501 
2502 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
2503    Extract a suitable test code and value and store them into *CODE_P and
2504    *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
2505 
2506    If no extraction was possible, return FALSE, otherwise return TRUE.
2507 
2508    If INVERT is true, then we invert the result stored into *CODE_P.  */
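/* For example, given the predicate 10 < x_3 and NAME x_3, the
   comparison is flipped so that NAME comes first, yielding x_3 > 10;
   with INVERT set it becomes x_3 <= 10.  */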
2509 
2510 static bool
2511 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
2512 					 tree cond_op0, tree cond_op1,
2513 					 bool invert, enum tree_code *code_p,
2514 					 tree *val_p)
2515 {
2516   enum tree_code comp_code;
2517   tree val;
2518 
2519   /* We have a comparison of the form NAME COMP VAL
2520      or VAL COMP NAME.  */
2521   if (name == cond_op1)
2522     {
2523       /* If the predicate is of the form VAL COMP NAME, flip
2524 	 COMP around because we need to register NAME as the
2525 	 first operand in the predicate.  */
2526       comp_code = swap_tree_comparison (cond_code);
2527       val = cond_op0;
2528     }
2529   else if (name == cond_op0)
2530     {
2531       /* The comparison is of the form NAME COMP VAL, so the
2532 	 comparison code remains unchanged.  */
2533       comp_code = cond_code;
2534       val = cond_op1;
2535     }
2536   else
2537     gcc_unreachable ();
2538 
2539   /* Invert the comparison code as necessary.  */
2540   if (invert)
2541     comp_code = invert_tree_comparison (comp_code, 0);
2542 
2543   /* VRP only handles integral and pointer types.  */
2544   if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
2545       && ! POINTER_TYPE_P (TREE_TYPE (val)))
2546     return false;
2547 
2548   /* Do not register always-false predicates.
2549      FIXME:  this works around a limitation in fold() when dealing with
2550      enumerations.  Given 'enum { N1, N2 } x;', fold will not
2551      fold 'if (x > N2)' to 'if (0)'.  */
2552   if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
2553       && INTEGRAL_TYPE_P (TREE_TYPE (val)))
2554     {
2555       tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
2556       tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
2557 
2558       if (comp_code == GT_EXPR
2559 	  && (!max
2560 	      || compare_values (val, max) == 0))
2561 	return false;
2562 
2563       if (comp_code == LT_EXPR
2564 	  && (!min
2565 	      || compare_values (val, min) == 0))
2566 	return false;
2567     }
2568   *code_p = comp_code;
2569   *val_p = val;
2570   return true;
2571 }
2572 
2573 /* Find the smallest RES where RES > VAL && (RES & MASK) == RES, if any
2574    (otherwise return VAL).  VAL and MASK must be zero-extended for
2575    precision PREC.  If SGNBIT is non-zero, first xor VAL with SGNBIT
2576    (to transform signed values into unsigned) and at the end xor
2577    SGNBIT back.  */
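/* A worked example: with PREC 8, MASK 0b11001100, SGNBIT 0 and VAL 5
   (0b00000101), the loop tries the lowest MASK bit (bit 2): VAL + 4 = 9,
   cleared below bit 2 and masked, gives RES = 0b00001000 = 8, which is
   greater than VAL, so 8 is returned -- the smallest value above 5 whose
   set bits all lie within MASK.  */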
2578 
2579 static wide_int
2580 masked_increment (const wide_int &val_in, const wide_int &mask,
2581 		  const wide_int &sgnbit, unsigned int prec)
2582 {
2583   wide_int bit = wi::one (prec), res;
2584   unsigned int i;
2585 
2586   wide_int val = val_in ^ sgnbit;
2587   for (i = 0; i < prec; i++, bit += bit)
2588     {
2589       res = mask;
2590       if ((res & bit) == 0)
2591 	continue;
2592       res = bit - 1;
2593       res = wi::bit_and_not (val + bit, res);
2594       res &= mask;
2595       if (wi::gtu_p (res, val))
2596 	return res ^ sgnbit;
2597     }
2598   return val ^ sgnbit;
2599 }
2600 
2601 /* Helper for overflow_comparison_p
2602 
2603    OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
2604    OP1's defining statement to see if it ultimately has the form
2605    OP0 CODE (OP0 PLUS INTEGER_CST)
2606 
2607    If so, return TRUE indicating this is an overflow test and store into
2608    *NEW_CST an updated constant that can be used in a narrowed range test.
2609 
2610    REVERSED indicates if the comparison was originally:
2611 
2612    OP1 CODE' OP0.
2613 
2614    This affects how we build the updated constant.  */
2615 
2616 static bool
2617 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
2618 		         bool follow_assert_exprs, bool reversed, tree *new_cst)
2619 {
2620   /* See if this is a relational operation between two SSA_NAMES with
2621      unsigned, overflow wrapping values.  If so, check it more deeply.  */
2622   if ((code == LT_EXPR || code == LE_EXPR
2623        || code == GE_EXPR || code == GT_EXPR)
2624       && TREE_CODE (op0) == SSA_NAME
2625       && TREE_CODE (op1) == SSA_NAME
2626       && INTEGRAL_TYPE_P (TREE_TYPE (op0))
2627       && TYPE_UNSIGNED (TREE_TYPE (op0))
2628       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
2629     {
2630       gimple *op1_def = SSA_NAME_DEF_STMT (op1);
2631 
2632       /* If requested, follow any ASSERT_EXPRs backwards for OP1.  */
2633       if (follow_assert_exprs)
2634 	{
2635 	  while (gimple_assign_single_p (op1_def)
2636 		 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
2637 	    {
2638 	      op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
2639 	      if (TREE_CODE (op1) != SSA_NAME)
2640 		break;
2641 	      op1_def = SSA_NAME_DEF_STMT (op1);
2642 	    }
2643 	}
2644 
2645       /* Now look at the defining statement of OP1 to see if it adds
2646 	 or subtracts a nonzero constant from another operand.  */
2647       if (op1_def
2648 	  && is_gimple_assign (op1_def)
2649 	  && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
2650 	  && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
2651 	  && !integer_zerop (gimple_assign_rhs2 (op1_def)))
2652 	{
2653 	  tree target = gimple_assign_rhs1 (op1_def);
2654 
2655 	  /* If requested, follow ASSERT_EXPRs backwards for op0 looking
2656 	     for one where TARGET appears on the RHS.  */
2657 	  if (follow_assert_exprs)
2658 	    {
2659 	      /* Now see if that "other operand" is op0, following the chain
2660 		 of ASSERT_EXPRs if necessary.  */
2661 	      gimple *op0_def = SSA_NAME_DEF_STMT (op0);
2662 	      while (op0 != target
2663 		     && gimple_assign_single_p (op0_def)
2664 		     && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
2665 		{
2666 		  op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
2667 		  if (TREE_CODE (op0) != SSA_NAME)
2668 		    break;
2669 		  op0_def = SSA_NAME_DEF_STMT (op0);
2670 		}
2671 	    }
2672 
2673 	  /* If we did not find our target SSA_NAME, then this is not
2674 	     an overflow test.  */
2675 	  if (op0 != target)
2676 	    return false;
2677 
2678 	  tree type = TREE_TYPE (op0);
2679 	  wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
2680 	  tree inc = gimple_assign_rhs2 (op1_def);
2681 	  if (reversed)
2682 	    *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
2683 	  else
2684 	    *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
2685 	  return true;
2686 	}
2687     }
2688   return false;
2689 }
2690 
2691 /* OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
2692    OP1's defining statement to see if it ultimately has the form
2693    OP0 CODE (OP0 PLUS INTEGER_CST)
2694 
2695    If so, return TRUE indicating this is an overflow test and store into
2696    *NEW_CST an updated constant that can be used in a narrowed range test.
2697 
2698    These statements are left as-is in the IL to facilitate discovery of
2699    {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline.  But
2700    the alternate range representation is often useful within VRP.  */
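/* For instance, with a wrapping unsigned char x, the test x < x + 10
   is an overflow check: it is false exactly when x + 10 wraps, i.e.
   when x > 0xff - 10, so *NEW_CST becomes 245 and the test can be
   narrowed to x <= 245.  */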
2701 
2702 bool
2703 overflow_comparison_p (tree_code code, tree name, tree val,
2704 		       bool use_equiv_p, tree *new_cst)
2705 {
2706   if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
2707     return true;
2708   return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
2709 				  use_equiv_p, true, new_cst);
2710 }
2711 
2712 
2713 /* Try to register an edge assertion for SSA name NAME on edge E for
2714    the condition (COND_OP0 COND_CODE COND_OP1) contributing to the
2715    conditional jump.  Invert the condition if INVERT is true.  */
2716 
2717 static void
2718 register_edge_assert_for_2 (tree name, edge e,
2719 			    enum tree_code cond_code,
2720 			    tree cond_op0, tree cond_op1, bool invert,
2721 			    vec<assert_info> &asserts)
2722 {
2723   tree val;
2724   enum tree_code comp_code;
2725 
2726   if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
2727 						cond_op0,
2728 						cond_op1,
2729 						invert, &comp_code, &val))
2730     return;
2731 
2732   /* Queue the assert.  */
2733   tree x;
2734   if (overflow_comparison_p (comp_code, name, val, false, &x))
2735     {
2736       enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
2737 				 ? GT_EXPR : LE_EXPR);
2738       add_assert_info (asserts, name, name, new_code, x);
2739     }
2740   add_assert_info (asserts, name, name, comp_code, val);
2741 
2742   /* In the case of NAME <= CST and NAME being defined as
2743      NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
2744      and NAME2 <= CST - CST2.  We can do the same for NAME > CST.
2745      This catches range and anti-range tests.  */
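  /* E.g. the classic range-test idiom: for unsigned x_2 with
     tmp_3 = x_2 + 0xfffffff6 (i.e. x_2 - 10), the test tmp_3 <= 20
     holds iff 10 <= x_2 && x_2 <= 30, so we register an assert for
     x_2 over the expression x_2 - 10 <= 20.  */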
2746   if ((comp_code == LE_EXPR
2747        || comp_code == GT_EXPR)
2748       && TREE_CODE (val) == INTEGER_CST
2749       && TYPE_UNSIGNED (TREE_TYPE (val)))
2750     {
2751       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2752       tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
2753 
2754       /* Extract CST2 from the (optional) addition.  */
2755       if (is_gimple_assign (def_stmt)
2756 	  && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
2757 	{
2758 	  name2 = gimple_assign_rhs1 (def_stmt);
2759 	  cst2 = gimple_assign_rhs2 (def_stmt);
2760 	  if (TREE_CODE (name2) == SSA_NAME
2761 	      && TREE_CODE (cst2) == INTEGER_CST)
2762 	    def_stmt = SSA_NAME_DEF_STMT (name2);
2763 	}
2764 
2765       /* Extract NAME2 from the (optional) sign-changing cast.  */
2766       if (gimple_assign_cast_p (def_stmt))
2767 	{
2768 	  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
2769 	      && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
2770 	      && (TYPE_PRECISION (gimple_expr_type (def_stmt))
2771 		  == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
2772 	    name3 = gimple_assign_rhs1 (def_stmt);
2773 	}
2774 
2775       /* If name3 is used later, create an ASSERT_EXPR for it.  */
2776       if (name3 != NULL_TREE
2777       	  && TREE_CODE (name3) == SSA_NAME
2778 	  && (cst2 == NULL_TREE
2779 	      || TREE_CODE (cst2) == INTEGER_CST)
2780 	  && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
2781 	{
2782 	  tree tmp;
2783 
2784 	  /* Build an expression for the range test.  */
2785 	  tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
2786 	  if (cst2 != NULL_TREE)
2787 	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2788 	  add_assert_info (asserts, name3, tmp, comp_code, val);
2789 	}
2790 
2791       /* If name2 is used later, create an ASSERT_EXPR for it.  */
2792       if (name2 != NULL_TREE
2793       	  && TREE_CODE (name2) == SSA_NAME
2794 	  && TREE_CODE (cst2) == INTEGER_CST
2795 	  && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
2796 	{
2797 	  tree tmp;
2798 
2799 	  /* Build an expression for the range test.  */
2800 	  tmp = name2;
2801 	  if (TREE_TYPE (name) != TREE_TYPE (name2))
2802 	    tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
2803 	  if (cst2 != NULL_TREE)
2804 	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2805 	  add_assert_info (asserts, name2, tmp, comp_code, val);
2806 	}
2807     }
2808 
2809   /* In the case of post-increment/decrement tests like if (i++) ... and
2810      uses of the incremented/decremented value on the edge, the extra name
2811      we want to assert for is not on the def chain of the name compared.
2812      Instead it is in the set of use stmts.
2813      Similar cases happen for conversions that were simplified through
2814      fold_{sign_changed,widened}_comparison.  */
2815   if ((comp_code == NE_EXPR
2816        || comp_code == EQ_EXPR)
2817       && TREE_CODE (val) == INTEGER_CST)
2818     {
2819       imm_use_iterator ui;
2820       gimple *use_stmt;
2821       FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
2822 	{
2823 	  if (!is_gimple_assign (use_stmt))
2824 	    continue;
2825 
2826 	  /* Restrict to use-stmts whose block dominates the predecessor.  */
2827 	  if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
2828 	    continue;
2829 
2830 	  tree name2 = gimple_assign_lhs (use_stmt);
2831 	  if (TREE_CODE (name2) != SSA_NAME)
2832 	    continue;
2833 
2834 	  enum tree_code code = gimple_assign_rhs_code (use_stmt);
2835 	  tree cst;
2836 	  if (code == PLUS_EXPR
2837 	      || code == MINUS_EXPR)
2838 	    {
2839 	      cst = gimple_assign_rhs2 (use_stmt);
2840 	      if (TREE_CODE (cst) != INTEGER_CST)
2841 		continue;
2842 	      cst = int_const_binop (code, val, cst);
2843 	    }
2844 	  else if (CONVERT_EXPR_CODE_P (code))
2845 	    {
2846 	      /* For truncating conversions we cannot record
2847 		 an inequality.  */
2848 	      if (comp_code == NE_EXPR
2849 		  && (TYPE_PRECISION (TREE_TYPE (name2))
2850 		      < TYPE_PRECISION (TREE_TYPE (name))))
2851 		continue;
2852 	      cst = fold_convert (TREE_TYPE (name2), val);
2853 	    }
2854 	  else
2855 	    continue;
2856 
2857 	  if (TREE_OVERFLOW_P (cst))
2858 	    cst = drop_tree_overflow (cst);
2859 	  add_assert_info (asserts, name2, name2, comp_code, cst);
2860 	}
2861     }
2862 
2863   if (TREE_CODE_CLASS (comp_code) == tcc_comparison
2864       && TREE_CODE (val) == INTEGER_CST)
2865     {
2866       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2867       tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
2868       tree val2 = NULL_TREE;
2869       unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
2870       wide_int mask = wi::zero (prec);
2871       unsigned int nprec = prec;
2872       enum tree_code rhs_code = ERROR_MARK;
2873 
2874       if (is_gimple_assign (def_stmt))
2875 	rhs_code = gimple_assign_rhs_code (def_stmt);
2876 
2877       /* In the case of NAME != CST1 where NAME = A +- CST2 we can
2878          assert that A != CST1 -+ CST2.  */
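      /* E.g. if n_1 = a_2 + 5 and the edge tests n_1 != 8, we can
	 assert a_2 != 3 on that edge.  */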
2879       if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
2880 	  && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
2881 	{
2882 	  tree op0 = gimple_assign_rhs1 (def_stmt);
2883 	  tree op1 = gimple_assign_rhs2 (def_stmt);
2884 	  if (TREE_CODE (op0) == SSA_NAME
2885 	      && TREE_CODE (op1) == INTEGER_CST)
2886 	    {
2887 	      enum tree_code reverse_op = (rhs_code == PLUS_EXPR
2888 					   ? MINUS_EXPR : PLUS_EXPR);
2889 	      op1 = int_const_binop (reverse_op, val, op1);
2890 	      if (TREE_OVERFLOW (op1))
2891 		op1 = drop_tree_overflow (op1);
2892 	      add_assert_info (asserts, op0, op0, comp_code, op1);
2893 	    }
2894 	}
2895 
2896       /* Add asserts for NAME cmp CST and NAME being defined
2897 	 as NAME = (int) NAME2.  */
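      /* E.g. for 32-bit int n_1 = (int) u_2 and the test n_1 < 0, the
	 bias 0x80000000 maps the signed order onto the unsigned order,
	 so below we assert (u_2 + 0x80000000) <= 0x7fffffff.  */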
2898       if (!TYPE_UNSIGNED (TREE_TYPE (val))
2899 	  && (comp_code == LE_EXPR || comp_code == LT_EXPR
2900 	      || comp_code == GT_EXPR || comp_code == GE_EXPR)
2901 	  && gimple_assign_cast_p (def_stmt))
2902 	{
2903 	  name2 = gimple_assign_rhs1 (def_stmt);
2904 	  if (CONVERT_EXPR_CODE_P (rhs_code)
2905 	      && TREE_CODE (name2) == SSA_NAME
2906 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2907 	      && TYPE_UNSIGNED (TREE_TYPE (name2))
2908 	      && prec == TYPE_PRECISION (TREE_TYPE (name2))
2909 	      && (comp_code == LE_EXPR || comp_code == GT_EXPR
2910 		  || !tree_int_cst_equal (val,
2911 					  TYPE_MIN_VALUE (TREE_TYPE (val)))))
2912 	    {
2913 	      tree tmp, cst;
2914 	      enum tree_code new_comp_code = comp_code;
2915 
2916 	      cst = fold_convert (TREE_TYPE (name2),
2917 				  TYPE_MIN_VALUE (TREE_TYPE (val)));
2918 	      /* Build an expression for the range test.  */
2919 	      tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
2920 	      cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
2921 				 fold_convert (TREE_TYPE (name2), val));
2922 	      if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2923 		{
2924 		  new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
2925 		  cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
2926 				     build_int_cst (TREE_TYPE (name2), 1));
2927 		}
2928 	      add_assert_info (asserts, name2, tmp, new_comp_code, cst);
2929 	    }
2930 	}
2931 
2932       /* Add asserts for NAME cmp CST and NAME being defined as
2933 	 NAME = NAME2 >> CST2.
2934 
2935 	 Extract CST2 from the right shift.  */
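      /* E.g. for unsigned y_1 = x_2 >> 4, the test y_1 == 3 implies
	 x_2 is in [0x30, 0x3f], which is registered below as
	 (x_2 - 0x30) <= 0xf.  */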
2936       if (rhs_code == RSHIFT_EXPR)
2937 	{
2938 	  name2 = gimple_assign_rhs1 (def_stmt);
2939 	  cst2 = gimple_assign_rhs2 (def_stmt);
2940 	  if (TREE_CODE (name2) == SSA_NAME
2941 	      && tree_fits_uhwi_p (cst2)
2942 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2943 	      && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
2944 	      && type_has_mode_precision_p (TREE_TYPE (val)))
2945 	    {
2946 	      mask = wi::mask (tree_to_uhwi (cst2), false, prec);
2947 	      val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
2948 	    }
2949 	}
2950       if (val2 != NULL_TREE
2951 	  && TREE_CODE (val2) == INTEGER_CST
2952 	  && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
2953 					    TREE_TYPE (val),
2954 					    val2, cst2), val))
2955 	{
2956 	  enum tree_code new_comp_code = comp_code;
2957 	  tree tmp, new_val;
2958 
2959 	  tmp = name2;
2960 	  if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
2961 	    {
2962 	      if (!TYPE_UNSIGNED (TREE_TYPE (val)))
2963 		{
2964 		  tree type = build_nonstandard_integer_type (prec, 1);
2965 		  tmp = build1 (NOP_EXPR, type, name2);
2966 		  val2 = fold_convert (type, val2);
2967 		}
2968 	      tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
2969 	      new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
2970 	      new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
2971 	    }
2972 	  else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2973 	    {
2974 	      wide_int minval
2975 		= wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2976 	      new_val = val2;
2977 	      if (minval == wi::to_wide (new_val))
2978 		new_val = NULL_TREE;
2979 	    }
2980 	  else
2981 	    {
2982 	      wide_int maxval
2983 		= wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2984 	      mask |= wi::to_wide (val2);
2985 	      if (wi::eq_p (mask, maxval))
2986 		new_val = NULL_TREE;
2987 	      else
2988 		new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
2989 	    }
2990 
2991 	  if (new_val)
2992 	    add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
2993 	}
2994 
2995       /* If we have a conversion that doesn't change the value of the source,
2996          simply register the same assert for it.  */
2997       if (CONVERT_EXPR_CODE_P (rhs_code))
2998 	{
2999 	  wide_int rmin, rmax;
3000 	  tree rhs1 = gimple_assign_rhs1 (def_stmt);
3001 	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
3002 	      && TREE_CODE (rhs1) == SSA_NAME
3003 	      /* Make sure the relation preserves the upper/lower boundary of
3004 	         the range conservatively.  */
3005 	      && (comp_code == NE_EXPR
3006 		  || comp_code == EQ_EXPR
3007 		  || (TYPE_SIGN (TREE_TYPE (name))
3008 		      == TYPE_SIGN (TREE_TYPE (rhs1)))
3009 		  || ((comp_code == LE_EXPR
3010 		       || comp_code == LT_EXPR)
3011 		      && !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
3012 		  || ((comp_code == GE_EXPR
3013 		       || comp_code == GT_EXPR)
3014 		      && TYPE_UNSIGNED (TREE_TYPE (rhs1))))
3015 	      /* And the conversion does not alter the value we compare
3016 	         against and all values in rhs1 can be represented in
3017 		 the converted to type.  */
3018 	      && int_fits_type_p (val, TREE_TYPE (rhs1))
3019 	      && ((TYPE_PRECISION (TREE_TYPE (name))
3020 		   > TYPE_PRECISION (TREE_TYPE (rhs1)))
3021 		  || (get_range_info (rhs1, &rmin, &rmax) == VR_RANGE
3022 		      && wi::fits_to_tree_p (rmin, TREE_TYPE (name))
3023 		      && wi::fits_to_tree_p (rmax, TREE_TYPE (name)))))
3024 	    add_assert_info (asserts, rhs1, rhs1,
3025 		 	     comp_code, fold_convert (TREE_TYPE (rhs1), val));
3026 	}
3027 
3028       /* Add asserts for NAME cmp CST and NAME being defined as
3029 	 NAME = NAME2 & CST2.
3030 
3031 	 Extract CST2 from the and.
3032 
3033 	 Also handle
3034 	 NAME = (unsigned) NAME2;
3035 	 casts where NAME's type is unsigned and has smaller precision
3036 	 than NAME2's type as if it was NAME = NAME2 & MASK.  */
3037       names[0] = NULL_TREE;
3038       names[1] = NULL_TREE;
3039       cst2 = NULL_TREE;
3040       if (rhs_code == BIT_AND_EXPR
3041 	  || (CONVERT_EXPR_CODE_P (rhs_code)
3042 	      && INTEGRAL_TYPE_P (TREE_TYPE (val))
3043 	      && TYPE_UNSIGNED (TREE_TYPE (val))
3044 	      && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
3045 		 > prec))
3046 	{
3047 	  name2 = gimple_assign_rhs1 (def_stmt);
3048 	  if (rhs_code == BIT_AND_EXPR)
3049 	    cst2 = gimple_assign_rhs2 (def_stmt);
3050 	  else
3051 	    {
3052 	      cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
3053 	      nprec = TYPE_PRECISION (TREE_TYPE (name2));
3054 	    }
3055 	  if (TREE_CODE (name2) == SSA_NAME
3056 	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
3057 	      && TREE_CODE (cst2) == INTEGER_CST
3058 	      && !integer_zerop (cst2)
3059 	      && (nprec > 1
3060 		  || TYPE_UNSIGNED (TREE_TYPE (val))))
3061 	    {
3062 	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
3063 	      if (gimple_assign_cast_p (def_stmt2))
3064 		{
3065 		  names[1] = gimple_assign_rhs1 (def_stmt2);
3066 		  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
3067 		      || TREE_CODE (names[1]) != SSA_NAME
3068 		      || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
3069 		      || (TYPE_PRECISION (TREE_TYPE (name2))
3070 			  != TYPE_PRECISION (TREE_TYPE (names[1]))))
3071 		    names[1] = NULL_TREE;
3072 		}
3073 	      names[0] = name2;
3074 	    }
3075 	}
3076       if (names[0] || names[1])
3077 	{
3078 	  wide_int minv, maxv, valv, cst2v;
3079 	  wide_int tem, sgnbit;
3080 	  bool valid_p = false, valn, cst2n;
3081 	  enum tree_code ccode = comp_code;
3082 
3083 	  valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
3084 	  cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
3085 	  valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
3086 	  cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
3087 	  /* If CST2 doesn't have the most significant bit set,
3088 	     but VAL is negative, we have a comparison like
3089 	     if ((x & 0x123) > -4) (always true).  Just give up.  */
3090 	  if (!cst2n && valn)
3091 	    ccode = ERROR_MARK;
3092 	  if (cst2n)
3093 	    sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3094 	  else
3095 	    sgnbit = wi::zero (nprec);
3096 	  minv = valv & cst2v;
3097 	  switch (ccode)
3098 	    {
3099 	    case EQ_EXPR:
3100 	      /* Minimum unsigned value for equality is VAL & CST2
3101 		 (should be equal to VAL, otherwise we probably should
3102 		 have folded the comparison into false) and
3103 		 maximum unsigned value is VAL | ~CST2.  */
3104 	      maxv = valv | ~cst2v;
3105 	      valid_p = true;
3106 	      break;
3107 
3108 	    case NE_EXPR:
3109 	      tem = valv | ~cst2v;
3110 	      /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U.  */
3111 	      if (valv == 0)
3112 		{
3113 		  cst2n = false;
3114 		  sgnbit = wi::zero (nprec);
3115 		  goto gt_expr;
3116 		}
3117 	      /* If (VAL | ~CST2) is all ones, handle it as
3118 		 (X & CST2) < VAL.  */
3119 	      if (tem == -1)
3120 		{
3121 		  cst2n = false;
3122 		  valn = false;
3123 		  sgnbit = wi::zero (nprec);
3124 		  goto lt_expr;
3125 		}
3126 	      if (!cst2n && wi::neg_p (cst2v))
3127 		sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
3128 	      if (sgnbit != 0)
3129 		{
3130 		  if (valv == sgnbit)
3131 		    {
3132 		      cst2n = true;
3133 		      valn = true;
3134 		      goto gt_expr;
3135 		    }
3136 		  if (tem == wi::mask (nprec - 1, false, nprec))
3137 		    {
3138 		      cst2n = true;
3139 		      goto lt_expr;
3140 		    }
3141 		  if (!cst2n)
3142 		    sgnbit = wi::zero (nprec);
3143 		}
3144 	      break;
3145 
3146 	    case GE_EXPR:
3147 	      /* Minimum unsigned value for >= if (VAL & CST2) == VAL
3148 		 is VAL and maximum unsigned value is ~0.  For signed
3149 		 comparison, if CST2 doesn't have most significant bit
3150 		 set, handle it similarly.  If CST2 has MSB set,
3151 		 the minimum is the same, and maximum is ~0U/2.  */
3152 	      if (minv != valv)
3153 		{
3154 		  /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
3155 		     VAL.  */
3156 		  minv = masked_increment (valv, cst2v, sgnbit, nprec);
3157 		  if (minv == valv)
3158 		    break;
3159 		}
3160 	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3161 	      valid_p = true;
3162 	      break;
3163 
3164 	    case GT_EXPR:
3165 	    gt_expr:
3166 	      /* Find out smallest MINV where MINV > VAL
3167 		 && (MINV & CST2) == MINV, if any.  If VAL is signed and
3168 		 CST2 has MSB set, compute it biased by 1 << (nprec - 1).  */
3169 	      minv = masked_increment (valv, cst2v, sgnbit, nprec);
3170 	      if (minv == valv)
3171 		break;
3172 	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
3173 	      valid_p = true;
3174 	      break;
3175 
3176 	    case LE_EXPR:
3177 	      /* Minimum unsigned value for <= is 0 and maximum
3178 		 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
3179 		 Otherwise, find smallest VAL2 where VAL2 > VAL
3180 		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3181 		 as maximum.
3182 		 For signed comparison, if CST2 doesn't have most
3183 		 significant bit set, handle it similarly.  If CST2 has
3184 		 MSB set, the maximum is the same and minimum is INT_MIN.  */
3185 	      if (minv == valv)
3186 		maxv = valv;
3187 	      else
3188 		{
3189 		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3190 		  if (maxv == valv)
3191 		    break;
3192 		  maxv -= 1;
3193 		}
3194 	      maxv |= ~cst2v;
3195 	      minv = sgnbit;
3196 	      valid_p = true;
3197 	      break;
3198 
3199 	    case LT_EXPR:
3200 	    lt_expr:
3201 	      /* Minimum unsigned value for < is 0 and maximum
3202 		 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
3203 		 Otherwise, find smallest VAL2 where VAL2 > VAL
3204 		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
3205 		 as maximum.
3206 		 For signed comparison, if CST2 doesn't have most
3207 		 significant bit set, handle it similarly.  If CST2 has
3208 		 MSB set, the maximum is the same and minimum is INT_MIN.  */
3209 	      if (minv == valv)
3210 		{
3211 		  if (valv == sgnbit)
3212 		    break;
3213 		  maxv = valv;
3214 		}
3215 	      else
3216 		{
3217 		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3218 		  if (maxv == valv)
3219 		    break;
3220 		}
3221 	      maxv -= 1;
3222 	      maxv |= ~cst2v;
3223 	      minv = sgnbit;
3224 	      valid_p = true;
3225 	      break;
3226 
3227 	    default:
3228 	      break;
3229 	    }
3230 	  if (valid_p
3231 	      && (maxv - minv) != -1)
3232 	    {
3233 	      tree tmp, new_val, type;
3234 	      int i;
3235 
3236 	      for (i = 0; i < 2; i++)
3237 		if (names[i])
3238 		  {
3239 		    wide_int maxv2 = maxv;
3240 		    tmp = names[i];
3241 		    type = TREE_TYPE (names[i]);
3242 		    if (!TYPE_UNSIGNED (type))
3243 		      {
3244 			type = build_nonstandard_integer_type (nprec, 1);
3245 			tmp = build1 (NOP_EXPR, type, names[i]);
3246 		      }
3247 		    if (minv != 0)
3248 		      {
3249 			tmp = build2 (PLUS_EXPR, type, tmp,
3250 				      wide_int_to_tree (type, -minv));
3251 			maxv2 = maxv - minv;
3252 		      }
3253 		    new_val = wide_int_to_tree (type, maxv2);
3254 		    add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
3255 		  }
3256 	    }
3257 	}
3258     }
3259 }
3260 
3261 /* OP is an operand of a truth value expression which is known to have
3262    a particular value.  Register any asserts for OP and for any
3263    operands in OP's defining statement.
3264 
3265    If CODE is EQ_EXPR, then we want to register OP is zero (false);
3266    if CODE is NE_EXPR, then we want to register OP is nonzero (true).  */
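/* E.g. for c_3 = a_1 & b_2 on an edge where c_3 is known nonzero, we
   can assert that both a_1 != 0 and b_2 != 0 hold (provided each
   operand has a single use).  */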
3267 
3268 static void
3269 register_edge_assert_for_1 (tree op, enum tree_code code,
3270 			    edge e, vec<assert_info> &asserts)
3271 {
3272   gimple *op_def;
3273   tree val;
3274   enum tree_code rhs_code;
3275 
3276   /* We only care about SSA_NAMEs.  */
3277   if (TREE_CODE (op) != SSA_NAME)
3278     return;
3279 
3280   /* We know that OP will have a zero or nonzero value.  */
3281   val = build_int_cst (TREE_TYPE (op), 0);
3282   add_assert_info (asserts, op, op, code, val);
3283 
3284   /* Now look at how OP is set.  If it's set from a comparison,
3285      a truth operation or some bit operations, then we may be able
3286      to register information about the operands of that assignment.  */
3287   op_def = SSA_NAME_DEF_STMT (op);
3288   if (gimple_code (op_def) != GIMPLE_ASSIGN)
3289     return;
3290 
3291   rhs_code = gimple_assign_rhs_code (op_def);
3292 
3293   if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
3294     {
3295 	      bool invert = (code == EQ_EXPR);
3296       tree op0 = gimple_assign_rhs1 (op_def);
3297       tree op1 = gimple_assign_rhs2 (op_def);
3298 
3299       if (TREE_CODE (op0) == SSA_NAME)
3300         register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
3301       if (TREE_CODE (op1) == SSA_NAME)
3302         register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
3303     }
3304   else if ((code == NE_EXPR
3305 	    && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
3306 	   || (code == EQ_EXPR
3307 	       && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
3308     {
3309       /* Recurse on each operand.  */
3310       tree op0 = gimple_assign_rhs1 (op_def);
3311       tree op1 = gimple_assign_rhs2 (op_def);
3312       if (TREE_CODE (op0) == SSA_NAME
3313 	  && has_single_use (op0))
3314 	register_edge_assert_for_1 (op0, code, e, asserts);
3315       if (TREE_CODE (op1) == SSA_NAME
3316 	  && has_single_use (op1))
3317 	register_edge_assert_for_1 (op1, code, e, asserts);
3318     }
3319   else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
3320 	   && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
3321     {
3322       /* Recurse, flipping CODE.  */
3323       code = invert_tree_comparison (code, false);
3324       register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3325     }
3326   else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
3327     {
3328       /* Recurse through the copy.  */
3329       register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3330     }
3331   else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
3332     {
3333       /* Recurse through the type conversion, unless it is a narrowing
3334 	 conversion or conversion from non-integral type.  */
3335       tree rhs = gimple_assign_rhs1 (op_def);
3336       if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
3337 	  && (TYPE_PRECISION (TREE_TYPE (rhs))
3338 	      <= TYPE_PRECISION (TREE_TYPE (op))))
3339 	register_edge_assert_for_1 (rhs, code, e, asserts);
3340     }
3341 }
3342 
3343 /* Check if comparison
3344      NAME COND_OP INTEGER_CST
3345    has the form
3346      (X & 11...100..0) COND_OP XX...X00...0
3347    Such comparison can yield assertions like
3348      X >= XX...X00...0
3349      X <= XX...X11...1
3350    in case of COND_OP being EQ_EXPR or
3351      X < XX...X00...0
3352      X > XX...X11...1
3353    in case of NE_EXPR.  */
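
/* As a concrete (hypothetical) example, for an 8-bit unsigned X the test

     (X & 0xf8) == 0x50

   yields the assertions X >= 0x50 and X <= 0x57, since exactly the
   values in [0x50, 0x57] share those top five bits.  */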
3354 
3355 static bool
3356 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
3357 		      tree *new_name, tree *low, enum tree_code *low_code,
3358 		      tree *high, enum tree_code *high_code)
3359 {
3360   gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3361 
3362   if (!is_gimple_assign (def_stmt)
3363       || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
3364     return false;
3365 
3366   tree t = gimple_assign_rhs1 (def_stmt);
3367   tree maskt = gimple_assign_rhs2 (def_stmt);
3368   if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
3369     return false;
3370 
3371   wi::tree_to_wide_ref mask = wi::to_wide (maskt);
3372   wide_int inv_mask = ~mask;
3373   /* An all-zero or all-ones mask would have been folded away by now.  */
3374   if (mask == 0 || inv_mask == 0)
3375     return false;
3376 
3377   /* Assume VALT is INTEGER_CST.  */
3378   wi::tree_to_wide_ref val = wi::to_wide (valt);
3379 
3380   if ((inv_mask & (inv_mask + 1)) != 0
3381       || (val & mask) != val)
3382     return false;
3383 
3384   bool is_range = cond_code == EQ_EXPR;
3385 
3386   tree type = TREE_TYPE (t);
3387   wide_int min = wi::min_value (type),
3388     max = wi::max_value (type);
3389 
3390   if (is_range)
3391     {
3392       *low_code = val == min ? ERROR_MARK : GE_EXPR;
3393       *high_code = val == max ? ERROR_MARK : LE_EXPR;
3394     }
3395   else
3396     {
3397       /* We can still generate an assertion if one of the alternatives
3398 	 is known to be always false.  */
3399       if (val == min)
3400 	{
3401 	  *low_code = ERROR_MARK;
3402 	  *high_code = GT_EXPR;
3403 	}
3404       else if ((val | inv_mask) == max)
3405 	{
3406 	  *low_code = LT_EXPR;
3407 	  *high_code = ERROR_MARK;
3408 	}
3409       else
3410 	return false;
3411     }
3412 
3413   *new_name = t;
3414   *low = wide_int_to_tree (type, val);
3415   *high = wide_int_to_tree (type, val | inv_mask);
3416 
3417   return true;
3418 }
3419 
3420 /* Try to register an edge assertion for SSA name NAME on edge E for
3421    the condition COND_OP0 COND_CODE COND_OP1 contributing to the
3422    conditional jump at the end of E->src, recording asserts in ASSERTS.  */
3423 
3424 void
3425 register_edge_assert_for (tree name, edge e,
3426 			  enum tree_code cond_code, tree cond_op0,
3427 			  tree cond_op1, vec<assert_info> &asserts)
3428 {
3429   tree val;
3430   enum tree_code comp_code;
3431   bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
3432 
3433   /* Do not attempt to infer anything in names that flow through
3434      abnormal edges.  */
3435   if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
3436     return;
3437 
3438   if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
3439 						cond_op0, cond_op1,
3440 						is_else_edge,
3441 						&comp_code, &val))
3442     return;
3443 
3444   /* Register ASSERT_EXPRs for NAME.  */
3445   register_edge_assert_for_2 (name, e, cond_code, cond_op0,
3446 			      cond_op1, is_else_edge, asserts);
3447 
3449   /* If COND is effectively an equality test of an SSA_NAME against
3450      the value zero or one, then we may be able to assert values
3451      for SSA_NAMEs which flow into COND.  */
3452 
3453   /* In the case of NAME == 1 or NAME != 0, if the defining statement of
3454      NAME is a BIT_AND_EXPR, we can assert that both operands of the
3455      BIT_AND_EXPR have a nonzero value.  */
3456   if (((comp_code == EQ_EXPR && integer_onep (val))
3457        || (comp_code == NE_EXPR && integer_zerop (val))))
3458     {
3459       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3460 
3461       if (is_gimple_assign (def_stmt)
3462 	  && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
3463 	{
3464 	  tree op0 = gimple_assign_rhs1 (def_stmt);
3465 	  tree op1 = gimple_assign_rhs2 (def_stmt);
3466 	  register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
3467 	  register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
3468 	}
3469     }
3470 
3471   /* In the case of NAME == 0 or NAME != 1, if the defining statement of
3472      NAME is a BIT_IOR_EXPR, we can assert that both operands of the
3473      BIT_IOR_EXPR have a zero value.  */
3474   if (((comp_code == EQ_EXPR && integer_zerop (val))
3475        || (comp_code == NE_EXPR && integer_onep (val))))
3476     {
3477       gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3478 
3479       /* For a BIT_IOR_EXPR both operands necessarily have a zero value
3480 	 only if NAME == 0, or if the type precision is one.  */
3481       if (is_gimple_assign (def_stmt)
3482 	  && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
3483 	      && (TYPE_PRECISION (TREE_TYPE (name)) == 1
3484 	          || comp_code == EQ_EXPR)))
3485 	{
3486 	  tree op0 = gimple_assign_rhs1 (def_stmt);
3487 	  tree op1 = gimple_assign_rhs2 (def_stmt);
3488 	  register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
3489 	  register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
3490 	}
3491     }
3492 
3493   /* Sometimes we can infer ranges from (NAME & MASK) == VALUE.  */
3494   if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
3495       && TREE_CODE (val) == INTEGER_CST)
3496     {
3497       enum tree_code low_code, high_code;
3498       tree low, high;
3499       if (is_masked_range_test (name, val, comp_code, &name, &low,
3500 				&low_code, &high, &high_code))
3501 	{
3502 	  if (low_code != ERROR_MARK)
3503 	    register_edge_assert_for_2 (name, e, low_code, name,
3504 					low, /*invert*/false, asserts);
3505 	  if (high_code != ERROR_MARK)
3506 	    register_edge_assert_for_2 (name, e, high_code, name,
3507 					high, /*invert*/false, asserts);
3508 	}
3509     }
3510 }
3511 
3512 /* Finish the ASSERTS found for edge E and register them at GSI.  */
3513 
3514 static void
3515 finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
3516 				 vec<assert_info> &asserts)
3517 {
3518   for (unsigned i = 0; i < asserts.length (); ++i)
3519     /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
3520        reachable from E.  */
3521     if (live_on_edge (e, asserts[i].name))
3522       register_new_assert_for (asserts[i].name, asserts[i].expr,
3523 			       asserts[i].comp_code, asserts[i].val,
3524 			       NULL, e, gsi);
3525 }
3526 
3527 
3528 
3529 /* Determine whether the outgoing edges of BB should receive an
3530    ASSERT_EXPR for each of the operands of BB's LAST statement.
3531    The last statement of BB must be a COND_EXPR.
3532 
3533    If any of the sub-graphs rooted at BB have an interesting use of
3534    the predicate operands, an assert location node is added to the
3535    list of assertions for the corresponding operands.  */
3536 
3537 static void
3538 find_conditional_asserts (basic_block bb, gcond *last)
3539 {
3540   gimple_stmt_iterator bsi;
3541   tree op;
3542   edge_iterator ei;
3543   edge e;
3544   ssa_op_iter iter;
3545 
3546   bsi = gsi_for_stmt (last);
3547 
3548   /* Look for uses of the operands in each of the sub-graphs
3549      rooted at BB.  We need to check each of the outgoing edges
3550      separately, so that we know what kind of ASSERT_EXPR to
3551      insert.  */
3552   FOR_EACH_EDGE (e, ei, bb->succs)
3553     {
3554       if (e->dest == bb)
3555 	continue;
3556 
3557       /* Register the necessary assertions for each operand in the
3558 	 conditional predicate.  */
3559       auto_vec<assert_info, 8> asserts;
3560       FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
3561 	register_edge_assert_for (op, e,
3562 				  gimple_cond_code (last),
3563 				  gimple_cond_lhs (last),
3564 				  gimple_cond_rhs (last), asserts);
3565       finish_register_edge_assert_for (e, bsi, asserts);
3566     }
3567 }
3568 
3569 struct case_info
3570 {
3571   tree expr;
3572   basic_block bb;
3573 };
3574 
3575 /* Compare two case labels sorting first by the destination bb index
3576    and then by the case value.  */
3577 
3578 static int
3579 compare_case_labels (const void *p1, const void *p2)
3580 {
3581   const struct case_info *ci1 = (const struct case_info *) p1;
3582   const struct case_info *ci2 = (const struct case_info *) p2;
3583   int idx1 = ci1->bb->index;
3584   int idx2 = ci2->bb->index;
3585 
3586   if (idx1 < idx2)
3587     return -1;
3588   else if (idx1 == idx2)
3589     {
3590       /* Make sure the default label is first in a group.  */
3591       if (!CASE_LOW (ci1->expr))
3592 	return -1;
3593       else if (!CASE_LOW (ci2->expr))
3594 	return 1;
3595       else
3596 	return tree_int_cst_compare (CASE_LOW (ci1->expr),
3597 				     CASE_LOW (ci2->expr));
3598     }
3599   else
3600     return 1;
3601 }
3602 
3603 /* Determine whether the outgoing edges of BB should receive an
3604    ASSERT_EXPR for each of the operands of BB's LAST statement.
3605    The last statement of BB must be a SWITCH_EXPR.
3606 
3607    If any of the sub-graphs rooted at BB have an interesting use of
3608    the predicate operands, an assert location node is added to the
3609    list of assertions for the corresponding operands.  */
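
/* For instance, given a hypothetical switch such as

     switch (i_1)
       {
       case 2:
       case 3 ... 5: goto L1;
       default: goto L2;
       }

   we register i_1 >= 2 and i_1 <= 5 on the edge to L1, and along the
   default edge the anti-range logic below registers
   (unsigned)i_1 - 2 > 3, i.e. i_1 is outside [2, 5].  */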
3610 
3611 static void
3612 find_switch_asserts (basic_block bb, gswitch *last)
3613 {
3614   gimple_stmt_iterator bsi;
3615   tree op;
3616   edge e;
3617   struct case_info *ci;
3618   size_t n = gimple_switch_num_labels (last);
3619 #if GCC_VERSION >= 4000
3620   unsigned int idx;
3621 #else
3622   /* Work around GCC 3.4 bug (PR 37086).  */
3623   volatile unsigned int idx;
3624 #endif
3625 
3626   bsi = gsi_for_stmt (last);
3627   op = gimple_switch_index (last);
3628   if (TREE_CODE (op) != SSA_NAME)
3629     return;
3630 
3631   /* Build a vector of case labels sorted by destination basic block.  */
3632   ci = XNEWVEC (struct case_info, n);
3633   for (idx = 0; idx < n; ++idx)
3634     {
3635       ci[idx].expr = gimple_switch_label (last, idx);
3636       ci[idx].bb = label_to_block (cfun, CASE_LABEL (ci[idx].expr));
3637     }
3638   edge default_edge = find_edge (bb, ci[0].bb);
3639   qsort (ci, n, sizeof (struct case_info), compare_case_labels);
3640 
3641   for (idx = 0; idx < n; ++idx)
3642     {
3643       tree min, max;
3644       tree cl = ci[idx].expr;
3645       basic_block cbb = ci[idx].bb;
3646 
3647       min = CASE_LOW (cl);
3648       max = CASE_HIGH (cl);
3649 
3650       /* If there are multiple case labels with the same destination
3651 	 we need to combine them to a single value range for the edge.  */
3652       if (idx + 1 < n && cbb == ci[idx + 1].bb)
3653 	{
3654 	  /* Skip labels until the last of the group.  */
3655 	  do {
3656 	    ++idx;
3657 	  } while (idx < n && cbb == ci[idx].bb);
3658 	  --idx;
3659 
3660 	  /* Pick up the maximum of the case label range.  */
3661 	  if (CASE_HIGH (ci[idx].expr))
3662 	    max = CASE_HIGH (ci[idx].expr);
3663 	  else
3664 	    max = CASE_LOW (ci[idx].expr);
3665 	}
3666 
3667       /* Can't extract a useful assertion out of a range that includes the
3668 	 default label.  */
3669       if (min == NULL_TREE)
3670 	continue;
3671 
3672       /* Find the edge to register the assert expr on.  */
3673       e = find_edge (bb, cbb);
3674 
3675       /* Register the necessary assertions for the operand in the
3676 	 SWITCH_EXPR.  */
3677       auto_vec<assert_info, 8> asserts;
3678       register_edge_assert_for (op, e,
3679 				max ? GE_EXPR : EQ_EXPR,
3680 				op, fold_convert (TREE_TYPE (op), min),
3681 				asserts);
3682       if (max)
3683 	register_edge_assert_for (op, e, LE_EXPR, op,
3684 				  fold_convert (TREE_TYPE (op), max),
3685 				  asserts);
3686       finish_register_edge_assert_for (e, bsi, asserts);
3687     }
3688 
3689   XDELETEVEC (ci);
3690 
3691   if (!live_on_edge (default_edge, op))
3692     return;
3693 
3694   /* Now register, along the default edge, assertions that correspond
3695      to the anti-range of each case label.  */
3696   int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
3697   if (insertion_limit == 0)
3698     return;
3699 
3700   /* We can't do this for cases that share a label with the default case.  */
3701   tree default_cl = gimple_switch_default_label (last);
3702   for (idx = 1; idx < n; idx++)
3703     {
3704       tree min, max;
3705       tree cl = gimple_switch_label (last, idx);
3706       if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
3707 	continue;
3708 
3709       min = CASE_LOW (cl);
3710       max = CASE_HIGH (cl);
3711 
3712       /* Combine contiguous case ranges to reduce the number of assertions
3713 	 to insert.  */
3714       for (idx = idx + 1; idx < n; idx++)
3715 	{
3716 	  tree next_min, next_max;
3717 	  tree next_cl = gimple_switch_label (last, idx);
3718 	  if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
3719 	    break;
3720 
3721 	  next_min = CASE_LOW (next_cl);
3722 	  next_max = CASE_HIGH (next_cl);
3723 
3724 	  wide_int difference = (wi::to_wide (next_min)
3725 				 - wi::to_wide (max ? max : min));
3726 	  if (wi::eq_p (difference, 1))
3727 	    max = next_max ? next_max : next_min;
3728 	  else
3729 	    break;
3730 	}
3731       idx--;
3732 
3733       if (max == NULL_TREE)
3734 	{
3735 	  /* Register the assertion OP != MIN.  */
3736 	  auto_vec<assert_info, 8> asserts;
3737 	  min = fold_convert (TREE_TYPE (op), min);
3738 	  register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
3739 				    asserts);
3740 	  finish_register_edge_assert_for (default_edge, bsi, asserts);
3741 	}
3742       else
3743 	{
3744 	  /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
3745 	     which will give OP the anti-range ~[MIN,MAX].  */
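	  /* E.g. with MIN == 3 and MAX == 5 (hypothetical values), the
	     test (unsigned)OP - 3 > 2 holds exactly when OP < 3 (the
	     subtraction wraps around) or OP > 5.  */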
3746 	  tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
3747 	  min = fold_convert (TREE_TYPE (uop), min);
3748 	  max = fold_convert (TREE_TYPE (uop), max);
3749 
3750 	  tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
3751 	  tree rhs = int_const_binop (MINUS_EXPR, max, min);
3752 	  register_new_assert_for (op, lhs, GT_EXPR, rhs,
3753 				   NULL, default_edge, bsi);
3754 	}
3755 
3756       if (--insertion_limit == 0)
3757 	break;
3758     }
3759 }
3760 
3761 
3762 /* Traverse all the statements in block BB looking for statements that
3763    may generate useful assertions for the SSA names in their operands.
3764    If a statement produces a useful assertion A for name N_i, then the
3765    list of assertions already generated for N_i is scanned to
3766    determine if A is actually needed.
3767 
3768    If N_i already had the assertion A at a location dominating the
3769    current location, then nothing needs to be done.  Otherwise, the
3770    new location for A is recorded instead.
3771 
3772    1- For every statement S in BB, all the variables used by S are
3773       added to bitmap FOUND_IN_SUBGRAPH.
3774 
3775    2- If statement S uses an operand N in a way that exposes a known
3776       value range for N, then if N was not already generated by an
3777       ASSERT_EXPR, create a new assert location for N.  For instance,
3778       if N is a pointer and the statement dereferences it, we can
3779       assume that N is not NULL.
3780 
3781    3- COND_EXPRs are a special case of #2.  We can derive range
3782       information from the predicate but need to insert different
3783       ASSERT_EXPRs for each of the sub-graphs rooted at the
3784       conditional block.  If the last statement of BB is a conditional
3785       expression of the form 'X op Y', then
3786 
3787       a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
3788 
3789       b) If the conditional is the only entry point to the sub-graph
3790 	 corresponding to the THEN_CLAUSE, recurse into it.  On
3791 	 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
3792 	 an ASSERT_EXPR is added for the corresponding variable.
3793 
3794       c) Repeat step (b) on the ELSE_CLAUSE.
3795 
3796       d) Mark X and Y in FOUND_IN_SUBGRAPH.
3797 
3798       For instance,
3799 
3800 	    if (a == 9)
3801 	      b = a;
3802 	    else
3803 	      b = c + 1;
3804 
3805       In this case, an assertion on the THEN clause is useful to
3806       determine that 'a' is always 9 on that edge.  However, an assertion
3807       on the ELSE clause would be unnecessary.
3808 
3809    4- If BB does not end in a conditional expression, then we recurse
3810       into BB's dominator children.
3811 
3812    At the end of the recursive traversal, every SSA name will have a
3813    list of locations where ASSERT_EXPRs should be added.  When a new
3814    location for name N is found, it is registered by calling
3815    register_new_assert_for.  That function keeps track of all the
3816    registered assertions to prevent adding unnecessary assertions.
3817    For instance, if a pointer P_4 is dereferenced more than once in a
3818    dominator tree, only the location dominating all the dereferences of
3819    P_4 will receive an ASSERT_EXPR.  */
3820 
3821 static void
3822 find_assert_locations_1 (basic_block bb, sbitmap live)
3823 {
3824   gimple *last;
3825 
3826   last = last_stmt (bb);
3827 
3828   /* If BB's last statement is a conditional statement involving integer
3829      operands, determine if we need to add ASSERT_EXPRs.  */
3830   if (last
3831       && gimple_code (last) == GIMPLE_COND
3832       && !fp_predicate (last)
3833       && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3834     find_conditional_asserts (bb, as_a <gcond *> (last));
3835 
3836   /* If BB's last statement is a switch statement involving integer
3837      operands, determine if we need to add ASSERT_EXPRs.  */
3838   if (last
3839       && gimple_code (last) == GIMPLE_SWITCH
3840       && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3841     find_switch_asserts (bb, as_a <gswitch *> (last));
3842 
3843   /* Traverse all the statements in BB marking used names and looking
3844      for statements that may infer assertions for their used operands.  */
3845   for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
3846        gsi_prev (&si))
3847     {
3848       gimple *stmt;
3849       tree op;
3850       ssa_op_iter i;
3851 
3852       stmt = gsi_stmt (si);
3853 
3854       if (is_gimple_debug (stmt))
3855 	continue;
3856 
3857       /* See if we can derive an assertion for any of STMT's operands.  */
3858       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3859 	{
3860 	  tree value;
3861 	  enum tree_code comp_code;
3862 
3863 	  /* If op is not live beyond this stmt, do not bother to insert
3864 	     asserts for it.  */
3865 	  if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
3866 	    continue;
3867 
3868 	  /* If OP is used in such a way that we can infer a value
3869 	     range for it, and we don't find a previous assertion for
3870 	     it, create a new assertion location node for OP.  */
3871 	  if (infer_value_range (stmt, op, &comp_code, &value))
3872 	    {
3873 	      /* If we are able to infer a nonzero value range for OP,
3874 		 then walk backwards through the use-def chain to see if OP
3875 		 was set via a typecast.
3876 
3877 		 If so, then we can also infer a nonzero value range
3878 		 for the operand of the NOP_EXPR.  */
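	      /* For instance (a hypothetical fragment), if
		 q_2 = (other_type *) p_1 and STMT dereferences q_2, then
		 q_2 != 0 and consequently p_1 != 0 as well.  */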
3879 	      if (comp_code == NE_EXPR && integer_zerop (value))
3880 		{
3881 		  tree t = op;
3882 		  gimple *def_stmt = SSA_NAME_DEF_STMT (t);
3883 
3884 		  while (is_gimple_assign (def_stmt)
3885 			 && CONVERT_EXPR_CODE_P
3886 			     (gimple_assign_rhs_code (def_stmt))
3887 			 && TREE_CODE
3888 			     (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
3889 			 && POINTER_TYPE_P
3890 			     (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
3891 		    {
3892 		      t = gimple_assign_rhs1 (def_stmt);
3893 		      def_stmt = SSA_NAME_DEF_STMT (t);
3894 
3895 		      /* Note we want to register the assert for the
3896 			 operand of the NOP_EXPR after SI, not after the
3897 			 conversion.  */
3898 		      if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
3899 			register_new_assert_for (t, t, comp_code, value,
3900 						 bb, NULL, si);
3901 		    }
3902 		}
3903 
3904 	      register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3905 	    }
3906 	}
3907 
3908       /* Update live.  */
3909       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3910 	bitmap_set_bit (live, SSA_NAME_VERSION (op));
3911       FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3912 	bitmap_clear_bit (live, SSA_NAME_VERSION (op));
3913     }
3914 
3915   /* Traverse all PHI nodes in BB, updating live.  */
3916   for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3917        gsi_next (&si))
3918     {
3919       use_operand_p arg_p;
3920       ssa_op_iter i;
3921       gphi *phi = si.phi ();
3922       tree res = gimple_phi_result (phi);
3923 
3924       if (virtual_operand_p (res))
3925 	continue;
3926 
3927       FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3928 	{
3929 	  tree arg = USE_FROM_PTR (arg_p);
3930 	  if (TREE_CODE (arg) == SSA_NAME)
3931 	    bitmap_set_bit (live, SSA_NAME_VERSION (arg));
3932 	}
3933 
3934       bitmap_clear_bit (live, SSA_NAME_VERSION (res));
3935     }
3936 }
3937 
3938 /* Do an RPO walk over the function computing SSA name liveness
3939    on-the-fly and deciding on assert expressions to insert.  */
3940 
3941 static void
3942 find_assert_locations (void)
3943 {
3944   int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3945   int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3946   int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3947   int rpo_cnt, i;
3948 
3949   live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
3950   rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3951   for (i = 0; i < rpo_cnt; ++i)
3952     bb_rpo[rpo[i]] = i;
3953 
3954   /* Pre-seed loop latch liveness from loop header PHI nodes.  Due to
3955      the order we compute liveness and insert asserts we otherwise
3956      fail to insert asserts into the loop latch.  */
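  /* For instance, given x_1 = PHI <x_0(preheader), x_2(latch)> in a
     loop header, x_2 is marked live in the latch block here so that
     asserts about x_2 can still be placed there.  */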
3957   loop_p loop;
3958   FOR_EACH_LOOP (loop, 0)
3959     {
3960       i = loop->latch->index;
3961       unsigned int j = single_succ_edge (loop->latch)->dest_idx;
3962       for (gphi_iterator gsi = gsi_start_phis (loop->header);
3963 	   !gsi_end_p (gsi); gsi_next (&gsi))
3964 	{
3965 	  gphi *phi = gsi.phi ();
3966 	  if (virtual_operand_p (gimple_phi_result (phi)))
3967 	    continue;
3968 	  tree arg = gimple_phi_arg_def (phi, j);
3969 	  if (TREE_CODE (arg) == SSA_NAME)
3970 	    {
3971 	      if (live[i] == NULL)
3972 		{
3973 		  live[i] = sbitmap_alloc (num_ssa_names);
3974 		  bitmap_clear (live[i]);
3975 		}
3976 	      bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
3977 	    }
3978 	}
3979     }
3980 
3981   for (i = rpo_cnt - 1; i >= 0; --i)
3982     {
3983       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
3984       edge e;
3985       edge_iterator ei;
3986 
3987       if (!live[rpo[i]])
3988 	{
3989 	  live[rpo[i]] = sbitmap_alloc (num_ssa_names);
3990 	  bitmap_clear (live[rpo[i]]);
3991 	}
3992 
3993       /* Process BB and update the live information with uses in
3994          this block.  */
3995       find_assert_locations_1 (bb, live[rpo[i]]);
3996 
3997       /* Merge liveness into the predecessor blocks and free it.  */
3998       if (!bitmap_empty_p (live[rpo[i]]))
3999 	{
4000 	  int pred_rpo = i;
4001 	  FOR_EACH_EDGE (e, ei, bb->preds)
4002 	    {
4003 	      int pred = e->src->index;
4004 	      if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
4005 		continue;
4006 
4007 	      if (!live[pred])
4008 		{
4009 		  live[pred] = sbitmap_alloc (num_ssa_names);
4010 		  bitmap_clear (live[pred]);
4011 		}
4012 	      bitmap_ior (live[pred], live[pred], live[rpo[i]]);
4013 
4014 	      if (bb_rpo[pred] < pred_rpo)
4015 		pred_rpo = bb_rpo[pred];
4016 	    }
4017 
4018 	  /* Record the RPO number of the last visited block that needs
4019 	     live information from this block.  */
4020 	  last_rpo[rpo[i]] = pred_rpo;
4021 	}
4022       else
4023 	{
4024 	  sbitmap_free (live[rpo[i]]);
4025 	  live[rpo[i]] = NULL;
4026 	}
4027 
4028       /* We can free all successors' live bitmaps if all their
4029          predecessors have been visited already.  */
4030       FOR_EACH_EDGE (e, ei, bb->succs)
4031 	if (last_rpo[e->dest->index] == i
4032 	    && live[e->dest->index])
4033 	  {
4034 	    sbitmap_free (live[e->dest->index]);
4035 	    live[e->dest->index] = NULL;
4036 	  }
4037     }
4038 
4039   XDELETEVEC (rpo);
4040   XDELETEVEC (bb_rpo);
4041   XDELETEVEC (last_rpo);
4042   for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
4043     if (live[i])
4044       sbitmap_free (live[i]);
4045   XDELETEVEC (live);
4046 }
4047 
4048 /* Create an ASSERT_EXPR for NAME and insert it in the location
4049    indicated by LOC.  Return true if we made any edge insertions.  */
4050 
4051 static bool
4052 process_assert_insertions_for (tree name, assert_locus *loc)
4053 {
4054   /* Build the comparison expression NAME_i COMP_CODE VAL.  */
4055   gimple *stmt;
4056   tree cond;
4057   gimple *assert_stmt;
4058   edge_iterator ei;
4059   edge e;
4060 
4061   /* If we have X <=> X do not insert an assert expr for that.  */
4062   if (loc->expr == loc->val)
4063     return false;
4064 
4065   cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
4066   assert_stmt = build_assert_expr_for (cond, name);
4067   if (loc->e)
4068     {
4069       /* We have been asked to insert the assertion on an edge.  This
4070 	 is used only by COND_EXPR and SWITCH_EXPR assertions.  */
4071       gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
4072 			   || (gimple_code (gsi_stmt (loc->si))
4073 			       == GIMPLE_SWITCH));
4074 
4075       gsi_insert_on_edge (loc->e, assert_stmt);
4076       return true;
4077     }
4078 
4079   /* If the stmt iterator points at the end then this is an insertion
4080      at the beginning of a block.  */
4081   if (gsi_end_p (loc->si))
4082     {
4083       gimple_stmt_iterator si = gsi_after_labels (loc->bb);
4084       gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
4085       return false;
4086     }
4088   /* Otherwise, we can insert right after LOC->SI, provided the
4089      statement is not the last statement in the block.  */
4090   stmt = gsi_stmt (loc->si);
4091   if (!stmt_ends_bb_p (stmt))
4092     {
4093       gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
4094       return false;
4095     }
4096 
4097   /* If STMT must be the last statement in BB, we can only insert new
4098      assertions on the non-abnormal edge out of BB.  Note that since
4099      STMT is not a control flow statement, there can be only one
4100      non-abnormal/EH edge out of BB.  */
4101   FOR_EACH_EDGE (e, ei, loc->bb->succs)
4102     if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
4103       {
4104 	gsi_insert_on_edge (e, assert_stmt);
4105 	return true;
4106       }
4107 
4108   gcc_unreachable ();
4109 }
4110 
4111 /* Qsort helper for sorting assert locations.  If STABLE is true, don't
4112    use iterative_hash_expr because it can be unstable for -fcompare-debug;
4113    on the other hand, some pointers might already be NULL.  */
4114 
4115 template <bool stable>
4116 static int
4117 compare_assert_loc (const void *pa, const void *pb)
4118 {
4119   assert_locus * const a = *(assert_locus * const *)pa;
4120   assert_locus * const b = *(assert_locus * const *)pb;
4121 
4122   /* If stable, some asserts might be optimized away already, sort
4123      them last.  */
4124   if (stable)
4125     {
4126       if (a == NULL)
4127 	return b != NULL;
4128       else if (b == NULL)
4129 	return -1;
4130     }
4131 
4132   if (a->e == NULL && b->e != NULL)
4133     return 1;
4134   else if (a->e != NULL && b->e == NULL)
4135     return -1;
4136 
4137   /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
4138      no need to test both a->e and b->e.  */
4139 
4140   /* Sort by destination index.  */
4141   if (a->e == NULL)
4142     ;
4143   else if (a->e->dest->index > b->e->dest->index)
4144     return 1;
4145   else if (a->e->dest->index < b->e->dest->index)
4146     return -1;
4147 
4148   /* Sort by comp_code.  */
4149   if (a->comp_code > b->comp_code)
4150     return 1;
4151   else if (a->comp_code < b->comp_code)
4152     return -1;
4153 
4154   hashval_t ha, hb;
4155 
4156   /* E.g. if a->val is an ADDR_EXPR of a VAR_DECL, iterative_hash_expr
4157      uses the DECL_UID of the VAR_DECL, so sorting might differ between
4158      -g and -g0.  When removing redundant assert exprs and commonizing
4159      them to successors, this does not matter, but the final sort
4160      needs to be stable.  */
4161   if (stable)
4162     {
4163       ha = 0;
4164       hb = 0;
4165     }
4166   else
4167     {
4168       ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
4169       hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
4170     }
4171 
4172   /* Break the tie using hashing and source/bb index.  */
4173   if (ha == hb)
4174     return (a->e != NULL
4175 	    ? a->e->src->index - b->e->src->index
4176 	    : a->bb->index - b->bb->index);
4177   return ha > hb ? 1 : -1;
4178 }
4179 
4180 /* Process all the insertions registered for every name N_i registered
4181    in NEED_ASSERT_FOR.  The list of assertions to be inserted are
4182    found in ASSERTS_FOR[i].  */
4183 
4184 static void
4185 process_assert_insertions (void)
4186 {
4187   unsigned i;
4188   bitmap_iterator bi;
4189   bool update_edges_p = false;
4190   int num_asserts = 0;
4191 
4192   if (dump_file && (dump_flags & TDF_DETAILS))
4193     dump_all_asserts (dump_file);
4194 
4195   EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4196     {
4197       assert_locus *loc = asserts_for[i];
4198       gcc_assert (loc);
4199 
4200       auto_vec<assert_locus *, 16> asserts;
4201       for (; loc; loc = loc->next)
4202 	asserts.safe_push (loc);
4203       asserts.qsort (compare_assert_loc<false>);
4204 
4205       /* Push down common asserts to successors and remove redundant ones.  */
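      /* E.g. if every incoming edge of a block carries the same assert,
	 say x_1 != 0 (a hypothetical name), the per-edge copies are
	 dropped and a single assert is inserted at the beginning of the
	 destination block instead.  */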
4206       unsigned ecnt = 0;
4207       assert_locus *common = NULL;
4208       unsigned commonj = 0;
4209       for (unsigned j = 0; j < asserts.length (); ++j)
4210 	{
4211 	  loc = asserts[j];
4212 	  if (! loc->e)
4213 	    common = NULL;
4214 	  else if (! common
4215 		   || loc->e->dest != common->e->dest
4216 		   || loc->comp_code != common->comp_code
4217 		   || ! operand_equal_p (loc->val, common->val, 0)
4218 		   || ! operand_equal_p (loc->expr, common->expr, 0))
4219 	    {
4220 	      commonj = j;
4221 	      common = loc;
4222 	      ecnt = 1;
4223 	    }
4224 	  else if (loc->e == asserts[j-1]->e)
4225 	    {
4226 	      /* Remove duplicate asserts.  */
4227 	      if (commonj == j - 1)
4228 		{
4229 		  commonj = j;
4230 		  common = loc;
4231 		}
4232 	      free (asserts[j-1]);
4233 	      asserts[j-1] = NULL;
4234 	    }
4235 	  else
4236 	    {
4237 	      ecnt++;
4238 	      if (EDGE_COUNT (common->e->dest->preds) == ecnt)
4239 		{
4240 		  /* We have the same assertion on all incoming edges of a BB.
4241 		     Insert it at the beginning of that block.  */
4242 		  loc->bb = loc->e->dest;
4243 		  loc->e = NULL;
4244 		  loc->si = gsi_none ();
4245 		  common = NULL;
4246 		  /* Clear asserts commoned.  */
4247 		  for (; commonj != j; ++commonj)
4248 		    if (asserts[commonj])
4249 		      {
4250 			free (asserts[commonj]);
4251 			asserts[commonj] = NULL;
4252 		      }
4253 		}
4254 	    }
4255 	}
4256 
4257       /* The asserts vector sorting above might be unstable for
4258 	 -fcompare-debug, sort again to ensure a stable sort.  */
4259       asserts.qsort (compare_assert_loc<true>);
4260       for (unsigned j = 0; j < asserts.length (); ++j)
4261 	{
4262 	  loc = asserts[j];
4263 	  if (! loc)
4264 	    break;
4265 	  update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
4266 	  num_asserts++;
4267 	  free (loc);
4268 	}
4269     }
4270 
4271   if (update_edges_p)
4272     gsi_commit_edge_inserts ();
4273 
4274   statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
4275 			    num_asserts);
4276 }
4277 
4278 
4279 /* Traverse the flowgraph looking for conditional jumps to insert range
4280    expressions.  These range expressions are meant to provide information
4281    to optimizations that need to reason in terms of value ranges.  They
4282    will not be expanded into RTL.  For instance, given:
4283 
4284    x = ...
4285    y = ...
4286    if (x < y)
4287      y = x - 2;
4288    else
4289      x = y + 3;
4290 
4291    this pass will transform the code into:
4292 
4293    x = ...
4294    y = ...
4295    if (x < y)
4296     {
4297       x = ASSERT_EXPR <x, x < y>
4298       y = x - 2
4299     }
4300    else
4301     {
4302       y = ASSERT_EXPR <y, x >= y>
4303       x = y + 3
4304     }
4305 
4306    The idea is that once copy and constant propagation have run, other
4307    optimizations will be able to determine what ranges of values can 'x'
4308    take in different paths of the code, simply by checking the reaching
4309    definition of 'x'.  */
4310 
4311 static void
4312 insert_range_assertions (void)
4313 {
4314   need_assert_for = BITMAP_ALLOC (NULL);
4315   asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
4316 
4317   calculate_dominance_info (CDI_DOMINATORS);
4318 
4319   find_assert_locations ();
4320   if (!bitmap_empty_p (need_assert_for))
4321     {
4322       process_assert_insertions ();
4323       update_ssa (TODO_update_ssa_no_phi);
4324     }
4325 
4326   if (dump_file && (dump_flags & TDF_DETAILS))
4327     {
4328       fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
4329       dump_function_to_file (current_function_decl, dump_file, dump_flags);
4330     }
4331 
4332   free (asserts_for);
4333   BITMAP_FREE (need_assert_for);
4334 }
4335 
4336 class vrp_prop : public ssa_propagation_engine
4337 {
4338  public:
4339   enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
4340   enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
4341 
4342   void vrp_initialize (void);
4343   void vrp_finalize (bool);
4344   void check_all_array_refs (void);
4345   void check_array_ref (location_t, tree, bool);
4346   void check_mem_ref (location_t, tree, bool);
4347   void search_for_addr_array (tree, location_t);
4348 
4349   class vr_values vr_values;
4350   /* Temporary delegator to minimize code churn.  */
4351   value_range *get_value_range (const_tree op)
4352     { return vr_values.get_value_range (op); }
4353   void set_defs_to_varying (gimple *stmt)
4354     { return vr_values.set_defs_to_varying (stmt); }
4355   void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
4356 				tree *output_p, value_range *vr)
4357     { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
4358   bool update_value_range (const_tree op, value_range *vr)
4359     { return vr_values.update_value_range (op, vr); }
4360   void extract_range_basic (value_range *vr, gimple *stmt)
4361     { vr_values.extract_range_basic (vr, stmt); }
4362   void extract_range_from_phi_node (gphi *phi, value_range *vr)
4363     { vr_values.extract_range_from_phi_node (phi, vr); }
4364 };
4365 /* Checks one ARRAY_REF in REF, located at LOCATION.  Ignores flexible
4366    array members and "struct hack" idioms.  If VRP can determine that the
4367    array subscript is a constant, check if it is outside the valid
4368    range.  If the array subscript is a RANGE, warn if it is
4369    non-overlapping with the valid range.
4370    IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR.  */
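
/* For instance (a hypothetical case), given

     int a[10];
     ... a[11] ...

   the subscript is diagnosed as being above the bounds of 'int[10]',
   while &a[10] is accepted when IGNORE_OFF_BY_ONE is true.  */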
4371 
4372 void
4373 vrp_prop::check_array_ref (location_t location, tree ref,
4374 			   bool ignore_off_by_one)
4375 {
4376   const value_range *vr = NULL;
4377   tree low_sub, up_sub;
4378   tree low_bound, up_bound, up_bound_p1;
4379 
4380   if (TREE_NO_WARNING (ref))
4381     return;
4382 
4383   low_sub = up_sub = TREE_OPERAND (ref, 1);
4384   up_bound = array_ref_up_bound (ref);
4385 
4386   if (!up_bound
4387       || TREE_CODE (up_bound) != INTEGER_CST
4388       || (warn_array_bounds < 2
4389 	  && array_at_struct_end_p (ref)))
4390     {
4391       /* Accesses to trailing arrays via pointers may access storage
4392 	 beyond the type's array bounds.  For such arrays, or for flexible
4393 	 array members, as well as for other arrays of an unknown size,
4394 	 replace the upper bound with a more permissive one that assumes
4395 	 the size of the largest object is PTRDIFF_MAX.  */
4396       tree eltsize = array_ref_element_size (ref);
4397 
4398       if (TREE_CODE (eltsize) != INTEGER_CST
4399 	  || integer_zerop (eltsize))
4400 	{
4401 	  up_bound = NULL_TREE;
4402 	  up_bound_p1 = NULL_TREE;
4403 	}
4404       else
4405 	{
4406 	  tree maxbound = TYPE_MAX_VALUE (ptrdiff_type_node);
4407 	  tree arg = TREE_OPERAND (ref, 0);
4408 	  poly_int64 off;
4409 
4410 	  if (get_addr_base_and_unit_offset (arg, &off) && known_gt (off, 0))
4411 	    maxbound = wide_int_to_tree (sizetype,
4412 					 wi::sub (wi::to_wide (maxbound),
4413 						  off));
4414 	  else
4415 	    maxbound = fold_convert (sizetype, maxbound);
4416 
4417 	  up_bound_p1 = int_const_binop (TRUNC_DIV_EXPR, maxbound, eltsize);
4418 
4419 	  up_bound = int_const_binop (MINUS_EXPR, up_bound_p1,
4420 				      build_int_cst (ptrdiff_type_node, 1));
4421 	}
4422     }
4423   else
4424     up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
4425 				   build_int_cst (TREE_TYPE (up_bound), 1));
4426 
4427   low_bound = array_ref_low_bound (ref);
4428 
4429   tree artype = TREE_TYPE (TREE_OPERAND (ref, 0));
4430 
4431   bool warned = false;
4432 
4433   /* Empty array.  */
4434   if (up_bound && tree_int_cst_equal (low_bound, up_bound_p1))
4435     warned = warning_at (location, OPT_Warray_bounds,
4436 			 "array subscript %E is above array bounds of %qT",
4437 			 low_bound, artype);
4438 
4439   if (TREE_CODE (low_sub) == SSA_NAME)
4440     {
4441       vr = get_value_range (low_sub);
4442       if (!vr->undefined_p () && !vr->varying_p ())
4443         {
4444           low_sub = vr->kind () == VR_RANGE ? vr->max () : vr->min ();
4445           up_sub = vr->kind () == VR_RANGE ? vr->min () : vr->max ();
4446         }
4447     }
4448 
4449   if (vr && vr->kind () == VR_ANTI_RANGE)
4450     {
4451       if (up_bound
4452 	  && TREE_CODE (up_sub) == INTEGER_CST
4453           && (ignore_off_by_one
4454 	      ? tree_int_cst_lt (up_bound, up_sub)
4455 	      : tree_int_cst_le (up_bound, up_sub))
4456           && TREE_CODE (low_sub) == INTEGER_CST
4457           && tree_int_cst_le (low_sub, low_bound))
4458 	warned = warning_at (location, OPT_Warray_bounds,
4459 			     "array subscript [%E, %E] is outside "
4460 			     "array bounds of %qT",
4461 			     low_sub, up_sub, artype);
4462     }
4463   else if (up_bound
4464 	   && TREE_CODE (up_sub) == INTEGER_CST
4465 	   && (ignore_off_by_one
4466 	       ? !tree_int_cst_le (up_sub, up_bound_p1)
4467 	       : !tree_int_cst_le (up_sub, up_bound)))
4468     {
4469       if (dump_file && (dump_flags & TDF_DETAILS))
4470 	{
4471 	  fprintf (dump_file, "Array bound warning for ");
4472 	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4473 	  fprintf (dump_file, "\n");
4474 	}
4475       warned = warning_at (location, OPT_Warray_bounds,
4476 			   "array subscript %E is above array bounds of %qT",
4477 			   up_sub, artype);
4478     }
4479   else if (TREE_CODE (low_sub) == INTEGER_CST
4480            && tree_int_cst_lt (low_sub, low_bound))
4481     {
4482       if (dump_file && (dump_flags & TDF_DETAILS))
4483 	{
4484 	  fprintf (dump_file, "Array bound warning for ");
4485 	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4486 	  fprintf (dump_file, "\n");
4487 	}
4488       warned = warning_at (location, OPT_Warray_bounds,
4489 			   "array subscript %E is below array bounds of %qT",
4490 			   low_sub, artype);
4491     }
4492 
4493   if (warned)
4494     {
4495       ref = TREE_OPERAND (ref, 0);
4496 
4497       if (DECL_P (ref))
4498 	inform (DECL_SOURCE_LOCATION (ref), "while referencing %qD", ref);
4499 
4500       TREE_NO_WARNING (ref) = 1;
4501     }
4502 }
4503 
4504 /* Checks one MEM_REF in REF, located at LOCATION, for out-of-bounds
4505    references to string constants.  If VRP can determine that the array
4506    subscript is a constant, check if it is outside valid range.
4507    If the array subscript is a RANGE, warn if it is non-overlapping
4508    with valid range.
4509    IGNORE_OFF_BY_ONE is true if the MEM_REF is inside an ADDR_EXPR
4510    (used to allow one-past-the-end indices for code that takes
4511    the address of the just-past-the-end element of an array).  */
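
/* E.g., assuming a hypothetical reference such as

     char s[4] = "abc";
     ... MEM[(char *)&s + 6] ...

   the constant offset 6 falls outside the valid byte range [0, 4) of
   's' and is diagnosed as a subscript outside the bounds of 'char[4]'.  */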
4512 
4513 void
4514 vrp_prop::check_mem_ref (location_t location, tree ref,
4515 			 bool ignore_off_by_one)
4516 {
4517   if (TREE_NO_WARNING (ref))
4518     return;
4519 
4520   tree arg = TREE_OPERAND (ref, 0);
4521   /* The constant and variable offset of the reference.  */
4522   tree cstoff = TREE_OPERAND (ref, 1);
4523   tree varoff = NULL_TREE;
4524 
4525   const offset_int maxobjsize = tree_to_shwi (max_object_size ());
4526 
4527   /* The array or string constant bounds in bytes.  Initially set
4528      to [-MAXOBJSIZE - 1, MAXOBJSIZE]  until a tighter bound is
4529      determined.  */
4530   offset_int arrbounds[2] = { -maxobjsize - 1, maxobjsize };
4531 
4532   /* The minimum and maximum intermediate offset.  For a reference
4533      to be valid, not only must the final offset/subscript be
4534      in bounds but all intermediate offsets should be as well.
4535      GCC may be able to deal gracefully with such out-of-bounds
4536      offsets so the checking is only enabled at -Warray-bounds=2
4537      where it may help detect bugs in uses of the intermediate
4538      offsets that could otherwise go undetected.  */
4539   offset_int ioff = wi::to_offset (fold_convert (ptrdiff_type_node, cstoff));
4540   offset_int extrema[2] = { 0, wi::abs (ioff) };
4541 
4542   /* The range of the byte offset into the reference.  */
4543   offset_int offrange[2] = { 0, 0 };
4544 
4545   const value_range *vr = NULL;
4546 
4547   /* Determine the offsets and increment OFFRANGE for the bounds of each.
4548      The loop computes the range of the final offset for expressions such
4549      as (A + i0 + ... + iN)[CSTOFF] where i0 through iN are SSA_NAMEs in
4550      some range.  */
4551   const unsigned limit = PARAM_VALUE (PARAM_SSA_NAME_DEF_CHAIN_LIMIT);
4552   for (unsigned n = 0; TREE_CODE (arg) == SSA_NAME && n < limit; ++n)
4553     {
4554       gimple *def = SSA_NAME_DEF_STMT (arg);
4555       if (!is_gimple_assign (def))
4556 	break;
4557 
4558       tree_code code = gimple_assign_rhs_code (def);
4559       if (code == POINTER_PLUS_EXPR)
4560 	{
4561 	  arg = gimple_assign_rhs1 (def);
4562 	  varoff = gimple_assign_rhs2 (def);
4563 	}
4564       else if (code == ASSERT_EXPR)
4565 	{
4566 	  arg = TREE_OPERAND (gimple_assign_rhs1 (def), 0);
4567 	  continue;
4568 	}
4569       else
4570 	return;
4571 
4572       /* VAROFF should always be an SSA_NAME here (and not even
4573 	 an INTEGER_CST) but there's no point in taking chances.  */
4574       if (TREE_CODE (varoff) != SSA_NAME)
4575 	break;
4576 
4577       vr = get_value_range (varoff);
4578       if (!vr || vr->undefined_p () || vr->varying_p ())
4579 	break;
4580 
4581       if (!vr->constant_p ())
4582         break;
4583 
4584       if (vr->kind () == VR_RANGE)
4585 	{
4586 	  offset_int min
4587 	    = wi::to_offset (fold_convert (ptrdiff_type_node, vr->min ()));
4588 	  offset_int max
4589 	    = wi::to_offset (fold_convert (ptrdiff_type_node, vr->max ()));
4590 	  if (min < max)
4591 	    {
4592 	      offrange[0] += min;
4593 	      offrange[1] += max;
4594 	    }
4595 	  else
4596 	    {
4597 	      /* When MIN >= MAX, the offset is effectively in a union
4598 		 of two ranges: [-MAXOBJSIZE -1, MAX] and [MIN, MAXOBJSIZE].
4599 		 Since there is no way to represent such a range across
4600 		 additions, conservatively add [-MAXOBJSIZE -1, MAXOBJSIZE]
4601 		 to OFFRANGE.  */
4602 	      offrange[0] += arrbounds[0];
4603 	      offrange[1] += arrbounds[1];
4604 	    }
4605 	}
4606       else
4607 	{
4608 	  /* For an anti-range, analogously to the above, conservatively
4609 	     add [-MAXOBJSIZE -1, MAXOBJSIZE] to OFFRANGE.  */
4610 	  offrange[0] += arrbounds[0];
4611 	  offrange[1] += arrbounds[1];
4612 	}
4613 
4614       /* Keep track of the minimum and maximum offset.  */
4615       if (offrange[1] < 0 && offrange[1] < extrema[0])
4616 	extrema[0] = offrange[1];
4617       if (offrange[0] > 0 && offrange[0] > extrema[1])
4618 	extrema[1] = offrange[0];
4619 
4620       if (offrange[0] < arrbounds[0])
4621 	offrange[0] = arrbounds[0];
4622 
4623       if (offrange[1] > arrbounds[1])
4624 	offrange[1] = arrbounds[1];
4625     }
4626 
4627   if (TREE_CODE (arg) == ADDR_EXPR)
4628     {
4629       arg = TREE_OPERAND (arg, 0);
4630       if (TREE_CODE (arg) != STRING_CST
4631 	  && TREE_CODE (arg) != VAR_DECL)
4632 	return;
4633     }
4634   else
4635     return;
4636 
4637   /* The type of the object being referred to.  It can be an array,
4638      string literal, or a non-array type when the MEM_REF represents
4639      a reference/subscript via a pointer to an object that is not
4640      an element of an array.  References to members of structs and
4641      unions are excluded because MEM_REF doesn't make it possible
4642      to identify the member where the reference originated.
4643      Incomplete types are excluded as well because their size is
4644      not known.  */
4645   tree reftype = TREE_TYPE (arg);
4646   if (POINTER_TYPE_P (reftype)
4647       || !COMPLETE_TYPE_P (reftype)
4648       || TREE_CODE (TYPE_SIZE_UNIT (reftype)) != INTEGER_CST
4649       || RECORD_OR_UNION_TYPE_P (reftype))
4650     return;
4651 
4652   offset_int eltsize;
4653   if (TREE_CODE (reftype) == ARRAY_TYPE)
4654     {
4655       eltsize = wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (reftype)));
4656 
4657       if (tree dom = TYPE_DOMAIN (reftype))
4658 	{
4659 	  tree bnds[] = { TYPE_MIN_VALUE (dom), TYPE_MAX_VALUE (dom) };
4660 	  if (array_at_struct_end_p (arg)
4661 	      || !bnds[0] || !bnds[1])
4662 	    {
4663 	      arrbounds[0] = 0;
4664 	      arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4665 	    }
4666 	  else
4667 	    {
4668 	      arrbounds[0] = wi::to_offset (bnds[0]) * eltsize;
4669 	      arrbounds[1] = (wi::to_offset (bnds[1]) + 1) * eltsize;
4670 	    }
4671 	}
4672       else
4673 	{
4674 	  arrbounds[0] = 0;
4675 	  arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4676 	}
4677 
4678       if (TREE_CODE (ref) == MEM_REF)
4679 	{
4680 	  /* For MEM_REF determine a tighter bound of the non-array
4681 	     element type.  */
4682 	  tree eltype = TREE_TYPE (reftype);
4683 	  while (TREE_CODE (eltype) == ARRAY_TYPE)
4684 	    eltype = TREE_TYPE (eltype);
4685 	  eltsize = wi::to_offset (TYPE_SIZE_UNIT (eltype));
4686 	}
4687     }
4688   else
4689     {
4690       eltsize = 1;
4691       arrbounds[0] = 0;
4692       arrbounds[1] = wi::to_offset (TYPE_SIZE_UNIT (reftype));
4693     }
4694 
4695   offrange[0] += ioff;
4696   offrange[1] += ioff;
4697 
4698   /* Compute the more permissive upper bound when IGNORE_OFF_BY_ONE
4699      is set (when taking the address of the one-past-last element
4700      of an array) but always use the stricter bound in diagnostics.  */
4701   offset_int ubound = arrbounds[1];
4702   if (ignore_off_by_one)
4703     ubound += 1;
4704 
4705   if (offrange[0] >= ubound || offrange[1] < arrbounds[0])
4706     {
4707       /* Treat a reference to a non-array object as one to an array
4708 	 of a single element.  */
4709       if (TREE_CODE (reftype) != ARRAY_TYPE)
4710 	reftype = build_array_type_nelts (reftype, 1);
4711 
4712       if (TREE_CODE (ref) == MEM_REF)
4713 	{
4714 	  /* Extract the element type out of MEM_REF and use its size
4715 	     to compute the index to print in the diagnostic; arrays
4716 	     in MEM_REF don't mean anything.  A type with no size like
4717 	     void is as good as having a size of 1.  */
4718 	  tree type = TREE_TYPE (ref);
4719 	  while (TREE_CODE (type) == ARRAY_TYPE)
4720 	    type = TREE_TYPE (type);
4721 	  if (tree size = TYPE_SIZE_UNIT (type))
4722 	    {
4723 	      offrange[0] = offrange[0] / wi::to_offset (size);
4724 	      offrange[1] = offrange[1] / wi::to_offset (size);
4725 	    }
4726 	}
4727       else
4728 	{
4729 	  /* For anything other than MEM_REF, compute the index to
4730 	     print in the diagnostic as the offset over element size.  */
4731 	  offrange[0] = offrange[0] / eltsize;
4732 	  offrange[1] = offrange[1] / eltsize;
4733 	}
4734 
4735       bool warned;
4736       if (offrange[0] == offrange[1])
4737 	warned = warning_at (location, OPT_Warray_bounds,
4738 			     "array subscript %wi is outside array bounds "
4739 			     "of %qT",
4740 			     offrange[0].to_shwi (), reftype);
4741       else
4742 	warned = warning_at (location, OPT_Warray_bounds,
4743 			     "array subscript [%wi, %wi] is outside "
4744 			     "array bounds of %qT",
4745 			     offrange[0].to_shwi (),
4746 			     offrange[1].to_shwi (), reftype);
4747       if (warned && DECL_P (arg))
4748 	inform (DECL_SOURCE_LOCATION (arg), "while referencing %qD", arg);
4749 
4750       if (warned)
4751 	TREE_NO_WARNING (ref) = 1;
4752       return;
4753     }
4754 
4755   if (warn_array_bounds < 2)
4756     return;
4757 
4758   /* At level 2 check also intermediate offsets.  */
4759   int i = 0;
4760   if (extrema[i] < -arrbounds[1] || extrema[i = 1] > ubound)
4761     {
4762       HOST_WIDE_INT tmpidx = extrema[i].to_shwi () / eltsize.to_shwi ();
4763 
4764       if (warning_at (location, OPT_Warray_bounds,
4765 		      "intermediate array offset %wi is outside array bounds "
4766 		      "of %qT", tmpidx, reftype))
4767 	TREE_NO_WARNING (ref) = 1;
4768     }
4769 }
4770 
4771 /* Search the expr T, located at LOCATION, for ARRAY_REFs and MEM_REFs,
4772    and call check_array_ref or check_mem_ref on each one found.  */
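
/* For instance (hypothetically), with int a[10] the expression &a[10]
   is accepted here because one-past-the-end addresses are valid, while
   &a[11] is diagnosed.  */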
4773 
4774 void
4775 vrp_prop::search_for_addr_array (tree t, location_t location)
4776 {
4777   /* Check each ARRAY_REF and MEM_REF in the reference chain. */
4778   do
4779     {
4780       if (TREE_CODE (t) == ARRAY_REF)
4781 	check_array_ref (location, t, true /*ignore_off_by_one*/);
4782       else if (TREE_CODE (t) == MEM_REF)
4783 	check_mem_ref (location, t, true /*ignore_off_by_one*/);
4784 
4785       t = TREE_OPERAND (t, 0);
4786     }
4787   while (handled_component_p (t) || TREE_CODE (t) == MEM_REF);
4788 
4789   if (TREE_CODE (t) != MEM_REF
4790       || TREE_CODE (TREE_OPERAND (t, 0)) != ADDR_EXPR
4791       || TREE_NO_WARNING (t))
4792     return;
4793 
4794   tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
4795   tree low_bound, up_bound, el_sz;
4796   if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
4797       || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
4798       || !TYPE_DOMAIN (TREE_TYPE (tem)))
4799     return;
4800 
4801   low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4802   up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4803   el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
4804   if (!low_bound
4805       || TREE_CODE (low_bound) != INTEGER_CST
4806       || !up_bound
4807       || TREE_CODE (up_bound) != INTEGER_CST
4808       || !el_sz
4809       || TREE_CODE (el_sz) != INTEGER_CST)
4810     return;
4811 
4812   offset_int idx;
4813   if (!mem_ref_offset (t).is_constant (&idx))
4814     return;
4815 
4816   bool warned = false;
4817   idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
4818   if (idx < 0)
4819     {
4820       if (dump_file && (dump_flags & TDF_DETAILS))
4821 	{
4822 	  fprintf (dump_file, "Array bound warning for ");
4823 	  dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4824 	  fprintf (dump_file, "\n");
4825 	}
4826       warned = warning_at (location, OPT_Warray_bounds,
4827 			   "array subscript %wi is below "
4828 			   "array bounds of %qT",
4829 			   idx.to_shwi (), TREE_TYPE (tem));
4830     }
4831   else if (idx > (wi::to_offset (up_bound)
4832 		  - wi::to_offset (low_bound) + 1))
4833     {
4834       if (dump_file && (dump_flags & TDF_DETAILS))
4835 	{
4836 	  fprintf (dump_file, "Array bound warning for ");
4837 	  dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4838 	  fprintf (dump_file, "\n");
4839 	}
4840       warned = warning_at (location, OPT_Warray_bounds,
4841 			   "array subscript %wu is above "
4842 			   "array bounds of %qT",
4843 			   idx.to_uhwi (), TREE_TYPE (tem));
4844     }
4845 
4846   if (warned)
4847     {
4848       if (DECL_P (t))
4849 	inform (DECL_SOURCE_LOCATION (t), "while referencing %qD", t);
4850 
4851       TREE_NO_WARNING (t) = 1;
4852     }
4853 }
4854 
4855 /* walk_tree() callback that checks whether *TP is an ARRAY_REF,
4856    a MEM_REF, or an ADDR_EXPR (inside which an array subscript one
4857    past the valid range is allowed), and calls check_array_ref,
4858    check_mem_ref or search_for_addr_array accordingly.  The location
4859    is passed in DATA.  */
4860 
4861 static tree
4862 check_array_bounds (tree *tp, int *walk_subtree, void *data)
4863 {
4864   tree t = *tp;
4865   struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
4866   location_t location;
4867 
4868   if (EXPR_HAS_LOCATION (t))
4869     location = EXPR_LOCATION (t);
4870   else
4871     location = gimple_location (wi->stmt);
4872 
4873   *walk_subtree = TRUE;
4874 
4875   vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
4876   if (TREE_CODE (t) == ARRAY_REF)
4877     vrp_prop->check_array_ref (location, t, false /*ignore_off_by_one*/);
4878   else if (TREE_CODE (t) == MEM_REF)
4879     vrp_prop->check_mem_ref (location, t, false /*ignore_off_by_one*/);
4880   else if (TREE_CODE (t) == ADDR_EXPR)
4881     {
4882       vrp_prop->search_for_addr_array (t, location);
4883       *walk_subtree = FALSE;
4884     }
4885 
4886   return NULL_TREE;
4887 }
4888 
4889 /* A dom_walker subclass for use by vrp_prop::check_all_array_refs,
4890    to walk over all statements of all reachable BBs and call
4891    check_array_bounds on them.  */
4892 
4893 class check_array_bounds_dom_walker : public dom_walker
4894 {
4895  public:
4896   check_array_bounds_dom_walker (vrp_prop *prop)
4897     : dom_walker (CDI_DOMINATORS,
4898 		  /* Discover non-executable edges, preserving EDGE_EXECUTABLE
4899 		     flags, so that we can merge in information on
4900 		     non-executable edges from vrp_folder.  */
4901 		  REACHABLE_BLOCKS_PRESERVING_FLAGS),
4902       m_prop (prop) {}
4903   ~check_array_bounds_dom_walker () {}
4904 
4905   edge before_dom_children (basic_block) FINAL OVERRIDE;
4906 
4907  private:
4908   vrp_prop *m_prop;
4909 };
4910 
4911 /* Implementation of dom_walker::before_dom_children.
4912 
4913    Walk over all statements of BB and call check_array_bounds on them,
4914    and determine if there's a unique successor edge.  */
4915 
4916 edge
4917 check_array_bounds_dom_walker::before_dom_children (basic_block bb)
4918 {
4919   gimple_stmt_iterator si;
4920   for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4921     {
4922       gimple *stmt = gsi_stmt (si);
4923       struct walk_stmt_info wi;
4924       if (!gimple_has_location (stmt)
4925 	  || is_gimple_debug (stmt))
4926 	continue;
4927 
4928       memset (&wi, 0, sizeof (wi));
4929 
4930       wi.info = m_prop;
4931 
4932       walk_gimple_op (stmt, check_array_bounds, &wi);
4933     }
4934 
4935   /* Determine if there's a unique successor edge, and if so, return
4936      that back to dom_walker, ensuring that we don't visit blocks that
4937      became unreachable during the VRP propagation
4938      (PR tree-optimization/83312).  */
4939   return find_taken_edge (bb, NULL_TREE);
4940 }
4941 
4942 /* Walk over all statements of all reachable BBs and call check_array_bounds
4943    on them.  */
4944 
4945 void
4946 vrp_prop::check_all_array_refs ()
4947 {
4948   check_array_bounds_dom_walker w (this);
4949   w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
4950 }
4951 
4952 /* Return true if all imm uses of VAR are either in STMT, or
4953    feed (optionally through a chain of single imm uses) GIMPLE_COND
4954    in basic block COND_BB.  */
4955 
4956 static bool
4957 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
4958 {
4959   use_operand_p use_p, use2_p;
4960   imm_use_iterator iter;
4961 
4962   FOR_EACH_IMM_USE_FAST (use_p, iter, var)
4963     if (USE_STMT (use_p) != stmt)
4964       {
4965 	gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
4966 	if (is_gimple_debug (use_stmt))
4967 	  continue;
4968 	while (is_gimple_assign (use_stmt)
4969 	       && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
4970 	       && single_imm_use (gimple_assign_lhs (use_stmt),
4971 				  &use2_p, &use_stmt2))
4972 	  use_stmt = use_stmt2;
4973 	if (gimple_code (use_stmt) != GIMPLE_COND
4974 	    || gimple_bb (use_stmt) != cond_bb)
4975 	  return false;
4976       }
4977   return true;
4978 }
4979 
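/* For illustration only (the SSA names and types below are
   hypothetical): the use-chain walk above accepts

     _1 = (unsigned int) x_3;
     _2 = _1 > 9;
     if (_2 != 0) ...          <-- GIMPLE_COND in COND_BB

   for VAR == x_3, because each intermediate result has a single
   immediate use and the chain terminates in a GIMPLE_COND in
   COND_BB; any use escaping to another statement or block makes the
   function return false.  */
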
4980 /* Handle
4981    _4 = x_3 & 31;
4982    if (_4 != 0)
4983      goto <bb 6>;
4984    else
4985      goto <bb 7>;
4986    <bb 6>:
4987    __builtin_unreachable ();
4988    <bb 7>:
4989    x_5 = ASSERT_EXPR <x_3, ...>;
4990    If x_3 has no other immediate uses (checked by caller),
4991    If x_3 has no other immediate uses (checked by the caller) and
4992    VAR is the x_3 from the ASSERT_EXPR, we can clear the low 5 bits
4993    from the non-zero bitmask.  */
4994 void
4995 maybe_set_nonzero_bits (edge e, tree var)
4996 {
4997   basic_block cond_bb = e->src;
4998   gimple *stmt = last_stmt (cond_bb);
4999   tree cst;
5000 
5001   if (stmt == NULL
5002       || gimple_code (stmt) != GIMPLE_COND
5003       || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
5004 				     ? EQ_EXPR : NE_EXPR)
5005       || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
5006       || !integer_zerop (gimple_cond_rhs (stmt)))
5007     return;
5008 
5009   stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
5010   if (!is_gimple_assign (stmt)
5011       || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
5012       || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
5013     return;
5014   if (gimple_assign_rhs1 (stmt) != var)
5015     {
5016       gimple *stmt2;
5017 
5018       if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5019 	return;
5020       stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
5021       if (!gimple_assign_cast_p (stmt2)
5022 	  || gimple_assign_rhs1 (stmt2) != var
5023 	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
5024 	  || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
5025 			      != TYPE_PRECISION (TREE_TYPE (var))))
5026 	return;
5027     }
5028   cst = gimple_assign_rhs2 (stmt);
5029   set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
5030 					  wi::to_wide (cst)));
5031 }
5032 
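/* For illustration only (hypothetical values): if get_nonzero_bits
   (VAR) is 0xff for an 8-bit VAR and CST is 31, the wi::bit_and_not
   above yields 0xe0, i.e. the low five bits of VAR are recorded as
   known zero and VAR is known to be a multiple of 32.  */
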
5033 /* Convert range assertion expressions into the implied copies and
5034    copy propagate away the copies.  Doing the trivial copy propagation
5035    here avoids the need to run the full copy propagation pass after
5036    VRP.
5037 
5038    FIXME, this will eventually lead to copy propagation removing the
5039    names that had useful range information attached to them.  For
5040    instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
5041    then N_i will have the range [3, +INF].
5042 
5043    However, by converting the assertion into the implied copy
5044    operation N_i = N_j, we will then copy-propagate N_j into the uses
5045    of N_i and lose the range information.  We may want to hold on to
5046    ASSERT_EXPRs a little while longer as the ranges could be used in
5047    things like jump threading.
5048 
5049    The problem with keeping ASSERT_EXPRs around is that passes after
5050    VRP need to handle them appropriately.
5051 
5052    Another approach would be to make the range information a first
5053    class property of the SSA_NAME so that it can be queried from
5054    any pass.  This is made somewhat more complex by the need for
5055    multiple ranges to be associated with one SSA_NAME.  */
5056 
5057 static void
5058 remove_range_assertions (void)
5059 {
5060   basic_block bb;
5061   gimple_stmt_iterator si;
5062   /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
5063      a basic block preceded by a GIMPLE_COND branching to it and
5064      __builtin_trap, -1 if not yet checked, 0 otherwise.  */
5065   int is_unreachable;
5066 
5067   /* Note that the BSI iterator bump happens at the bottom of the
5068      loop and no bump is necessary if we're removing the statement
5069      referenced by the current BSI.  */
5070   FOR_EACH_BB_FN (bb, cfun)
5071     for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
5072       {
5073 	gimple *stmt = gsi_stmt (si);
5074 
5075 	if (is_gimple_assign (stmt)
5076 	    && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
5077 	  {
5078 	    tree lhs = gimple_assign_lhs (stmt);
5079 	    tree rhs = gimple_assign_rhs1 (stmt);
5080 	    tree var;
5081 
5082 	    var = ASSERT_EXPR_VAR (rhs);
5083 
5084 	    if (TREE_CODE (var) == SSA_NAME
5085 		&& !POINTER_TYPE_P (TREE_TYPE (lhs))
5086 		&& SSA_NAME_RANGE_INFO (lhs))
5087 	      {
5088 		if (is_unreachable == -1)
5089 		  {
5090 		    is_unreachable = 0;
5091 		    if (single_pred_p (bb)
5092 			&& assert_unreachable_fallthru_edge_p
5093 						    (single_pred_edge (bb)))
5094 		      is_unreachable = 1;
5095 		  }
5096 		/* Handle
5097 		   if (x_7 >= 10 && x_7 < 20)
5098 		     __builtin_unreachable ();
5099 		   x_8 = ASSERT_EXPR <x_7, ...>;
5100 		   if the only uses of x_7 are in the ASSERT_EXPR and
5101 		   in the condition.  In that case, we can also copy
5102 		   the range info computed for x_8 in this pass over
5103 		   to x_7.  */
5104 		if (is_unreachable
5105 		    && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
5106 							  single_pred (bb)))
5107 		  {
5108 		    set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
5109 				    SSA_NAME_RANGE_INFO (lhs)->get_min (),
5110 				    SSA_NAME_RANGE_INFO (lhs)->get_max ());
5111 		    maybe_set_nonzero_bits (single_pred_edge (bb), var);
5112 		  }
5113 	      }
5114 
5115 	    /* Propagate the RHS into every use of the LHS.  For SSA names
5116 	       also propagate abnormals as it merely restores the original
5117 	       also propagate abnormals, as this merely restores the original
5118 	       IL in this case (a replace_uses_by would assert).  */
5119 	      {
5120 		imm_use_iterator iter;
5121 		use_operand_p use_p;
5122 		gimple *use_stmt;
5123 		FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
5124 		  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
5125 		    SET_USE (use_p, var);
5126 	      }
5127 	    else
5128 	      replace_uses_by (lhs, var);
5129 
5130 	    /* And finally, remove the copy, it is not needed.  */
5131 	    /* And finally, remove the copy; it is not needed.  */
5132 	    release_defs (stmt);
5133 	  }
5134 	else
5135 	  {
5136 	    if (!is_gimple_debug (gsi_stmt (si)))
5137 	      is_unreachable = 0;
5138 	    gsi_next (&si);
5139 	  }
5140       }
5141 }
5142 
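/* For illustration only (hypothetical names): after this pass

     x_8 = ASSERT_EXPR <x_7, x_7 > 3>;
     _1 = x_8 + 1;

   becomes

     _1 = x_7 + 1;

   and x_8 is released; a computed range such as [4, +INF] for x_8 is
   copied onto x_7 only in the __builtin_unreachable situation handled
   above.  */
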
5143 /* Return true if STMT is interesting for VRP.  */
5144 
5145 bool
5146 stmt_interesting_for_vrp (gimple *stmt)
5147 {
5148   if (gimple_code (stmt) == GIMPLE_PHI)
5149     {
5150       tree res = gimple_phi_result (stmt);
5151       return (!virtual_operand_p (res)
5152 	      && (INTEGRAL_TYPE_P (TREE_TYPE (res))
5153 		  || POINTER_TYPE_P (TREE_TYPE (res))));
5154     }
5155   else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
5156     {
5157       tree lhs = gimple_get_lhs (stmt);
5158 
5159       /* In general, assignments with virtual operands are not useful
5160 	 for deriving ranges, with the obvious exception of calls to
5161 	 builtin functions.  */
5162       if (lhs && TREE_CODE (lhs) == SSA_NAME
5163 	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5164 	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
5165 	  && (is_gimple_call (stmt)
5166 	      || !gimple_vuse (stmt)))
5167 	return true;
5168       else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5169 	switch (gimple_call_internal_fn (stmt))
5170 	  {
5171 	  case IFN_ADD_OVERFLOW:
5172 	  case IFN_SUB_OVERFLOW:
5173 	  case IFN_MUL_OVERFLOW:
5174 	  case IFN_ATOMIC_COMPARE_EXCHANGE:
5175 	    /* These internal calls return _Complex integer type,
5176 	       but are interesting to VRP nevertheless.  */
5177 	    if (lhs && TREE_CODE (lhs) == SSA_NAME)
5178 	      return true;
5179 	    break;
5180 	  default:
5181 	    break;
5182 	  }
5183     }
5184   else if (gimple_code (stmt) == GIMPLE_COND
5185 	   || gimple_code (stmt) == GIMPLE_SWITCH)
5186     return true;
5187 
5188   return false;
5189 }
5190 
5191 /* Initialization required by ssa_propagate engine.  */
5192 
5193 void
5194 vrp_prop::vrp_initialize ()
5195 {
5196   basic_block bb;
5197 
5198   FOR_EACH_BB_FN (bb, cfun)
5199     {
5200       for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
5201 	   gsi_next (&si))
5202 	{
5203 	  gphi *phi = si.phi ();
5204 	  if (!stmt_interesting_for_vrp (phi))
5205 	    {
5206 	      tree lhs = PHI_RESULT (phi);
5207 	      get_value_range (lhs)->set_varying ();
5208 	      prop_set_simulate_again (phi, false);
5209 	    }
5210 	  else
5211 	    prop_set_simulate_again (phi, true);
5212 	}
5213 
5214       for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
5215 	   gsi_next (&si))
5216         {
5217 	  gimple *stmt = gsi_stmt (si);
5218 
5219  	  /* If the statement is a control insn, we must simulate it
5220  	     at least once; failing to do so means its outgoing edges
5221  	     will never get added.  */
5222 	  if (stmt_ends_bb_p (stmt))
5223 	    prop_set_simulate_again (stmt, true);
5224 	  else if (!stmt_interesting_for_vrp (stmt))
5225 	    {
5226 	      set_defs_to_varying (stmt);
5227 	      prop_set_simulate_again (stmt, false);
5228 	    }
5229 	  else
5230 	    prop_set_simulate_again (stmt, true);
5231 	}
5232     }
5233 }
5234 
5235 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
5236 /* Searches the case label vector of switch statement STMT for the index
5237    *IDX of the CASE_LABEL that includes the value VAL.  The search is
5238    restricted to the range [START_IDX, n - 1] where n is the label count.
5239    If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
5240    returned.
5241 
5242    If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
5243    it is placed in IDX and false is returned.
5244 
5245    If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
5246    returned.  */
5247 
5248 bool
5249 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
5250 {
5251   size_t n = gimple_switch_num_labels (stmt);
5252   size_t low, high;
5253 
5254   /* Find case label for minimum of the value range or the next one.
5255      At each iteration we are searching in [low, high - 1]. */
5256 
5257   for (low = start_idx, high = n; high != low; )
5258     {
5259       tree t;
5260       int cmp;
5261       /* Note that i != high, so we never ask for n. */
5262       size_t i = (high + low) / 2;
5263       t = gimple_switch_label (stmt, i);
5264 
5265       /* Cache the result of comparing CASE_LOW and val.  */
5266       cmp = tree_int_cst_compare (CASE_LOW (t), val);
5267 
5268       if (cmp == 0)
5269 	{
5270 	  /* Ranges cannot be empty. */
5271 	  *idx = i;
5272 	  return true;
5273 	}
5274       else if (cmp > 0)
5275         high = i;
5276       else
5277 	{
5278 	  low = i + 1;
5279 	  if (CASE_HIGH (t) != NULL
5280 	      && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
5281 	    {
5282 	      *idx = i;
5283 	      return true;
5284 	    }
5285         }
5286     }
5287 
5288   *idx = high;
5289   return false;
5290 }
5291 
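/* For illustration only (hypothetical switch): with case labels at
   indices 1, 2 and 3 for the values 1, 3 ... 5 and 8 (n == 4):

     VAL == 4  ->  true,  *IDX == 2   (4 lies within 3 ... 5)
     VAL == 2  ->  false, *IDX == 2   (first label with CASE_LOW > 2)
     VAL == 9  ->  false, *IDX == 4   (== n; larger than all labels)  */
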
5292 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
5293 /* Searches the case label vector of switch statement STMT for the range of CASE_LABELs used
5294    last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
5295    then MAX_IDX < MIN_IDX.
5296    Returns true if the default label is not needed. */
5297 
5298 bool
5299 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
5300 		       size_t *max_idx)
5301 {
5302   size_t i, j;
5303   bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
5304   bool max_take_default = !find_case_label_index (stmt, i, max, &j);
5305 
5306   if (i == j
5307       && min_take_default
5308       && max_take_default)
5309     {
5310       /* Only the default case label reached.
5311          Return an empty range. */
5312       *min_idx = 1;
5313       *max_idx = 0;
5314       return false;
5315     }
5316   else
5317     {
5318       bool take_default = min_take_default || max_take_default;
5319       tree low, high;
5320       size_t k;
5321 
5322       if (max_take_default)
5323 	j--;
5324 
5325       /* If the case label range is contiguous, we do not need
5326 	 the default case label.  Verify that.  */
5327       high = CASE_LOW (gimple_switch_label (stmt, i));
5328       if (CASE_HIGH (gimple_switch_label (stmt, i)))
5329 	high = CASE_HIGH (gimple_switch_label (stmt, i));
5330       for (k = i + 1; k <= j; ++k)
5331 	{
5332 	  low = CASE_LOW (gimple_switch_label (stmt, k));
5333 	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
5334 	    {
5335 	      take_default = true;
5336 	      break;
5337 	    }
5338 	  high = low;
5339 	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
5340 	    high = CASE_HIGH (gimple_switch_label (stmt, k));
5341 	}
5342 
5343       *min_idx = i;
5344       *max_idx = j;
5345       return !take_default;
5346     }
5347 }
5348 
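/* For illustration only (hypothetical switch): with case labels for
   1 ... 3, 4 ... 6 and 10, a query for [MIN, MAX] == [2, 5] selects
   the first two labels and returns true (they are contiguous, so the
   default label is not needed), while [2, 10] spans the 7 ... 9 gap
   and returns false.  */
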
5349 /* Evaluate statement STMT.  If the statement produces a useful range,
5350    return SSA_PROP_INTERESTING and record the SSA name with the
5351    interesting range into *OUTPUT_P.
5352 
5353    If STMT is a conditional branch and we can determine its truth
5354    value, the taken edge is recorded in *TAKEN_EDGE_P.
5355 
5356    If STMT produces a varying value, return SSA_PROP_VARYING.  */
5357 
5358 enum ssa_prop_result
5359 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
5360 {
5361   tree lhs = gimple_get_lhs (stmt);
5362   value_range vr;
5363   extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
5364 
5365   if (*output_p)
5366     {
5367       if (update_value_range (*output_p, &vr))
5368 	{
5369 	  if (dump_file && (dump_flags & TDF_DETAILS))
5370 	    {
5371 	      fprintf (dump_file, "Found new range for ");
5372 	      print_generic_expr (dump_file, *output_p);
5373 	      fprintf (dump_file, ": ");
5374 	      dump_value_range (dump_file, &vr);
5375 	      fprintf (dump_file, "\n");
5376 	    }
5377 
5378 	  if (vr.varying_p ())
5379 	    return SSA_PROP_VARYING;
5380 
5381 	  return SSA_PROP_INTERESTING;
5382 	}
5383       return SSA_PROP_NOT_INTERESTING;
5384     }
5385 
5386   if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5387     switch (gimple_call_internal_fn (stmt))
5388       {
5389       case IFN_ADD_OVERFLOW:
5390       case IFN_SUB_OVERFLOW:
5391       case IFN_MUL_OVERFLOW:
5392       case IFN_ATOMIC_COMPARE_EXCHANGE:
5393 	/* These internal calls return _Complex integer type,
5394 	   which VRP does not track, but the immediate uses
5395 	   thereof might be interesting.  */
5396 	if (lhs && TREE_CODE (lhs) == SSA_NAME)
5397 	  {
5398 	    imm_use_iterator iter;
5399 	    use_operand_p use_p;
5400 	    enum ssa_prop_result res = SSA_PROP_VARYING;
5401 
5402 	    get_value_range (lhs)->set_varying ();
5403 
5404 	    FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
5405 	      {
5406 		gimple *use_stmt = USE_STMT (use_p);
5407 		if (!is_gimple_assign (use_stmt))
5408 		  continue;
5409 		enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
5410 		if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
5411 		  continue;
5412 		tree rhs1 = gimple_assign_rhs1 (use_stmt);
5413 		tree use_lhs = gimple_assign_lhs (use_stmt);
5414 		if (TREE_CODE (rhs1) != rhs_code
5415 		    || TREE_OPERAND (rhs1, 0) != lhs
5416 		    || TREE_CODE (use_lhs) != SSA_NAME
5417 		    || !stmt_interesting_for_vrp (use_stmt)
5418 		    || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
5419 			|| !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
5420 			|| !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
5421 		  continue;
5422 
5423 		/* If there is a change in the value range for any of the
5424 		   REALPART_EXPR/IMAGPART_EXPR immediate uses, return
5425 		   SSA_PROP_INTERESTING.  If there are any REALPART_EXPR
5426 		   or IMAGPART_EXPR immediate uses, but none of them have
5427 		   a change in their value ranges, return
5428 		   SSA_PROP_NOT_INTERESTING.  If there are no
5429 		   {REAL,IMAG}PART_EXPR uses at all,
5430 		   return SSA_PROP_VARYING.  */
5431 		value_range new_vr;
5432 		extract_range_basic (&new_vr, use_stmt);
5433 		const value_range *old_vr = get_value_range (use_lhs);
5434 		if (!old_vr->equal_p (new_vr, /*ignore_equivs=*/false))
5435 		  res = SSA_PROP_INTERESTING;
5436 		else
5437 		  res = SSA_PROP_NOT_INTERESTING;
5438 		new_vr.equiv_clear ();
5439 		if (res == SSA_PROP_INTERESTING)
5440 		  {
5441 		    *output_p = lhs;
5442 		    return res;
5443 		  }
5444 	      }
5445 
5446 	    return res;
5447 	  }
5448 	break;
5449       default:
5450 	break;
5451       }
5452 
5453   /* All other statements produce nothing of interest for VRP, so mark
5454      their outputs varying and prevent further simulation.  */
5455   set_defs_to_varying (stmt);
5456 
5457   return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
5458 }
5459 
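/* For illustration only (hypothetical names): the internal-call case
   above matches uses such as

     _5 = .ADD_OVERFLOW (x_2, y_3);
     _6 = REALPART_EXPR <_5>;   <-- the result, trackable by VRP
     _7 = IMAGPART_EXPR <_5>;   <-- the overflow flag

   where _5 itself has _Complex integer type and is left varying,
   while ranges for _6 and _7 are derived via extract_range_basic.  */
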
5460 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5461    { VR1TYPE, VR0MIN, VR0MAX } and store the result
5462    { VR1TYPE, VR1MIN, VR1MAX } and store the result
5463    possible such range.  The resulting range is not canonicalized.  */
5464 
5465 static void
5466 union_ranges (enum value_range_kind *vr0type,
5467 	      tree *vr0min, tree *vr0max,
5468 	      enum value_range_kind vr1type,
5469 	      tree vr1min, tree vr1max)
5470 {
5471   bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5472   bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5473 
5474   /* [] is vr0, () is vr1 in the following classification comments.  */
5475   if (mineq && maxeq)
5476     {
5477       /* [(  )] */
5478       if (*vr0type == vr1type)
5479 	/* Nothing to do for equal ranges.  */
5480 	;
5481       else if ((*vr0type == VR_RANGE
5482 		&& vr1type == VR_ANTI_RANGE)
5483 	       || (*vr0type == VR_ANTI_RANGE
5484 		   && vr1type == VR_RANGE))
5485 	{
5486 	  /* For anti-range with range union the result is varying.  */
5487 	  goto give_up;
5488 	}
5489       else
5490 	gcc_unreachable ();
5491     }
5492   else if (operand_less_p (*vr0max, vr1min) == 1
5493 	   || operand_less_p (vr1max, *vr0min) == 1)
5494     {
5495       /* [ ] ( ) or ( ) [ ]
5496 	 If the ranges have an empty intersection, the result of the
5497 	 union operation is the anti-range; if both are anti-ranges
5498 	 it covers everything.  */
5499       if (*vr0type == VR_ANTI_RANGE
5500 	  && vr1type == VR_ANTI_RANGE)
5501 	goto give_up;
5502       else if (*vr0type == VR_ANTI_RANGE
5503 	       && vr1type == VR_RANGE)
5504 	;
5505       else if (*vr0type == VR_RANGE
5506 	       && vr1type == VR_ANTI_RANGE)
5507 	{
5508 	  *vr0type = vr1type;
5509 	  *vr0min = vr1min;
5510 	  *vr0max = vr1max;
5511 	}
5512       else if (*vr0type == VR_RANGE
5513 	       && vr1type == VR_RANGE)
5514 	{
5515 	  /* The result is the convex hull of both ranges.  */
5516 	  if (operand_less_p (*vr0max, vr1min) == 1)
5517 	    {
5518 	      /* If the result can be an anti-range, create one.  */
5519 	      if (TREE_CODE (*vr0max) == INTEGER_CST
5520 		  && TREE_CODE (vr1min) == INTEGER_CST
5521 		  && vrp_val_is_min (*vr0min)
5522 		  && vrp_val_is_max (vr1max))
5523 		{
5524 		  tree min = int_const_binop (PLUS_EXPR,
5525 					      *vr0max,
5526 					      build_int_cst (TREE_TYPE (*vr0max), 1));
5527 		  tree max = int_const_binop (MINUS_EXPR,
5528 					      vr1min,
5529 					      build_int_cst (TREE_TYPE (vr1min), 1));
5530 		  if (!operand_less_p (max, min))
5531 		    {
5532 		      *vr0type = VR_ANTI_RANGE;
5533 		      *vr0min = min;
5534 		      *vr0max = max;
5535 		    }
5536 		  else
5537 		    *vr0max = vr1max;
5538 		}
5539 	      else
5540 		*vr0max = vr1max;
5541 	    }
5542 	  else
5543 	    {
5544 	      /* If the result can be an anti-range, create one.  */
5545 	      if (TREE_CODE (vr1max) == INTEGER_CST
5546 		  && TREE_CODE (*vr0min) == INTEGER_CST
5547 		  && vrp_val_is_min (vr1min)
5548 		  && vrp_val_is_max (*vr0max))
5549 		{
5550 		  tree min = int_const_binop (PLUS_EXPR,
5551 					      vr1max,
5552 					      build_int_cst (TREE_TYPE (vr1max), 1));
5553 		  tree max = int_const_binop (MINUS_EXPR,
5554 					      *vr0min,
5555 					      build_int_cst (TREE_TYPE (*vr0min), 1));
5556 		  if (!operand_less_p (max, min))
5557 		    {
5558 		      *vr0type = VR_ANTI_RANGE;
5559 		      *vr0min = min;
5560 		      *vr0max = max;
5561 		    }
5562 		  else
5563 		    *vr0min = vr1min;
5564 		}
5565 	      else
5566 		*vr0min = vr1min;
5567 	    }
5568 	}
5569       else
5570 	gcc_unreachable ();
5571     }
5572   else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5573 	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5574     {
5575       /* [ (  ) ] or [(  ) ] or [ (  )] */
5576       if (*vr0type == VR_RANGE
5577 	  && vr1type == VR_RANGE)
5578 	;
5579       else if (*vr0type == VR_ANTI_RANGE
5580 	       && vr1type == VR_ANTI_RANGE)
5581 	{
5582 	  *vr0type = vr1type;
5583 	  *vr0min = vr1min;
5584 	  *vr0max = vr1max;
5585 	}
5586       else if (*vr0type == VR_ANTI_RANGE
5587 	       && vr1type == VR_RANGE)
5588 	{
5589 	  /* Arbitrarily choose the right or left gap.  */
5590 	  if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
5591 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5592 				       build_int_cst (TREE_TYPE (vr1min), 1));
5593 	  else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
5594 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5595 				       build_int_cst (TREE_TYPE (vr1max), 1));
5596 	  else
5597 	    goto give_up;
5598 	}
5599       else if (*vr0type == VR_RANGE
5600 	       && vr1type == VR_ANTI_RANGE)
5601 	/* The result covers everything.  */
5602 	goto give_up;
5603       else
5604 	gcc_unreachable ();
5605     }
5606   else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5607 	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5608     {
5609       /* ( [  ] ) or ([  ] ) or ( [  ]) */
5610       if (*vr0type == VR_RANGE
5611 	  && vr1type == VR_RANGE)
5612 	{
5613 	  *vr0type = vr1type;
5614 	  *vr0min = vr1min;
5615 	  *vr0max = vr1max;
5616 	}
5617       else if (*vr0type == VR_ANTI_RANGE
5618 	       && vr1type == VR_ANTI_RANGE)
5619 	;
5620       else if (*vr0type == VR_RANGE
5621 	       && vr1type == VR_ANTI_RANGE)
5622 	{
5623 	  *vr0type = VR_ANTI_RANGE;
5624 	  if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
5625 	    {
5626 	      *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5627 					 build_int_cst (TREE_TYPE (*vr0min), 1));
5628 	      *vr0min = vr1min;
5629 	    }
5630 	  else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
5631 	    {
5632 	      *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5633 					 build_int_cst (TREE_TYPE (*vr0max), 1));
5634 	      *vr0max = vr1max;
5635 	    }
5636 	  else
5637 	    goto give_up;
5638 	}
5639       else if (*vr0type == VR_ANTI_RANGE
5640 	       && vr1type == VR_RANGE)
5641 	/* The result covers everything.  */
5642 	goto give_up;
5643       else
5644 	gcc_unreachable ();
5645     }
5646   else if ((operand_less_p (vr1min, *vr0max) == 1
5647 	    || operand_equal_p (vr1min, *vr0max, 0))
5648 	   && operand_less_p (*vr0min, vr1min) == 1
5649 	   && operand_less_p (*vr0max, vr1max) == 1)
5650     {
5651       /* [  (  ]  ) or [   ](   ) */
5652       if (*vr0type == VR_RANGE
5653 	  && vr1type == VR_RANGE)
5654 	*vr0max = vr1max;
5655       else if (*vr0type == VR_ANTI_RANGE
5656 	       && vr1type == VR_ANTI_RANGE)
5657 	*vr0min = vr1min;
5658       else if (*vr0type == VR_ANTI_RANGE
5659 	       && vr1type == VR_RANGE)
5660 	{
5661 	  if (TREE_CODE (vr1min) == INTEGER_CST)
5662 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5663 				       build_int_cst (TREE_TYPE (vr1min), 1));
5664 	  else
5665 	    goto give_up;
5666 	}
5667       else if (*vr0type == VR_RANGE
5668 	       && vr1type == VR_ANTI_RANGE)
5669 	{
5670 	  if (TREE_CODE (*vr0max) == INTEGER_CST)
5671 	    {
5672 	      *vr0type = vr1type;
5673 	      *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5674 					 build_int_cst (TREE_TYPE (*vr0max), 1));
5675 	      *vr0max = vr1max;
5676 	    }
5677 	  else
5678 	    goto give_up;
5679 	}
5680       else
5681 	gcc_unreachable ();
5682     }
5683   else if ((operand_less_p (*vr0min, vr1max) == 1
5684 	    || operand_equal_p (*vr0min, vr1max, 0))
5685 	   && operand_less_p (vr1min, *vr0min) == 1
5686 	   && operand_less_p (vr1max, *vr0max) == 1)
5687     {
5688       /* (  [  )  ] or (   )[   ] */
5689       if (*vr0type == VR_RANGE
5690 	  && vr1type == VR_RANGE)
5691 	*vr0min = vr1min;
5692       else if (*vr0type == VR_ANTI_RANGE
5693 	       && vr1type == VR_ANTI_RANGE)
5694 	*vr0max = vr1max;
5695       else if (*vr0type == VR_ANTI_RANGE
5696 	       && vr1type == VR_RANGE)
5697 	{
5698 	  if (TREE_CODE (vr1max) == INTEGER_CST)
5699 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5700 				       build_int_cst (TREE_TYPE (vr1max), 1));
5701 	  else
5702 	    goto give_up;
5703 	}
5704       else if (*vr0type == VR_RANGE
5705 	       && vr1type == VR_ANTI_RANGE)
5706 	{
5707 	  if (TREE_CODE (*vr0min) == INTEGER_CST)
5708 	    {
5709 	      *vr0type = vr1type;
5710 	      *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5711 					 build_int_cst (TREE_TYPE (*vr0min), 1));
5712 	      *vr0min = vr1min;
5713 	    }
5714 	  else
5715 	    goto give_up;
5716 	}
5717       else
5718 	gcc_unreachable ();
5719     }
5720   else
5721     goto give_up;
5722 
5723   return;
5724 
5725 give_up:
5726   *vr0type = VR_VARYING;
5727   *vr0min = NULL_TREE;
5728   *vr0max = NULL_TREE;
5729 }
5730 
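/* For illustration only (example integer ranges; the anti-range case
   assumes an unsigned type so that 0 and MAX are the type bounds):

     [1, 5]   union  [3, 10]   ->  [1, 10]   (convex hull)
     [0, 2]   union  [8, MAX]  ->  ~[3, 7]   (hull would cover all,
                                              so build an anti-range)
     ~[0, 0]  union  [5, 6]    ->  ~[0, 0]   (range within anti-range)

   When no single range or anti-range can represent the union, the
   function gives up and the result becomes VR_VARYING.  */
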
5731 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5732    { VR1TYPE, VR0MIN, VR0MAX } and store the result
5733    { VR1TYPE, VR1MIN, VR1MAX } and store the result
5734    possible such range.  The resulting range is not canonicalized.  */
5735 
5736 static void
5737 intersect_ranges (enum value_range_kind *vr0type,
5738 		  tree *vr0min, tree *vr0max,
5739 		  enum value_range_kind vr1type,
5740 		  tree vr1min, tree vr1max)
5741 {
5742   bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5743   bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5744 
5745   /* [] is vr0, () is vr1 in the following classification comments.  */
5746   if (mineq && maxeq)
5747     {
5748       /* [(  )] */
5749       if (*vr0type == vr1type)
5750 	/* Nothing to do for equal ranges.  */
5751 	;
5752       else if ((*vr0type == VR_RANGE
5753 		&& vr1type == VR_ANTI_RANGE)
5754 	       || (*vr0type == VR_ANTI_RANGE
5755 		   && vr1type == VR_RANGE))
5756 	{
5757 	  /* For anti-range with range intersection the result is empty.  */
5758 	  *vr0type = VR_UNDEFINED;
5759 	  *vr0min = NULL_TREE;
5760 	  *vr0max = NULL_TREE;
5761 	}
5762       else
5763 	gcc_unreachable ();
5764     }
5765   else if (operand_less_p (*vr0max, vr1min) == 1
5766 	   || operand_less_p (vr1max, *vr0min) == 1)
5767     {
5768       /* [ ] ( ) or ( ) [ ]
5769 	 If the ranges have an empty intersection, the result of the
5770 	 intersect operation is the range when intersecting an
5771 	 anti-range with a range, or empty when intersecting two ranges.  */
5772       if (*vr0type == VR_RANGE
5773 	  && vr1type == VR_ANTI_RANGE)
5774 	;
5775       else if (*vr0type == VR_ANTI_RANGE
5776 	       && vr1type == VR_RANGE)
5777 	{
5778 	  *vr0type = vr1type;
5779 	  *vr0min = vr1min;
5780 	  *vr0max = vr1max;
5781 	}
5782       else if (*vr0type == VR_RANGE
5783 	       && vr1type == VR_RANGE)
5784 	{
5785 	  *vr0type = VR_UNDEFINED;
5786 	  *vr0min = NULL_TREE;
5787 	  *vr0max = NULL_TREE;
5788 	}
5789       else if (*vr0type == VR_ANTI_RANGE
5790 	       && vr1type == VR_ANTI_RANGE)
5791 	{
5792 	  /* If the anti-ranges are adjacent to each other merge them.  */
5793 	  if (TREE_CODE (*vr0max) == INTEGER_CST
5794 	      && TREE_CODE (vr1min) == INTEGER_CST
5795 	      && operand_less_p (*vr0max, vr1min) == 1
5796 	      && integer_onep (int_const_binop (MINUS_EXPR,
5797 						vr1min, *vr0max)))
5798 	    *vr0max = vr1max;
5799 	  else if (TREE_CODE (vr1max) == INTEGER_CST
5800 		   && TREE_CODE (*vr0min) == INTEGER_CST
5801 		   && operand_less_p (vr1max, *vr0min) == 1
5802 		   && integer_onep (int_const_binop (MINUS_EXPR,
5803 						     *vr0min, vr1max)))
5804 	    *vr0min = vr1min;
5805 	  /* Else arbitrarily take VR0.  */
5806 	}
5807     }
5808   else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5809 	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5810     {
5811       /* [ (  ) ] or [(  ) ] or [ (  )] */
5812       if (*vr0type == VR_RANGE
5813 	  && vr1type == VR_RANGE)
5814 	{
5815 	  /* If both are ranges the result is the inner one.  */
5816 	  *vr0type = vr1type;
5817 	  *vr0min = vr1min;
5818 	  *vr0max = vr1max;
5819 	}
5820       else if (*vr0type == VR_RANGE
5821 	       && vr1type == VR_ANTI_RANGE)
5822 	{
5823 	  /* Choose the right gap if the left one is empty.  */
5824 	  if (mineq)
5825 	    {
5826 	      if (TREE_CODE (vr1max) != INTEGER_CST)
5827 		*vr0min = vr1max;
5828 	      else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
5829 		       && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
5830 		*vr0min
5831 		  = int_const_binop (MINUS_EXPR, vr1max,
5832 				     build_int_cst (TREE_TYPE (vr1max), -1));
5833 	      else
5834 		*vr0min
5835 		  = int_const_binop (PLUS_EXPR, vr1max,
5836 				     build_int_cst (TREE_TYPE (vr1max), 1));
5837 	    }
5838 	  /* Choose the left gap if the right one is empty.  */
5839 	  else if (maxeq)
5840 	    {
5841 	      if (TREE_CODE (vr1min) != INTEGER_CST)
5842 		*vr0max = vr1min;
5843 	      else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
5844 		       && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
5845 		*vr0max
5846 		  = int_const_binop (PLUS_EXPR, vr1min,
5847 				     build_int_cst (TREE_TYPE (vr1min), -1));
5848 	      else
5849 		*vr0max
5850 		  = int_const_binop (MINUS_EXPR, vr1min,
5851 				     build_int_cst (TREE_TYPE (vr1min), 1));
5852 	    }
5853 	  /* Choose the anti-range if the range is effectively varying.  */
5854 	  else if (vrp_val_is_min (*vr0min)
5855 		   && vrp_val_is_max (*vr0max))
5856 	    {
5857 	      *vr0type = vr1type;
5858 	      *vr0min = vr1min;
5859 	      *vr0max = vr1max;
5860 	    }
5861 	  /* Else choose the range.  */
5862 	}
5863       else if (*vr0type == VR_ANTI_RANGE
5864 	       && vr1type == VR_ANTI_RANGE)
5865 	/* If both are anti-ranges the result is the outer one.  */
5866 	;
5867       else if (*vr0type == VR_ANTI_RANGE
5868 	       && vr1type == VR_RANGE)
5869 	{
5870 	  /* The intersection is empty.  */
5871 	  *vr0type = VR_UNDEFINED;
5872 	  *vr0min = NULL_TREE;
5873 	  *vr0max = NULL_TREE;
5874 	}
5875       else
5876 	gcc_unreachable ();
5877     }
5878   else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5879 	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5880     {
5881       /* ( [  ] ) or ([  ] ) or ( [  ]) */
5882       if (*vr0type == VR_RANGE
5883 	  && vr1type == VR_RANGE)
5884 	/* Choose the inner range.  */
5885 	;
5886       else if (*vr0type == VR_ANTI_RANGE
5887 	       && vr1type == VR_RANGE)
5888 	{
5889 	  /* Choose the right gap if the left is empty.  */
5890 	  if (mineq)
5891 	    {
5892 	      *vr0type = VR_RANGE;
5893 	      if (TREE_CODE (*vr0max) != INTEGER_CST)
5894 		*vr0min = *vr0max;
5895 	      else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
5896 		       && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
5897 		*vr0min
5898 		  = int_const_binop (MINUS_EXPR, *vr0max,
5899 				     build_int_cst (TREE_TYPE (*vr0max), -1));
5900 	      else
5901 		*vr0min
5902 		  = int_const_binop (PLUS_EXPR, *vr0max,
5903 				     build_int_cst (TREE_TYPE (*vr0max), 1));
5904 	      *vr0max = vr1max;
5905 	    }
5906 	  /* Choose the left gap if the right is empty.  */
5907 	  else if (maxeq)
5908 	    {
5909 	      *vr0type = VR_RANGE;
5910 	      if (TREE_CODE (*vr0min) != INTEGER_CST)
5911 		*vr0max = *vr0min;
5912 	      else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
5913 		       && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
5914 		*vr0max
5915 		  = int_const_binop (PLUS_EXPR, *vr0min,
5916 				     build_int_cst (TREE_TYPE (*vr0min), -1));
5917 	      else
5918 		*vr0max
5919 		  = int_const_binop (MINUS_EXPR, *vr0min,
5920 				     build_int_cst (TREE_TYPE (*vr0min), 1));
5921 	      *vr0min = vr1min;
5922 	    }
5923 	  /* Choose the anti-range if the range is effectively varying.  */
5924 	  else if (vrp_val_is_min (vr1min)
5925 		   && vrp_val_is_max (vr1max))
5926 	    ;
5927 	  /* Choose the anti-range if it is ~[0,0]; that range is special
5928 	     enough to special-case when VR1's range is relatively wide,
5929 	     at least for types as wide as int.  This covers pointers
5930 	     and arguments to functions like ctz.  */
5931 	  else if (*vr0min == *vr0max
5932 		   && integer_zerop (*vr0min)
5933 		   && ((TYPE_PRECISION (TREE_TYPE (*vr0min))
5934 			>= TYPE_PRECISION (integer_type_node))
5935 		       || POINTER_TYPE_P (TREE_TYPE (*vr0min)))
5936 		   && TREE_CODE (vr1max) == INTEGER_CST
5937 		   && TREE_CODE (vr1min) == INTEGER_CST
5938 		   && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
5939 		       < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
5940 	    ;
5941 	  /* Else choose the range.  */
5942 	  else
5943 	    {
5944 	      *vr0type = vr1type;
5945 	      *vr0min = vr1min;
5946 	      *vr0max = vr1max;
5947 	    }
5948 	}
5949       else if (*vr0type == VR_ANTI_RANGE
5950 	       && vr1type == VR_ANTI_RANGE)
5951 	{
5952 	  /* If both are anti-ranges the result is the outer one.  */
5953 	  *vr0type = vr1type;
5954 	  *vr0min = vr1min;
5955 	  *vr0max = vr1max;
5956 	}
5957       else if (vr1type == VR_ANTI_RANGE
5958 	       && *vr0type == VR_RANGE)
5959 	{
5960 	  /* The intersection is empty.  */
5961 	  *vr0type = VR_UNDEFINED;
5962 	  *vr0min = NULL_TREE;
5963 	  *vr0max = NULL_TREE;
5964 	}
5965       else
5966 	gcc_unreachable ();
5967     }
5968   else if ((operand_less_p (vr1min, *vr0max) == 1
5969 	    || operand_equal_p (vr1min, *vr0max, 0))
5970 	   && operand_less_p (*vr0min, vr1min) == 1)
5971     {
5972       /* [  (  ]  ) or [  ](  ) */
5973       if (*vr0type == VR_ANTI_RANGE
5974 	  && vr1type == VR_ANTI_RANGE)
5975 	*vr0max = vr1max;
5976       else if (*vr0type == VR_RANGE
5977 	       && vr1type == VR_RANGE)
5978 	*vr0min = vr1min;
5979       else if (*vr0type == VR_RANGE
5980 	       && vr1type == VR_ANTI_RANGE)
5981 	{
5982 	  if (TREE_CODE (vr1min) == INTEGER_CST)
5983 	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5984 				       build_int_cst (TREE_TYPE (vr1min), 1));
5985 	  else
5986 	    *vr0max = vr1min;
5987 	}
5988       else if (*vr0type == VR_ANTI_RANGE
5989 	       && vr1type == VR_RANGE)
5990 	{
5991 	  *vr0type = VR_RANGE;
5992 	  if (TREE_CODE (*vr0max) == INTEGER_CST)
5993 	    *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5994 				       build_int_cst (TREE_TYPE (*vr0max), 1));
5995 	  else
5996 	    *vr0min = *vr0max;
5997 	  *vr0max = vr1max;
5998 	}
5999       else
6000 	gcc_unreachable ();
6001     }
6002   else if ((operand_less_p (*vr0min, vr1max) == 1
6003 	    || operand_equal_p (*vr0min, vr1max, 0))
6004 	   && operand_less_p (vr1min, *vr0min) == 1)
6005     {
6006       /* (  [  )  ] or (  )[  ] */
6007       if (*vr0type == VR_ANTI_RANGE
6008 	  && vr1type == VR_ANTI_RANGE)
6009 	*vr0min = vr1min;
6010       else if (*vr0type == VR_RANGE
6011 	       && vr1type == VR_RANGE)
6012 	*vr0max = vr1max;
6013       else if (*vr0type == VR_RANGE
6014 	       && vr1type == VR_ANTI_RANGE)
6015 	{
6016 	  if (TREE_CODE (vr1max) == INTEGER_CST)
6017 	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
6018 				       build_int_cst (TREE_TYPE (vr1max), 1));
6019 	  else
6020 	    *vr0min = vr1max;
6021 	}
6022       else if (*vr0type == VR_ANTI_RANGE
6023 	       && vr1type == VR_RANGE)
6024 	{
6025 	  *vr0type = VR_RANGE;
6026 	  if (TREE_CODE (*vr0min) == INTEGER_CST)
6027 	    *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
6028 				       build_int_cst (TREE_TYPE (*vr0min), 1));
6029 	  else
6030 	    *vr0max = *vr0min;
6031 	  *vr0min = vr1min;
6032 	}
6033       else
6034 	gcc_unreachable ();
6035     }
6036 
6037   /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
6038      the result for the intersection.  That's always a conservatively
6039      correct estimate, unless VR1 is a constant singleton range,
6040      in which case we choose that.  */
6041   if (vr1type == VR_RANGE
6042       && is_gimple_min_invariant (vr1min)
6043       && vrp_operand_equal_p (vr1min, vr1max))
6044     {
6045       *vr0type = vr1type;
6046       *vr0min = vr1min;
6047       *vr0max = vr1max;
6048     }
6049 }
6050 
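/* For illustration only (example integer ranges):

     [1, 10]  intersect  [5, 20]  ->  [5, 10]        (overlap)
     [4, 10]  intersect  ~[4, 6]  ->  [7, 10]        (left gap empty,
                                                      keep the right)
     [1, 4]   intersect  [6, 9]   ->  VR_UNDEFINED   (empty)

   When the exact result is not representable, VR0 is left unchanged,
   which is a conservatively correct (larger) answer.  */
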
6051 
6052 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
6053    in *VR0.  This may not be the smallest possible such range.  */
6054 
6055 void
6056 value_range::intersect_helper (value_range *vr0, const value_range *vr1)
6057 {
6058   /* If either range is VR_VARYING the other one wins.  */
6059   if (vr1->varying_p ())
6060     return;
6061   if (vr0->varying_p ())
6062     {
6063       vr0->deep_copy (vr1);
6064       return;
6065     }
6066 
6067   /* When either range is VR_UNDEFINED the resulting range is
6068      VR_UNDEFINED, too.  */
6069   if (vr0->undefined_p ())
6070     return;
6071   if (vr1->undefined_p ())
6072     {
6073       vr0->set_undefined ();
6074       return;
6075     }
6076 
6077   value_range_kind vr0type = vr0->kind ();
6078   tree vr0min = vr0->min ();
6079   tree vr0max = vr0->max ();
6080   intersect_ranges (&vr0type, &vr0min, &vr0max,
6081 		    vr1->kind (), vr1->min (), vr1->max ());
6082   /* Make sure to canonicalize the result though as the inversion of a
6083      VR_RANGE can still be a VR_RANGE.  Work on a temporary so we can
6084      fall back to vr0 when this turns things to varying.  */
6085   value_range tem;
6086   tem.set_and_canonicalize (vr0type, vr0min, vr0max);
6087   /* If that failed, use the saved original VR0.  */
6088   if (tem.varying_p ())
6089     return;
6090   vr0->update (tem.kind (), tem.min (), tem.max ());
6091 
6092   /* If the result is VR_UNDEFINED there is no need to mess with
6093      the equivalencies.  */
6094   if (vr0->undefined_p ())
6095     return;
6096 
6097   /* The resulting set of equivalences for range intersection is the union of
6098      the two sets.  */
6099   if (vr0->m_equiv && vr1->m_equiv && vr0->m_equiv != vr1->m_equiv)
6100     bitmap_ior_into (vr0->m_equiv, vr1->m_equiv);
6101   else if (vr1->m_equiv && !vr0->m_equiv)
6102     {
6103       /* All equivalence bitmaps are allocated from the same obstack.  So
6104 	 we can use the obstack associated with VR1 to allocate vr0->equiv.  */
6105       vr0->m_equiv = BITMAP_ALLOC (vr1->m_equiv->obstack);
6106       bitmap_copy (m_equiv, vr1->m_equiv);
6107     }
6108 }
6109 
6110 void
6111 value_range::intersect (const value_range *other)
6112 {
6113   if (dump_file && (dump_flags & TDF_DETAILS))
6114     {
6115       fprintf (dump_file, "Intersecting\n  ");
6116       dump_value_range (dump_file, this);
6117       fprintf (dump_file, "\nand\n  ");
6118       dump_value_range (dump_file, other);
6119       fprintf (dump_file, "\n");
6120     }
6121   intersect_helper (this, other);
6122   if (dump_file && (dump_flags & TDF_DETAILS))
6123     {
6124       fprintf (dump_file, "to\n  ");
6125       dump_value_range (dump_file, this);
6126       fprintf (dump_file, "\n");
6127     }
6128 }
6129 
6130 /* Helper for meet operation for value ranges.  Given two value ranges VR0 and
6131    VR1, return a range that contains both VR0 and VR1.  This may not be the
6132    smallest possible such range.  */
6133 
6134 value_range_base
6135 value_range_base::union_helper (const value_range_base *vr0,
6136 				const value_range_base *vr1)
6137 {
6138   /* VR0 has the resulting range if VR1 is undefined or VR0 is varying.  */
6139   if (vr1->undefined_p ()
6140       || vr0->varying_p ())
6141     return *vr0;
6142 
6143   /* VR1 has the resulting range if VR0 is undefined or VR1 is varying.  */
6144   if (vr0->undefined_p ()
6145       || vr1->varying_p ())
6146     return *vr1;
6147 
6148   value_range_kind vr0type = vr0->kind ();
6149   tree vr0min = vr0->min ();
6150   tree vr0max = vr0->max ();
6151   union_ranges (&vr0type, &vr0min, &vr0max,
6152 		vr1->kind (), vr1->min (), vr1->max ());
6153 
6154   /* Work on a temporary so we can still use vr0 when union returns varying.  */
6155   value_range tem;
6156   tem.set_and_canonicalize (vr0type, vr0min, vr0max);
6157 
6158   /* Failed to find an efficient meet.  Before giving up and setting
6159      the result to VARYING, see if we can at least derive a useful
6160      anti-range.  */
6161   if (tem.varying_p ()
6162       && range_includes_zero_p (vr0) == 0
6163       && range_includes_zero_p (vr1) == 0)
6164     {
6165       tem.set_nonnull (vr0->type ());
6166       return tem;
6167     }
6168 
6169   return tem;
6170 }
6171 
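/* For illustration only (hypothetical ranges): unioning [1, 5] with
   ~[0, 7] has no exact representation and union_ranges gives up, but
   since neither operand contains zero the code above still returns
   the useful anti-range ~[0, 0] instead of VR_VARYING.  */
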
6172 
6173 /* Meet operation for value ranges.  Given two value ranges VR0 and
6174    VR1, store in VR0 a range that contains both VR0 and VR1.  This
6175    may not be the smallest possible such range.  */
6176 
6177 void
6178 value_range_base::union_ (const value_range_base *other)
6179 {
6180   if (dump_file && (dump_flags & TDF_DETAILS))
6181     {
6182       fprintf (dump_file, "Meeting\n  ");
6183       dump_value_range (dump_file, this);
6184       fprintf (dump_file, "\nand\n  ");
6185       dump_value_range (dump_file, other);
6186       fprintf (dump_file, "\n");
6187     }
6188 
6189   *this = union_helper (this, other);
6190 
6191   if (dump_file && (dump_flags & TDF_DETAILS))
6192     {
6193       fprintf (dump_file, "to\n  ");
6194       dump_value_range (dump_file, this);
6195       fprintf (dump_file, "\n");
6196     }
6197 }
6198 
6199 void
6200 value_range::union_ (const value_range *other)
6201 {
6202   if (dump_file && (dump_flags & TDF_DETAILS))
6203     {
6204       fprintf (dump_file, "Meeting\n  ");
6205       dump_value_range (dump_file, this);
6206       fprintf (dump_file, "\nand\n  ");
6207       dump_value_range (dump_file, other);
6208       fprintf (dump_file, "\n");
6209     }
6210 
6211   /* If THIS is undefined we want to pick up equivalences from OTHER.
6212      Just special-case this here rather than trying to fix things up after the fact.  */
6213   if (this->undefined_p ())
6214     this->deep_copy (other);
6215   else
6216     {
6217       value_range_base tem = union_helper (this, other);
6218       this->update (tem.kind (), tem.min (), tem.max ());
6219 
6220       /* The resulting set of equivalences is always the intersection of
6221 	 the two sets.  */
6222       if (this->m_equiv && other->m_equiv && this->m_equiv != other->m_equiv)
6223 	bitmap_and_into (this->m_equiv, other->m_equiv);
6224       else if (this->m_equiv && !other->m_equiv)
6225 	bitmap_clear (this->m_equiv);
6226     }
6227 
6228   if (dump_file && (dump_flags & TDF_DETAILS))
6229     {
6230       fprintf (dump_file, "to\n  ");
6231       dump_value_range (dump_file, this);
6232       fprintf (dump_file, "\n");
6233     }
6234 }
6235 
6236 /* Visit all arguments for PHI node PHI that flow through executable
6237    edges.  If a valid value range can be derived from all the incoming
6238    value ranges, set a new range for the LHS of PHI.  */
6239 
6240 enum ssa_prop_result
6241 vrp_prop::visit_phi (gphi *phi)
6242 {
6243   tree lhs = PHI_RESULT (phi);
6244   value_range vr_result;
6245   extract_range_from_phi_node (phi, &vr_result);
6246   if (update_value_range (lhs, &vr_result))
6247     {
6248       if (dump_file && (dump_flags & TDF_DETAILS))
6249 	{
6250 	  fprintf (dump_file, "Found new range for ");
6251 	  print_generic_expr (dump_file, lhs);
6252 	  fprintf (dump_file, ": ");
6253 	  dump_value_range (dump_file, &vr_result);
6254 	  fprintf (dump_file, "\n");
6255 	}
6256 
6257       if (vr_result.varying_p ())
6258 	return SSA_PROP_VARYING;
6259 
6260       return SSA_PROP_INTERESTING;
6261     }
6262 
6263   /* Nothing changed, don't add outgoing edges.  */
6264   return SSA_PROP_NOT_INTERESTING;
6265 }
6266 
6267 class vrp_folder : public substitute_and_fold_engine
6268 {
6269  public:
6270   tree get_value (tree) FINAL OVERRIDE;
6271   bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
6272   bool fold_predicate_in (gimple_stmt_iterator *);
6273 
6274   class vr_values *vr_values;
6275 
6276   /* Delegators.  */
6277   tree vrp_evaluate_conditional (tree_code code, tree op0,
6278 				 tree op1, gimple *stmt)
6279     { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
6280   bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
6281     { return vr_values->simplify_stmt_using_ranges (gsi); }
6282  tree op_with_constant_singleton_value_range (tree op)
6283   tree op_with_constant_singleton_value_range (tree op)
6284 };
6285 
6286 /* If the statement pointed by SI has a predicate whose value can be
6287 /* If the statement pointed to by SI has a predicate whose value can be
6288    its value and return true.  Otherwise, return false.  */
6289 
6290 bool
6291 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
6292 {
6293   bool assignment_p = false;
6294   tree val;
6295   gimple *stmt = gsi_stmt (*si);
6296 
6297   if (is_gimple_assign (stmt)
6298       && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
6299     {
6300       assignment_p = true;
6301       val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
6302 				      gimple_assign_rhs1 (stmt),
6303 				      gimple_assign_rhs2 (stmt),
6304 				      stmt);
6305     }
6306   else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6307     val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6308 				    gimple_cond_lhs (cond_stmt),
6309 				    gimple_cond_rhs (cond_stmt),
6310 				    stmt);
6311   else
6312     return false;
6313 
6314   if (val)
6315     {
6316       if (assignment_p)
6317         val = fold_convert (gimple_expr_type (stmt), val);
6318 
6319       if (dump_file)
6320 	{
6321 	  fprintf (dump_file, "Folding predicate ");
6322 	  print_gimple_expr (dump_file, stmt, 0);
6323 	  fprintf (dump_file, " to ");
6324 	  print_generic_expr (dump_file, val);
6325 	  fprintf (dump_file, "\n");
6326 	}
6327 
6328       if (is_gimple_assign (stmt))
6329 	gimple_assign_set_rhs_from_tree (si, val);
6330       else
6331 	{
6332 	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
6333 	  gcond *cond_stmt = as_a <gcond *> (stmt);
6334 	  if (integer_zerop (val))
6335 	    gimple_cond_make_false (cond_stmt);
6336 	  else if (integer_onep (val))
6337 	    gimple_cond_make_true (cond_stmt);
6338 	  else
6339 	    gcc_unreachable ();
6340 	}
6341 
6342       return true;
6343     }
6344 
6345   return false;
6346 }
6347 
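/* For illustration only (hypothetical ranges): if VRP has computed
   the range [1, 5] for x_3, fold_predicate_in evaluates

     if (x_3 > 0)

   to true and rewrites the GIMPLE_COND via gimple_cond_make_true;
   an assignment _2 = x_3 > 0 would instead get its RHS replaced with
   the constant 1.  */
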
6348 /* Callback for substitute_and_fold folding the stmt at *SI.  */
6349 
6350 bool
6351 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
6352 {
6353   if (fold_predicate_in (si))
6354     return true;
6355 
6356   return simplify_stmt_using_ranges (si);
6357 }
6358 
6359 /* If OP has a value range with a single constant value return that,
6360    otherwise return NULL_TREE.  This returns OP itself if OP is a
6361    constant.
6362 
6363    Implemented as a pure wrapper right now, but this will change.  */
6364 
6365 tree
6366 vrp_folder::get_value (tree op)
6367 {
6368   return op_with_constant_singleton_value_range (op);
6369 }
6370 
6371 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
6372    argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
6373    BB.  If no such ASSERT_EXPR is found, return OP.  */
6374 
6375 static tree
6376 lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
6377 {
6378   imm_use_iterator imm_iter;
6379   gimple *use_stmt;
6380   use_operand_p use_p;
6381 
6382   if (TREE_CODE (op) == SSA_NAME)
6383     {
6384       FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
6385 	{
6386 	  use_stmt = USE_STMT (use_p);
6387 	  if (use_stmt != stmt
6388 	      && gimple_assign_single_p (use_stmt)
6389 	      && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
6390 	      && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
6391 	      && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
6392 	    return gimple_assign_lhs (use_stmt);
6393 	}
6394     }
6395   return op;
6396 }
6397 
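/* For illustration only (hypothetical names): given a dominating

     x_5 = ASSERT_EXPR <x_3, x_3 != 0>;

   lhs_of_dominating_assert maps OP == x_3 to x_5, so the jump
   threading code below queries the range of x_5, which carries the
   x_3 != 0 fact, rather than the weaker range of x_3.  */
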
6398 /* A hack: global used to pass the active vr_values to the jump threading callback.  */
6399 static class vr_values *x_vr_values;
6400 
6401 /* A trivial wrapper so that we can present the generic jump threading
6402    code with a simple API for simplifying statements.  STMT is the
6403    statement we want to simplify, WITHIN_STMT provides the location
6404    for any overflow warnings.  */
6405 
6406 static tree
6407 simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
6408     class avail_exprs_stack *avail_exprs_stack,
6409     basic_block bb)
6410 {
6411   /* First see if the conditional is in the hash table.  */
6412   tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
6413   if (cached_lhs && is_gimple_min_invariant (cached_lhs))
6414     return cached_lhs;
6415 
6416   vr_values *vr_values = x_vr_values;
6417   if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6418     {
6419       tree op0 = gimple_cond_lhs (cond_stmt);
6420       op0 = lhs_of_dominating_assert (op0, bb, stmt);
6421 
6422       tree op1 = gimple_cond_rhs (cond_stmt);
6423       op1 = lhs_of_dominating_assert (op1, bb, stmt);
6424 
6425       return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6426 						  op0, op1, within_stmt);
6427     }
6428 
6429   /* We simplify a switch statement by trying to determine which case label
6430      will be taken.  If we are successful then we return the corresponding
6431      CASE_LABEL_EXPR.  */
6432   if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
6433     {
6434       tree op = gimple_switch_index (switch_stmt);
6435       if (TREE_CODE (op) != SSA_NAME)
6436 	return NULL_TREE;
6437 
6438       op = lhs_of_dominating_assert (op, bb, stmt);
6439 
6440       const value_range *vr = vr_values->get_value_range (op);
6441       if (vr->undefined_p ()
6442 	  || vr->varying_p ()
6443 	  || vr->symbolic_p ())
6444 	return NULL_TREE;
6445 
6446       if (vr->kind () == VR_RANGE)
6447 	{
6448 	  size_t i, j;
6449 	  /* Get the range of labels that contain a part of the operand's
6450 	     value range.  */
6451 	  find_case_label_range (switch_stmt, vr->min (), vr->max (), &i, &j);
6452 
6453 	  /* Is there only one such label?  */
6454 	  if (i == j)
6455 	    {
6456 	      tree label = gimple_switch_label (switch_stmt, i);
6457 
6458 	      /* The i'th label will be taken only if the value range of the
6459 		 operand is entirely within the bounds of this label.  */
6460 	      if (CASE_HIGH (label) != NULL_TREE
6461 		  ? (tree_int_cst_compare (CASE_LOW (label), vr->min ()) <= 0
6462 		     && tree_int_cst_compare (CASE_HIGH (label),
6463 					      vr->max ()) >= 0)
6464 		  : (tree_int_cst_equal (CASE_LOW (label), vr->min ())
6465 		     && tree_int_cst_equal (vr->min (), vr->max ())))
6466 		return label;
6467 	    }
6468 
6469 	  /* If there are no such labels then the default label will be
6470 	     taken.  */
6471 	  if (i > j)
6472 	    return gimple_switch_label (switch_stmt, 0);
6473 	}
6474 
6475       if (vr->kind () == VR_ANTI_RANGE)
6476 	{
6477 	  unsigned n = gimple_switch_num_labels (switch_stmt);
6478 	  tree min_label = gimple_switch_label (switch_stmt, 1);
6479 	  tree max_label = gimple_switch_label (switch_stmt, n - 1);
6480 
6481 	  /* The default label will be taken only if the anti-range of the
6482 	     operand is entirely outside the bounds of all the (non-default)
6483 	     case labels.  */
6484 	  if (tree_int_cst_compare (vr->min (), CASE_LOW (min_label)) <= 0
6485 	      && (CASE_HIGH (max_label) != NULL_TREE
6486 		  ? tree_int_cst_compare (vr->max (),
6487 					  CASE_HIGH (max_label)) >= 0
6488 		  : tree_int_cst_compare (vr->max (),
6489 					  CASE_LOW (max_label)) >= 0))
6490 	    return gimple_switch_label (switch_stmt, 0);
6491 	}
6492 
6493       return NULL_TREE;
6494     }
6495 
6496   if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
6497     {
6498       tree lhs = gimple_assign_lhs (assign_stmt);
6499       if (TREE_CODE (lhs) == SSA_NAME
6500 	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6501 	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
6502 	  && stmt_interesting_for_vrp (stmt))
6503 	{
6504 	  edge dummy_e;
6505 	  tree dummy_tree;
6506 	  value_range new_vr;
6507 	  vr_values->extract_range_from_stmt (stmt, &dummy_e,
6508 					      &dummy_tree, &new_vr);
6509 	  tree singleton;
6510 	  if (new_vr.singleton_p (&singleton))
6511 	    return singleton;
6512 	}
6513     }
6514 
6515   return NULL_TREE;
6516 }
6517 
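/* For illustration only (hypothetical ranges): if the switch index
   has the range [3, 4] on the path being threaded and a single case
   label spans 3 ... 5, the function returns that CASE_LABEL_EXPR and
   the threader knows which successor edge the switch takes; if no
   label intersects the range, the default label is returned.  */
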
6518 class vrp_dom_walker : public dom_walker
6519 {
6520 public:
6521   vrp_dom_walker (cdi_direction direction,
6522 		  class const_and_copies *const_and_copies,
6523 		  class avail_exprs_stack *avail_exprs_stack)
6524     : dom_walker (direction, REACHABLE_BLOCKS),
6525       m_const_and_copies (const_and_copies),
6526       m_avail_exprs_stack (avail_exprs_stack),
6527       m_dummy_cond (NULL) {}
6528 
6529   virtual edge before_dom_children (basic_block);
6530   virtual void after_dom_children (basic_block);
6531 
6532   class vr_values *vr_values;
6533 
6534 private:
6535   class const_and_copies *m_const_and_copies;
6536   class avail_exprs_stack *m_avail_exprs_stack;
6537 
6538   gcond *m_dummy_cond;
6539 
6540 };
6541 
6542 /* Called before processing dominator children of BB.  We want to look
6543    at ASSERT_EXPRs and record information from them in the appropriate
6544    tables.
6545 
6546    We could look at other statements here, but doing so is unlikely
6547    to significantly increase the number of jump threads we discover.  */
6548 
6549 edge
6550 vrp_dom_walker::before_dom_children (basic_block bb)
6551 {
6552   gimple_stmt_iterator gsi;
6553 
6554   m_avail_exprs_stack->push_marker ();
6555   m_const_and_copies->push_marker ();
6556   for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
6557     {
6558       gimple *stmt = gsi_stmt (gsi);
6559       if (gimple_assign_single_p (stmt)
6560          && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
6561 	{
6562 	  tree rhs1 = gimple_assign_rhs1 (stmt);
6563 	  tree cond = TREE_OPERAND (rhs1, 1);
6564 	  tree inverted = invert_truthvalue (cond);
6565 	  vec<cond_equivalence> p;
6566 	  p.create (3);
6567 	  record_conditions (&p, cond, inverted);
6568 	  for (unsigned int i = 0; i < p.length (); i++)
6569 	    m_avail_exprs_stack->record_cond (&p[i]);
6570 
6571 	  tree lhs = gimple_assign_lhs (stmt);
6572 	  m_const_and_copies->record_const_or_copy (lhs,
6573 						    TREE_OPERAND (rhs1, 0));
6574 	  p.release ();
6575 	  continue;
6576 	}
6577       break;
6578     }
6579   return NULL;
6580 }
6581 
6582 /* Called after processing dominator children of BB.  This is where we
6583    actually call into the threader.  */
6584 void
6585 vrp_dom_walker::after_dom_children (basic_block bb)
6586 {
6587   if (!m_dummy_cond)
6588     m_dummy_cond = gimple_build_cond (NE_EXPR,
6589 				      integer_zero_node, integer_zero_node,
6590 				      NULL, NULL);
6591 
6592   x_vr_values = vr_values;
6593   thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
6594 			 m_avail_exprs_stack, NULL,
6595 			 simplify_stmt_for_jump_threading);
6596   x_vr_values = NULL;
6597 
6598   m_avail_exprs_stack->pop_to_marker ();
6599   m_const_and_copies->pop_to_marker ();
6600 }
6601 
6602 /* Blocks which have more than one predecessor and more than
6603    one successor present jump threading opportunities, i.e.,
6604    when the block is reached from a specific predecessor, we
6605    may be able to determine which of the outgoing edges will
6606    be traversed.  When this optimization applies, we are able
6607    to avoid conditionals at runtime and we may expose secondary
6608    optimization opportunities.
6609 
6610    This routine is effectively a driver for the generic jump
6611    threading code.  It basically just presents the generic code
6612    with edges that may be suitable for jump threading.
6613 
6614    Unlike DOM, we do not iterate VRP if jump threading was successful.
6615    While iterating may expose new opportunities for VRP, it is expected
6616    those opportunities would be very limited and the compile time cost
6617    to expose those opportunities would be significant.
6618 
6619    As jump threading opportunities are discovered, they are registered
6620    for later realization.  */
6621 
6622 static void
6623 identify_jump_threads (class vr_values *vr_values)
6624 {
6625   /* Ugh.  When substituting values earlier in this pass we can
6626      wipe the dominance information.  So rebuild the dominator
6627      information as we need it within the jump threading code.  */
6628   calculate_dominance_info (CDI_DOMINATORS);
6629 
6630   /* We do not allow VRP information to be used for jump threading
6631      across a back edge in the CFG.  Otherwise it becomes too
6632      difficult to avoid eliminating loop exit tests.  Of course
6633      EDGE_DFS_BACK is not accurate at this time so we have to
6634      recompute it.  */
6635   mark_dfs_back_edges ();
6636 
6637   /* Allocate our unwinder stack to unwind any temporary equivalences
6638      that might be recorded.  */
6639   const_and_copies *equiv_stack = new const_and_copies ();
6640 
6641   hash_table<expr_elt_hasher> *avail_exprs
6642     = new hash_table<expr_elt_hasher> (1024);
6643   avail_exprs_stack *avail_exprs_stack
6644     = new class avail_exprs_stack (avail_exprs);
6645 
6646   vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
6647   walker.vr_values = vr_values;
6648   walker.walk (cfun->cfg->x_entry_block_ptr);
6649 
6650   /* We do not actually update the CFG or SSA graphs at this point as
6651      ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
6652      handle ASSERT_EXPRs gracefully.  */
6653   delete equiv_stack;
6654   delete avail_exprs;
6655   delete avail_exprs_stack;
6656 }
6657 
6658 /* Traverse all the blocks folding conditionals with known ranges.  */
6659 
6660 void
6661 vrp_prop::vrp_finalize (bool warn_array_bounds_p)
6662 {
6663   size_t i;
6664 
6665   /* We have completed propagating through the lattice.  */
6666   vr_values.set_lattice_propagation_complete ();
6667 
6668   if (dump_file)
6669     {
6670       fprintf (dump_file, "\nValue ranges after VRP:\n\n");
6671       vr_values.dump_all_value_ranges (dump_file);
6672       fprintf (dump_file, "\n");
6673     }
6674 
6675   /* Record nonnull facts for pointer SSA_NAMEs, range info for the rest.  */
6676   for (i = 0; i < num_ssa_names; i++)
6677     {
6678       tree name = ssa_name (i);
6679       if (!name)
6680 	continue;
6681 
6682       const value_range *vr = get_value_range (name);
6683       if (!vr->constant_p ())
6684 	continue;
6685 
6686       if (POINTER_TYPE_P (TREE_TYPE (name))
6687 	  && range_includes_zero_p (vr) == 0)
6688 	set_ptr_nonnull (name);
6689       else if (!POINTER_TYPE_P (TREE_TYPE (name)))
6690 	set_range_info (name, *vr);
6691     }
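
  /* For example (hypothetical): if the lattice proved that n_7 has
     range [0, 99], set_range_info above attached that range to n_7's
     SSA_NAME, where later passes can recover it via get_range_info
     even after VRP's lattice is freed.  */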
6692 
6693   /* If we're checking array refs, we want to merge information on
6694      the executability of each edge between vrp_folder and the
6695      check_array_bounds_dom_walker: each can clear the
6696      EDGE_EXECUTABLE flag on edges, in different ways.
6697 
6698      Hence, if we're going to call check_all_array_refs, set
6699      the flag on every edge now, rather than in
6700      check_array_bounds_dom_walker's ctor; vrp_folder may clear
6701      it from some edges.  */
6702   if (warn_array_bounds && warn_array_bounds_p)
6703     set_all_edges_as_executable (cfun);
6704 
6705   class vrp_folder vrp_folder;
6706   vrp_folder.vr_values = &vr_values;
6707   vrp_folder.substitute_and_fold ();
6708 
6709   if (warn_array_bounds && warn_array_bounds_p)
6710     check_all_array_refs ();
6711 }
6712 
6713 /* Main entry point to VRP (Value Range Propagation).  This pass is
6714    loosely based on J. R. C. Patterson, ``Accurate Static Branch
6715    Prediction by Value Range Propagation,'' in SIGPLAN Conference on
6716    Programming Language Design and Implementation, pp. 67-78, 1995.
6717    Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
6718 
6719    This is essentially an SSA-CCP pass modified to deal with ranges
6720    instead of constants.
6721 
6722    While propagating ranges, we may find that two or more SSA names
6723    have equivalent, though distinct, ranges.  For instance,
6724 
6725      1	x_9 = p_3->a;
6726      2	p_4 = ASSERT_EXPR <p_3, p_3 != 0>;
6727      3	if (p_4 == q_2)
6728      4	  p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
6729      5	endif
6730      6	if (q_2)
6731 
6732    In the code above, pointer p_5 has range [q_2, q_2], but from the
6733    code we can also determine that p_5 cannot be NULL and, if q_2 had
6734    a non-varying range, p_5's range should also be compatible with it.
6735 
6736    These equivalences are created by two expressions: ASSERT_EXPR and
6737    copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
6738    result of another assertion, then we can use the fact that p_5 and
6739    p_4 are equivalent when evaluating p_5's range.
6740 
6741    Together with value ranges, we also propagate these equivalences
6742    between names so that we can take advantage of information from
6743    multiple ranges when doing final replacement.  Note that this
6744    equivalency relation is transitive but not symmetric.
6745 
6746    In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
6747    cannot assert that q_2 is equivalent to p_5 because q_2 may be used
6748    in contexts where that assertion does not hold (e.g., in line 6).
6749 
6750    TODO, the main difference between this pass and Patterson's is that
6751    we do not propagate edge probabilities.  We only compute whether
6752    edges can be taken or not.  That is, instead of having a spectrum
6753    of jump probabilities between 0 and 1, we only deal with 0, 1 and
6754    DON'T KNOW.  In the future, it may be worthwhile to propagate
6755    probabilities to aid branch prediction.  */
6756 
6757 static unsigned int
6758 execute_vrp (bool warn_array_bounds_p)
6759 {
6761   loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
6762   rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
6763   scev_initialize ();
6764 
6765   /* ???  This ends up using stale EDGE_DFS_BACK for liveness computation.
6766      Inserting assertions may split edges which will invalidate
6767      EDGE_DFS_BACK.  */
6768   insert_range_assertions ();
6769 
6770   threadedge_initialize_values ();
6771 
6772   /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
6773   mark_dfs_back_edges ();
6774 
6775   class vrp_prop vrp_prop;
6776   vrp_prop.vrp_initialize ();
6777   vrp_prop.ssa_propagate ();
6778   vrp_prop.vrp_finalize (warn_array_bounds_p);
6779 
6780   /* We must identify jump threading opportunities before we release
6781      the datastructures built by VRP.  */
6782   identify_jump_threads (&vrp_prop.vr_values);
6783 
6784   /* A comparison of an SSA_NAME against a constant where the SSA_NAME
6785      was set by a type conversion can often be rewritten to use the
6786      RHS of the type conversion.
6787 
6788      However, doing so inhibits jump threading through the comparison.
6789      So that transformation is not performed until after jump threading
6790      is complete.  */
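  /* E.g. (hypothetical GIMPLE): given "_1 = (int) c_2; if (_1 > 0)",
     simplify_cond_using_ranges_2 may now rewrite the test to
     "if (c_2 > 0)", since jump threading no longer needs _1.  */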
6791   basic_block bb;
6792   FOR_EACH_BB_FN (bb, cfun)
6793     {
6794       gimple *last = last_stmt (bb);
6795       if (last && gimple_code (last) == GIMPLE_COND)
6796 	vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
6797     }
6798 
6799   free_numbers_of_iterations_estimates (cfun);
6800 
6801   /* ASSERT_EXPRs must be removed before finalizing jump threads, as
6802      doing so invokes the CFG cleanup code, which does not handle
6803      ASSERT_EXPRs properly.  */
6804   remove_range_assertions ();
6805 
6806   /* If we exposed any new variables, go ahead and put them into
6807      SSA form now, before we handle jump threading.  This simplifies
6808      interactions between rewriting of _DECL nodes into SSA form
6809      and rewriting SSA_NAME nodes into SSA form after block
6810      duplication and CFG manipulation.  */
6811   update_ssa (TODO_update_ssa);
6812 
6813   /* We identified all the jump threading opportunities earlier, but could
6814      not transform the CFG at that time.  This routine transforms the
6815      CFG and arranges for the dominator tree to be rebuilt if necessary.
6816 
6817      Note the SSA graph update will occur during the normal TODO
6818      processing by the pass manager.  */
6819   thread_through_all_blocks (false);
6820 
6821   vrp_prop.vr_values.cleanup_edges_and_switches ();
6822   threadedge_finalize_values ();
6823 
6824   scev_finalize ();
6825   loop_optimizer_finalize ();
6826   return 0;
6827 }
6828 
6829 namespace {
6830 
6831 const pass_data pass_data_vrp =
6832 {
6833   GIMPLE_PASS, /* type */
6834   "vrp", /* name */
6835   OPTGROUP_NONE, /* optinfo_flags */
6836   TV_TREE_VRP, /* tv_id */
6837   PROP_ssa, /* properties_required */
6838   0, /* properties_provided */
6839   0, /* properties_destroyed */
6840   0, /* todo_flags_start */
6841   ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
6842 };
6843 
6844 class pass_vrp : public gimple_opt_pass
6845 {
6846 public:
6847   pass_vrp (gcc::context *ctxt)
6848     : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
6849   {}
6850 
6851   /* opt_pass methods: */
6852   opt_pass * clone () { return new pass_vrp (m_ctxt); }
6853   void set_pass_param (unsigned int n, bool param)
6854     {
6855       gcc_assert (n == 0);
6856       warn_array_bounds_p = param;
6857     }
6858   virtual bool gate (function *) { return flag_tree_vrp != 0; }
6859   virtual unsigned int execute (function *)
6860     { return execute_vrp (warn_array_bounds_p); }
6861 
6862  private:
6863   bool warn_array_bounds_p;
6864 }; // class pass_vrp
6865 
6866 } // anon namespace
6867 
6868 gimple_opt_pass *
6869 make_pass_vrp (gcc::context *ctxt)
6870 {
6871   return new pass_vrp (ctxt);
6872 }
6873 
6874 
6875 /* Recursive worker for determine_value_range.  Compute EXPR's range into *VR.  */
6876 
6877 static void
6878 determine_value_range_1 (value_range_base *vr, tree expr)
6879 {
6880   if (BINARY_CLASS_P (expr))
6881     {
6882       value_range_base vr0, vr1;
6883       determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
6884       determine_value_range_1 (&vr1, TREE_OPERAND (expr, 1));
6885       extract_range_from_binary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
6886 				      &vr0, &vr1);
6887     }
6888   else if (UNARY_CLASS_P (expr))
6889     {
6890       value_range_base vr0;
6891       determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
6892       extract_range_from_unary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
6893 				     &vr0, TREE_TYPE (TREE_OPERAND (expr, 0)));
6894     }
6895   else if (TREE_CODE (expr) == INTEGER_CST)
6896     vr->set (expr);
6897   else
6898     {
6899       value_range_kind kind;
6900       wide_int min, max;
6901       /* For SSA names try to extract range info computed by VRP.  Otherwise
6902 	 fall back to varying.  */
6903       if (TREE_CODE (expr) == SSA_NAME
6904 	  && INTEGRAL_TYPE_P (TREE_TYPE (expr))
6905 	  && (kind = get_range_info (expr, &min, &max)) != VR_VARYING)
6906 	vr->set (kind, wide_int_to_tree (TREE_TYPE (expr), min),
6907 		 wide_int_to_tree (TREE_TYPE (expr), max));
6908       else
6909 	vr->set_varying ();
6910     }
6911 }
6912 
6913 /* Compute a value range for EXPR; if it has constant bounds, store them
6914    in *MIN and *MAX and return the range kind, else return VR_VARYING.  */
6915 
6916 value_range_kind
6917 determine_value_range (tree expr, wide_int *min, wide_int *max)
6918 {
6919   value_range_base vr;
6920   determine_value_range_1 (&vr, expr);
6921   if (vr.constant_p ())
6922     {
6923       *min = wi::to_wide (vr.min ());
6924       *max = wi::to_wide (vr.max ());
6925       return vr.kind ();
6926     }
6927 
6928   return VR_VARYING;
6929 }
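
/* Example use (a sketch, relying only on the definitions above):

     wide_int min, max;
     if (determine_value_range (expr, &min, &max) == VR_RANGE)
       gcc_assert (wi::le_p (min, max, TYPE_SIGN (TREE_TYPE (expr))));

   When VR_RANGE is returned, EXPR is known to lie within [MIN, MAX];
   when VR_VARYING is returned, MIN and MAX are not written.  */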
6930