/* Support routines for Value Range Propagation (VRP).
   Copyright (C) 2005-2020 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "cfganal.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "intl.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-propagate.h"
#include "tree-chrec.h"
#include "tree-ssa-threadupdate.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "omp-general.h"
#include "target.h"
#include "case-cfn-macros.h"
#include "alloc-pool.h"
#include "domwalk.h"
#include "tree-cfgcleanup.h"
#include "stringpool.h"
#include "attribs.h"
#include "vr-values.h"
#include "builtins.h"
#include "range-op.h"

/* Set of SSA names found live during the RPO traversal of the function
   for still active basic-blocks.  */
static sbitmap *live;

void
value_range_equiv::set_equiv (bitmap equiv)
{
  if (undefined_p () || varying_p ())
    equiv = NULL;
  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.

     All equivalence bitmaps are allocated from the same obstack.  So
     we can use the obstack associated with EQUIV to allocate vr->equiv.  */
  if (m_equiv == NULL
      && equiv != NULL)
    m_equiv = BITMAP_ALLOC (equiv->obstack);

  if (equiv != m_equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
        bitmap_copy (m_equiv, equiv);
      else
        bitmap_clear (m_equiv);
    }
}

/* Initialize value_range.  */

void
value_range_equiv::set (tree min, tree max, bitmap equiv,
                        value_range_kind kind)
{
  value_range::set (min, max, kind);
  set_equiv (equiv);
  if (flag_checking)
    check ();
}

value_range_equiv::value_range_equiv (tree min, tree max, bitmap equiv,
                                      value_range_kind kind)
{
  m_equiv = NULL;
  set (min, max, equiv, kind);
}

value_range_equiv::value_range_equiv (const value_range &other)
{
  m_equiv = NULL;
  set (other.min (), other.max (), NULL, other.kind ());
}

/* Like set, but keep the equivalences in place.  */

void
value_range_equiv::update (tree min, tree max, value_range_kind kind)
{
  set (min, max,
       (kind != VR_UNDEFINED && kind != VR_VARYING) ? m_equiv : NULL, kind);
}

/* Copy the value_range in FROM into THIS while avoiding bitmap sharing.

   Note: The code that avoids the bitmap sharing looks at the existing
   this->m_equiv, so this function cannot be used to initialize an
   object.  Use the constructors for initialization.  */

void
value_range_equiv::deep_copy (const value_range_equiv *from)
{
  set (from->min (), from->max (), from->m_equiv, from->m_kind);
}

void
value_range_equiv::move (value_range_equiv *from)
{
  set (from->min (), from->max (), NULL, from->m_kind);
  m_equiv = from->m_equiv;
  from->m_equiv = NULL;
}

void
value_range_equiv::check ()
{
  value_range::check ();
  switch (m_kind)
    {
    case VR_UNDEFINED:
    case VR_VARYING:
      gcc_assert (!m_equiv || bitmap_empty_p (m_equiv));
    default:;
    }
}

/* Return true if the bitmaps B1 and B2 are equal.  */

static bool
vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
{
  return (b1 == b2
          || ((!b1 || bitmap_empty_p (b1))
              && (!b2 || bitmap_empty_p (b2)))
          || (b1 && b2
              && bitmap_equal_p (b1, b2)));
}

/* Returns TRUE if THIS == OTHER.  Ignores the equivalence bitmap if
   IGNORE_EQUIVS is TRUE.  */

bool
value_range_equiv::equal_p (const value_range_equiv &other,
                            bool ignore_equivs) const
{
  return (value_range::equal_p (other)
          && (ignore_equivs
              || vrp_bitmap_equal_p (m_equiv, other.m_equiv)));
}

void
value_range_equiv::set_undefined ()
{
  set (NULL, NULL, NULL, VR_UNDEFINED);
}

void
value_range_equiv::set_varying (tree type)
{
  value_range::set_varying (type);
  equiv_clear ();
}

void
value_range_equiv::equiv_clear ()
{
  if (m_equiv)
    bitmap_clear (m_equiv);
}

/* Add VAR and VAR's equivalence set (VAR_VR) to the equivalence
   bitmap.  If no equivalence table has been created, OBSTACK is the
   obstack to use (NULL for the default obstack).

   This is the central point where equivalence processing can be
   turned on/off.  */

void
value_range_equiv::equiv_add (const_tree var,
                              const value_range_equiv *var_vr,
                              bitmap_obstack *obstack)
{
  if (!m_equiv)
    m_equiv = BITMAP_ALLOC (obstack);
  unsigned ver = SSA_NAME_VERSION (var);
  bitmap_set_bit (m_equiv, ver);
  if (var_vr && var_vr->m_equiv)
    bitmap_ior_into (m_equiv, var_vr->m_equiv);
}

void
value_range_equiv::dump (FILE *file) const
{
  value_range::dump (file);
  if ((m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE)
      && m_equiv)
    {
      bitmap_iterator bi;
      unsigned i, c = 0;

      fprintf (file, "  EQUIVALENCES: { ");

      EXECUTE_IF_SET_IN_BITMAP (m_equiv, 0, i, bi)
        {
          print_generic_expr (file, ssa_name (i));
          fprintf (file, " ");
          c++;
        }

      fprintf (file, "} (%u elements)", c);
    }
}

void
value_range_equiv::dump () const
{
  dump (stderr);
}

void
dump_value_range (FILE *file, const value_range_equiv *vr)
{
  if (!vr)
    fprintf (file, "[]");
  else
    vr->dump (file);
}

DEBUG_FUNCTION void
debug (const value_range_equiv *vr)
{
  dump_value_range (stderr, vr);
}

DEBUG_FUNCTION void
debug (const value_range_equiv &vr)
{
  dump_value_range (stderr, &vr);
}

/* Return true if the SSA name NAME is live on the edge E.  */

static bool
live_on_edge (edge e, tree name)
{
  return (live[e->dest->index]
          && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
}

/* Location information for ASSERT_EXPRs.  Each instance of this
   structure describes an ASSERT_EXPR for an SSA name.  Since a single
   SSA name may have more than one assertion associated with it, these
   locations are kept in a linked list attached to the corresponding
   SSA name.  */
struct assert_locus
{
  /* Basic block where the assertion would be inserted.  */
  basic_block bb;

  /* Some assertions need to be inserted on an edge (e.g., assertions
     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
  edge e;

  /* Pointer to the statement that generated this assertion.  */
  gimple_stmt_iterator si;

  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
  enum tree_code comp_code;

  /* Value being compared against.  */
  tree val;

  /* Expression to compare.  */
  tree expr;

  /* Next node in the linked list.  */
  assert_locus *next;
};

/* If bit I is present, it means that SSA name N_i has a list of
   assertions that should be inserted in the IL.  */
static bitmap need_assert_for;

/* Array of location lists describing where to insert assertions.
   ASSERTS_FOR[I] holds a list of ASSERT_LOCUS_T nodes that describe
   where ASSERT_EXPRs for SSA name N_I should be inserted.  */
static assert_locus **asserts_for;

/* VR_TYPE describes a range with minimum value *MIN and maximum
   value *MAX.  Restrict the range to the set of values that have
   no bits set outside NONZERO_BITS.  Update *MIN and *MAX and
   return the new range type.

   SGN gives the sign of the values described by the range.  */

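/* For example, given *MIN = 0, *MAX = 10 and NONZERO_BITS = 0xc
   (binary 1100), the only values in [0, 10] with no bits outside the
   mask are 0, 4 and 8, so a VR_RANGE is tightened to [0, 8].  */
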
enum value_range_kind
intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
                                   wide_int *min, wide_int *max,
                                   const wide_int &nonzero_bits,
                                   signop sgn)
{
  if (vr_type == VR_ANTI_RANGE)
    {
      /* The VR_ANTI_RANGE is equivalent to the union of the ranges
         A: [-INF, *MIN) and B: (*MAX, +INF].  First use NONZERO_BITS
         to create an inclusive upper bound for A and an inclusive lower
         bound for B.  */
      wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
      wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);

      /* If the calculation of A_MAX wrapped, A is effectively empty
         and A_MAX is the highest value that satisfies NONZERO_BITS.
         Likewise if the calculation of B_MIN wrapped, B is effectively
         empty and B_MIN is the lowest value that satisfies NONZERO_BITS.  */
      bool a_empty = wi::ge_p (a_max, *min, sgn);
      bool b_empty = wi::le_p (b_min, *max, sgn);

      /* If both A and B are empty, there are no valid values.  */
      if (a_empty && b_empty)
        return VR_UNDEFINED;

      /* If exactly one of A or B is empty, return a VR_RANGE for the
         other one.  */
      if (a_empty || b_empty)
        {
          *min = b_min;
          *max = a_max;
          gcc_checking_assert (wi::le_p (*min, *max, sgn));
          return VR_RANGE;
        }

      /* Update the VR_ANTI_RANGE bounds.  */
      *min = a_max + 1;
      *max = b_min - 1;
      gcc_checking_assert (wi::le_p (*min, *max, sgn));

      /* Now check whether the excluded range includes any values that
         satisfy NONZERO_BITS.  If not, switch to a full VR_RANGE.  */
      if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
        {
          unsigned int precision = min->get_precision ();
          *min = wi::min_value (precision, sgn);
          *max = wi::max_value (precision, sgn);
          vr_type = VR_RANGE;
        }
    }
  if (vr_type == VR_RANGE)
    {
      *max = wi::round_down_for_mask (*max, nonzero_bits);

      /* Check that the range contains at least one valid value.  */
      if (wi::gt_p (*min, *max, sgn))
        return VR_UNDEFINED;

      *min = wi::round_up_for_mask (*min, nonzero_bits);
      gcc_checking_assert (wi::le_p (*min, *max, sgn));
    }
  return vr_type;
}

void
value_range_equiv::set (tree val)
{
  gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val));
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  set (val, val);
}

/* Return true if both the min and max of VR are INTEGER_CST.  The
   range need not be a singleton.  */

bool
range_int_cst_p (const value_range *vr)
{
  return (vr->kind () == VR_RANGE && range_has_numeric_bounds_p (vr));
}

/* Return the single symbol (an SSA_NAME) contained in T if any, or
   NULL_TREE otherwise.  We only handle additive operations and set NEG
   to true if the symbol is negated and INV to the invariant part, if
   any.  For example, given T = -X + 4, this returns X and sets *NEG to
   true and *INV to 4.  */

tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
  bool neg_;
  tree inv_;

  *inv = NULL_TREE;
  *neg = false;

  if (TREE_CODE (t) == PLUS_EXPR
      || TREE_CODE (t) == POINTER_PLUS_EXPR
      || TREE_CODE (t) == MINUS_EXPR)
    {
      if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
        {
          neg_ = (TREE_CODE (t) == MINUS_EXPR);
          inv_ = TREE_OPERAND (t, 0);
          t = TREE_OPERAND (t, 1);
        }
      else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
        {
          neg_ = false;
          inv_ = TREE_OPERAND (t, 1);
          t = TREE_OPERAND (t, 0);
        }
      else
        return NULL_TREE;
    }
  else
    {
      neg_ = false;
      inv_ = NULL_TREE;
    }

  if (TREE_CODE (t) == NEGATE_EXPR)
    {
      t = TREE_OPERAND (t, 0);
      neg_ = !neg_;
    }

  if (TREE_CODE (t) != SSA_NAME)
    return NULL_TREE;

  if (inv_ && TREE_OVERFLOW_P (inv_))
    inv_ = drop_tree_overflow (inv_);

  *neg = neg_;
  *inv = inv_;
  return t;
}

/* The reverse operation: build a symbolic expression with TYPE
   from symbol SYM, negated according to NEG, and invariant INV.  */

static tree
build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
{
  const bool pointer_p = POINTER_TYPE_P (type);
  tree t = sym;

  if (neg)
    t = build1 (NEGATE_EXPR, type, t);

  if (integer_zerop (inv))
    return t;

  return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
}

/* Return
   1 if VAL < VAL2
   0 if !(VAL < VAL2)
   -2 if those are incomparable.  */
int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    return tree_int_cst_lt (val, val2);
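  /* Two SSA names can only be compared when they are the same name;
     VAL < VAL is false, and distinct names are incomparable.  */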
  else if (TREE_CODE (val) == SSA_NAME && TREE_CODE (val2) == SSA_NAME)
    return val == val2 ? 0 : -2;
  else
    {
      int cmp = compare_values (val, val2);
      if (cmp == -1)
        return 1;
      else if (cmp == 0 || cmp == 1)
        return 0;
      else
        return -2;
    }
}

/* Compare two values VAL1 and VAL2.  Return

     -2 if VAL1 and VAL2 cannot be compared at compile-time,
     -1 if VAL1 < VAL2,
      0 if VAL1 == VAL2,
     +1 if VAL1 > VAL2, and
     +2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */

int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
              == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  if (!useless_type_conversion_p (TREE_TYPE (val1), TREE_TYPE (val2)))
    val2 = fold_convert (TREE_TYPE (val1), val2);

  const bool overflow_undefined
    = INTEGRAL_TYPE_P (TREE_TYPE (val1))
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
  tree inv1, inv2;
  bool neg1, neg2;
  tree sym1 = get_single_symbol (val1, &neg1, &inv1);
  tree sym2 = get_single_symbol (val2, &neg2, &inv2);

  /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
     accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
  if (sym1 && sym2)
    {
      /* Both values must use the same name with the same sign.  */
      if (sym1 != sym2 || neg1 != neg2)
        return -2;

      /* [-]NAME + CST == [-]NAME + CST.  */
      if (inv1 == inv2)
        return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!overflow_undefined)
        return -2;

      if (strict_overflow_p != NULL
          /* Symbolic range building sets TREE_NO_WARNING to declare
             that overflow doesn't happen.  */
          && (!inv1 || !TREE_NO_WARNING (val1))
          && (!inv2 || !TREE_NO_WARNING (val2)))
        *strict_overflow_p = true;

      if (!inv1)
        inv1 = build_int_cst (TREE_TYPE (val1), 0);
      if (!inv2)
        inv2 = build_int_cst (TREE_TYPE (val2), 0);

      return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
                      TYPE_SIGN (TREE_TYPE (val1)));
    }

  const bool cst1 = is_gimple_min_invariant (val1);
  const bool cst2 = is_gimple_min_invariant (val2);

  /* If one is of the form '[-]NAME + CST' and the other is constant, then
     it might be possible to say something depending on the constants.  */
  if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
    {
      if (!overflow_undefined)
        return -2;

      if (strict_overflow_p != NULL
          /* Symbolic range building sets TREE_NO_WARNING to declare
             that overflow doesn't happen.  */
          && (!sym1 || !TREE_NO_WARNING (val1))
          && (!sym2 || !TREE_NO_WARNING (val2)))
        *strict_overflow_p = true;

      const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
      tree cst = cst1 ? val1 : val2;
      tree inv = cst1 ? inv2 : inv1;

      /* Compute the difference between the constants.  If it overflows or
         underflows, this means that we can trivially compare the NAME with
         it and, consequently, the two values with each other.  */
      wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
      if (wi::cmp (0, wi::to_wide (inv), sgn)
          != wi::cmp (diff, wi::to_wide (cst), sgn))
        {
          const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
          return cst1 ? res : -res;
        }

      return -2;
    }

  /* We cannot say anything more for non-constants.  */
  if (!cst1 || !cst2)
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
        return -2;

      if (TREE_CODE (val1) == INTEGER_CST
          && TREE_CODE (val2) == INTEGER_CST)
        return tree_int_cst_compare (val1, val2);

      if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
        {
          if (known_eq (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return 0;
          if (known_lt (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return -1;
          if (known_gt (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return 1;
        }

      return -2;
    }
  else
    {
      if (TREE_CODE (val1) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
        {
          /* We cannot compare overflowed values.  */
          if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
            return -2;

          return tree_int_cst_compare (val1, val2);
        }

      /* First check whether VAL1 and VAL2 are the same.  */
      if (operand_equal_p (val1, val2, 0))
        return 0;

      fold_defer_overflow_warnings ();

      /* If VAL1 is a lower address than VAL2, return -1.  */
      tree t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val1, val2);
      if (t && integer_onep (t))
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return -1;
        }

      /* If VAL1 is a higher address than VAL2, return +1.  */
      t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val2, val1);
      if (t && integer_onep (t))
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return 1;
        }

      /* If VAL1 is different than VAL2, return +2.  */
      t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
      fold_undefer_and_ignore_overflow_warnings ();
      if (t && integer_onep (t))
        return 2;

      return -2;
    }
}

/* Compare values like compare_values_warnv.  */

int
compare_values (tree val1, tree val2)
{
  bool sop;
  return compare_values_warnv (val1, val2, &sop);
}

/* If BOUND will include a symbolic bound, adjust it accordingly,
   otherwise leave it as is.

   CODE is the original operation that combined the bounds (PLUS_EXPR
   or MINUS_EXPR).

   TYPE is the type of the original operation.

   SYM_OPn is the symbolic part of OPn, if it has one.

   NEG_OPn is TRUE if OPn was negated.  */

static void
adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
                       tree sym_op0, tree sym_op1,
                       bool neg_op0, bool neg_op1)
{
  bool minus_p = (code == MINUS_EXPR);
  /* If the result bound is constant, we're done; otherwise, build the
     symbolic bound.  */
  if (sym_op0 == sym_op1)
    ;
  else if (sym_op0)
    bound = build_symbolic_expr (type, sym_op0,
                                 neg_op0, bound);
  else if (sym_op1)
    {
      /* We may not negate if that might introduce
         undefined overflow.  */
      if (!minus_p
          || neg_op1
          || TYPE_OVERFLOW_WRAPS (type))
        bound = build_symbolic_expr (type, sym_op1,
                                     neg_op1 ^ minus_p, bound);
      else
        bound = NULL_TREE;
    }
}

/* Combine OP0 and OP1, which are two parts of a bound, into one wide
   int bound according to CODE.  CODE is the operation combining the
   bound (either a PLUS_EXPR or a MINUS_EXPR).

   TYPE is the type of the combine operation.

   WI is the wide int to store the result.

   OVF is set to -1 if an underflow occurred, +1 if an overflow
   occurred, or 0 if neither occurred.  */

static void
combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
               tree type, tree op0, tree op1)
{
  bool minus_p = (code == MINUS_EXPR);
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  /* Combine the bounds, if any.  */
  if (op0 && op1)
    {
      if (minus_p)
        wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
      else
        wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
    }
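  /* Only one bound was given; the missing one behaves like zero, so
     for MINUS_EXPR a lone OP1 must be negated.  */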
  else if (op0)
    wi = wi::to_wide (op0);
  else if (op1)
    {
      if (minus_p)
        wi = wi::neg (wi::to_wide (op1), &ovf);
      else
        wi = wi::to_wide (op1);
    }
  else
    wi = wi::shwi (0, prec);
}

/* Given a range in [WMIN, WMAX], adjust it for possible overflow and
   put the result in VR.

   TYPE is the type of the range.

   MIN_OVF and MAX_OVF indicate what type of overflow, if any,
   occurred while originally calculating WMIN or WMAX.  -1 indicates
   underflow.  +1 indicates overflow.  0 indicates neither.  */

static void
set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max,
                               tree type,
                               const wide_int &wmin, const wide_int &wmax,
                               wi::overflow_type min_ovf,
                               wi::overflow_type max_ovf)
{
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  /* For one bit precision if max < min, then the swapped
     range covers all values.  */
  if (prec == 1 && wi::lt_p (wmax, wmin, sgn))
    {
      kind = VR_VARYING;
      return;
    }

  if (TYPE_OVERFLOW_WRAPS (type))
    {
      /* If overflow wraps, truncate the values and adjust the
         range kind and bounds appropriately.  */
      wide_int tmin = wide_int::from (wmin, prec, sgn);
      wide_int tmax = wide_int::from (wmax, prec, sgn);
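      /* Both bounds wrapped (in either direction), or neither did.  */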
      if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
        {
          /* If the limits are swapped, we wrapped around and cover
             the entire range.  */
          if (wi::gt_p (tmin, tmax, sgn))
            kind = VR_VARYING;
          else
            {
              kind = VR_RANGE;
              /* No overflow or both overflow or underflow.  The
                 range kind stays VR_RANGE.  */
              min = wide_int_to_tree (type, tmin);
              max = wide_int_to_tree (type, tmax);
            }
          return;
        }
      else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
               || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
        {
          /* Min underflow or max overflow.  The range kind
             changes to VR_ANTI_RANGE.  */
          bool covers = false;
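          /* The anti-range becomes ~[old TMAX + 1, old TMIN - 1].
             Detect wraparound while computing either bound; in that
             case the anti-range is meaningless and we drop to
             VR_VARYING below.  */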
          wide_int tem = tmin;
          tmin = tmax + 1;
          if (wi::cmp (tmin, tmax, sgn) < 0)
            covers = true;
          tmax = tem - 1;
          if (wi::cmp (tmax, tem, sgn) > 0)
            covers = true;
          /* If the anti-range would cover nothing, drop to varying.
             Likewise if the anti-range bounds are outside of the
             type's values.  */
          if (covers || wi::cmp (tmin, tmax, sgn) > 0)
            {
              kind = VR_VARYING;
              return;
            }
          kind = VR_ANTI_RANGE;
          min = wide_int_to_tree (type, tmin);
          max = wide_int_to_tree (type, tmax);
          return;
        }
      else
        {
          /* Other underflow and/or overflow, drop to VR_VARYING.  */
          kind = VR_VARYING;
          return;
        }
    }
  else
    {
      /* If overflow does not wrap, saturate to the type's min/max
         value.  */
      wide_int type_min = wi::min_value (prec, sgn);
      wide_int type_max = wi::max_value (prec, sgn);
      kind = VR_RANGE;
      if (min_ovf == wi::OVF_UNDERFLOW)
        min = wide_int_to_tree (type, type_min);
      else if (min_ovf == wi::OVF_OVERFLOW)
        min = wide_int_to_tree (type, type_max);
      else
        min = wide_int_to_tree (type, wmin);

      if (max_ovf == wi::OVF_UNDERFLOW)
        max = wide_int_to_tree (type, type_min);
      else if (max_ovf == wi::OVF_OVERFLOW)
        max = wide_int_to_tree (type, type_max);
      else
        max = wide_int_to_tree (type, wmax);
    }
}

/* Fold two value ranges of a POINTER_PLUS_EXPR into VR.  */

static void
extract_range_from_pointer_plus_expr (value_range *vr,
                                      enum tree_code code,
                                      tree expr_type,
                                      const value_range *vr0,
                                      const value_range *vr1)
{
  gcc_checking_assert (POINTER_TYPE_P (expr_type)
                       && code == POINTER_PLUS_EXPR);
  /* For pointer types, we are really only interested in asserting
     whether the expression evaluates to non-NULL.
     With -fno-delete-null-pointer-checks we need to be more
     conservative.  As some object might reside at address 0,
     then some offset could be added to it and the same offset
     subtracted again and the result would be NULL.
     E.g.
     static int a[12]; where &a[0] is NULL and
     ptr = &a[6];
     ptr -= 6;
     ptr will be NULL here, even when there is POINTER_PLUS_EXPR
     where the first range doesn't include zero and the second one
     doesn't either.  As the second operand is sizetype (unsigned),
     consider all ranges where the MSB could be set as possible
     subtractions where the result might be NULL.  */
  if ((!range_includes_zero_p (vr0)
       || !range_includes_zero_p (vr1))
      && !TYPE_OVERFLOW_WRAPS (expr_type)
      && (flag_delete_null_pointer_checks
          || (range_int_cst_p (vr1)
              && !tree_int_cst_sign_bit (vr1->max ()))))
    vr->set_nonzero (expr_type);
  else if (vr0->zero_p () && vr1->zero_p ())
    vr->set_zero (expr_type);
  else
    vr->set_varying (expr_type);
}

/* Extract range information from a PLUS/MINUS_EXPR and store the
   result in *VR.  */

static void
extract_range_from_plus_minus_expr (value_range *vr,
                                    enum tree_code code,
                                    tree expr_type,
                                    const value_range *vr0_,
                                    const value_range *vr1_)
{
  gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);

  value_range vr0 = *vr0_, vr1 = *vr1_;
  value_range vrtem0, vrtem1;

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express ~[] op X as ([]' op X) U ([]'' op X).  */
  if (vr0.kind () == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_plus_minus_expr (vr, code, expr_type, &vrtem0, vr1_);
      if (!vrtem1.undefined_p ())
        {
          value_range vrres;
          extract_range_from_plus_minus_expr (&vrres, code, expr_type,
                                              &vrtem1, vr1_);
          vr->union_ (&vrres);
        }
      return;
    }
  /* Likewise for X op ~[].  */
  if (vr1.kind () == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
    {
      extract_range_from_plus_minus_expr (vr, code, expr_type, vr0_, &vrtem0);
      if (!vrtem1.undefined_p ())
        {
          value_range vrres;
          extract_range_from_plus_minus_expr (&vrres, code, expr_type,
                                              vr0_, &vrtem1);
          vr->union_ (&vrres);
        }
      return;
    }

  value_range_kind kind;
  value_range_kind vr0_kind = vr0.kind (), vr1_kind = vr1.kind ();
  tree vr0_min = vr0.min (), vr0_max = vr0.max ();
  tree vr1_min = vr1.min (), vr1_max = vr1.max ();
  tree min = NULL_TREE, max = NULL_TREE;

  /* This will normalize things such that calculating
     [0,0] - VR_VARYING is not dropped to varying, but is
     calculated as [MIN+1, MAX].  */
  if (vr0.varying_p ())
    {
      vr0_kind = VR_RANGE;
      vr0_min = vrp_val_min (expr_type);
      vr0_max = vrp_val_max (expr_type);
    }
  if (vr1.varying_p ())
    {
      vr1_kind = VR_RANGE;
      vr1_min = vrp_val_min (expr_type);
      vr1_max = vrp_val_max (expr_type);
    }
  const bool minus_p = (code == MINUS_EXPR);
  tree min_op0 = vr0_min;
  tree min_op1 = minus_p ? vr1_max : vr1_min;
  tree max_op0 = vr0_max;
  tree max_op1 = minus_p ? vr1_min : vr1_max;
  tree sym_min_op0 = NULL_TREE;
  tree sym_min_op1 = NULL_TREE;
  tree sym_max_op0 = NULL_TREE;
  tree sym_max_op1 = NULL_TREE;
  bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;

  neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;

  /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
     single-symbolic ranges, try to compute the precise resulting range,
     but only if we know that this resulting range will also be constant
     or single-symbolic.  */
  if (vr0_kind == VR_RANGE && vr1_kind == VR_RANGE
      && (TREE_CODE (min_op0) == INTEGER_CST
          || (sym_min_op0
              = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
      && (TREE_CODE (min_op1) == INTEGER_CST
          || (sym_min_op1
              = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
      && (!(sym_min_op0 && sym_min_op1)
          || (sym_min_op0 == sym_min_op1
              && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
      && (TREE_CODE (max_op0) == INTEGER_CST
          || (sym_max_op0
              = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
      && (TREE_CODE (max_op1) == INTEGER_CST
          || (sym_max_op1
              = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
      && (!(sym_max_op0 && sym_max_op1)
          || (sym_max_op0 == sym_max_op1
              && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
    {
      wide_int wmin, wmax;
      wi::overflow_type min_ovf = wi::OVF_NONE;
      wi::overflow_type max_ovf = wi::OVF_NONE;

      /* Build the bounds.  */
      combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
      combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);

      /* If the resulting range will be symbolic, we need to eliminate any
         explicit or implicit overflow introduced in the above computation
         because compare_values could make an incorrect use of it.  That's
         why we require one of the ranges to be a singleton.  */
      if ((sym_min_op0 != sym_min_op1 || sym_max_op0 != sym_max_op1)
          && ((bool)min_ovf || (bool)max_ovf
              || (min_op0 != max_op0 && min_op1 != max_op1)))
        {
          vr->set_varying (expr_type);
          return;
        }

      /* Adjust the range for possible overflow.  */
      set_value_range_with_overflow (kind, min, max, expr_type,
                                     wmin, wmax, min_ovf, max_ovf);
      if (kind == VR_VARYING)
        {
          vr->set_varying (expr_type);
          return;
        }

      /* Build the symbolic bounds if needed.  */
      adjust_symbolic_bound (min, code, expr_type,
                             sym_min_op0, sym_min_op1,
                             neg_min_op0, neg_min_op1);
      adjust_symbolic_bound (max, code, expr_type,
                             sym_max_op0, sym_max_op1,
                             neg_max_op0, neg_max_op1);
    }
  else
    {
      /* For other cases, for example if we have a PLUS_EXPR with two
         VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
         to compute a precise range for such a case.
         ??? General even mixed range kind operations can be expressed
         by for example transforming ~[3, 5] + [1, 2] to range-only
         operations and a union primitive:
           [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
               [-INF+1, 4]     U    [6, +INF(OVF)]
         though usually the union is not exactly representable with
         a single range or anti-range as the above is
           [-INF+1, +INF(OVF)] intersected with ~[5, 5]
         but one could use a scheme similar to equivalences for this.  */
      vr->set_varying (expr_type);
      return;
    }

  /* If either MIN or MAX overflowed, then set the resulting range to
     VARYING.  */
  if (min == NULL_TREE
      || TREE_OVERFLOW_P (min)
      || max == NULL_TREE
      || TREE_OVERFLOW_P (max))
    {
      vr->set_varying (expr_type);
      return;
    }

  int cmp = compare_values (min, max);
  if (cmp == -2 || cmp == 1)
    {
      /* If the new range has its limits swapped around (MIN > MAX),
         then the operation caused one of them to wrap around, mark
         the new range VARYING.  */
      vr->set_varying (expr_type);
    }
  else
    vr->set (min, max, kind);
}

/* Return the range-ops handler for CODE and EXPR_TYPE.  If no
   suitable operator is found, return NULL and set VR to VARYING.  */

static const range_operator *
get_range_op_handler (value_range *vr,
                      enum tree_code code,
                      tree expr_type)
{
  const range_operator *op = range_op_handler (code, expr_type);
  if (!op)
    vr->set_varying (expr_type);
  return op;
}

/* If the types passed are supported, return TRUE, otherwise set VR to
   VARYING and return FALSE.  */

static bool
supported_types_p (value_range *vr,
                   tree type0,
                   tree type1 = NULL)
{
  if (!value_range::supports_type_p (type0)
      || (type1 && !value_range::supports_type_p (type1)))
    {
      vr->set_varying (type0);
      return false;
    }
  return true;
}

/* If any of the ranges passed are defined, return TRUE, otherwise set
   VR to UNDEFINED and return FALSE.  */

static bool
defined_ranges_p (value_range *vr,
                  const value_range *vr0, const value_range *vr1 = NULL)
{
  if (vr0->undefined_p () && (!vr1 || vr1->undefined_p ()))
    {
      vr->set_undefined ();
      return false;
    }
  return true;
}

static value_range
drop_undefines_to_varying (const value_range *vr, tree expr_type)
{
  if (vr->undefined_p ())
    return value_range (expr_type);
  else
    return *vr;
}

/* If any operand is symbolic, perform a binary operation on them and
   return TRUE, otherwise return FALSE.  */

static bool
range_fold_binary_symbolics_p (value_range *vr,
                               tree_code code,
                               tree expr_type,
                               const value_range *vr0, const value_range *vr1)
{
  if (vr0->symbolic_p () || vr1->symbolic_p ())
    {
      if ((code == PLUS_EXPR || code == MINUS_EXPR))
        {
          extract_range_from_plus_minus_expr (vr, code, expr_type, vr0, vr1);
          return true;
        }
      if (POINTER_TYPE_P (expr_type) && code == POINTER_PLUS_EXPR)
        {
          extract_range_from_pointer_plus_expr (vr, code, expr_type, vr0, vr1);
          return true;
        }
      const range_operator *op = get_range_op_handler (vr, code, expr_type);
      value_range vr0_cst (*vr0), vr1_cst (*vr1);
      vr0_cst.normalize_symbolics ();
      vr1_cst.normalize_symbolics ();
      return op->fold_range (*vr, expr_type, vr0_cst, vr1_cst);
    }
  return false;
}

/* If operand is symbolic, perform a unary operation on it and return
   TRUE, otherwise return FALSE.  */

static bool
range_fold_unary_symbolics_p (value_range *vr,
                              tree_code code,
                              tree expr_type,
                              const value_range *vr0)
{
  if (vr0->symbolic_p ())
    {
      if (code == NEGATE_EXPR)
        {
          /* -X is simply 0 - X.  */
          value_range zero;
          zero.set_zero (vr0->type ());
          range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &zero, vr0);
          return true;
        }
      if (code == BIT_NOT_EXPR)
        {
          /* ~X is simply -1 - X.  */
          value_range minusone;
          minusone.set (build_int_cst (vr0->type (), -1));
          range_fold_binary_expr (vr, MINUS_EXPR, expr_type, &minusone, vr0);
          return true;
        }
      const range_operator *op = get_range_op_handler (vr, code, expr_type);
      value_range vr0_cst (*vr0);
      vr0_cst.normalize_symbolics ();
      return op->fold_range (*vr, expr_type, vr0_cst, value_range (expr_type));
    }
  return false;
}

/* Perform a binary operation on a pair of ranges.  */

void
range_fold_binary_expr (value_range *vr,
                        enum tree_code code,
                        tree expr_type,
                        const value_range *vr0_,
                        const value_range *vr1_)
{
  if (!supported_types_p (vr, expr_type)
      || !defined_ranges_p (vr, vr0_, vr1_))
    return;
  const range_operator *op = get_range_op_handler (vr, code, expr_type);
  if (!op)
    return;

  value_range vr0 = drop_undefines_to_varying (vr0_, expr_type);
  value_range vr1 = drop_undefines_to_varying (vr1_, expr_type);
  if (range_fold_binary_symbolics_p (vr, code, expr_type, &vr0, &vr1))
    return;

  vr0.normalize_addresses ();
  vr1.normalize_addresses ();
  op->fold_range (*vr, expr_type, vr0, vr1);
}

/* Perform a unary operation on a range.  */

void
range_fold_unary_expr (value_range *vr,
                       enum tree_code code, tree expr_type,
                       const value_range *vr0,
                       tree vr0_type)
{
  if (!supported_types_p (vr, expr_type, vr0_type)
      || !defined_ranges_p (vr, vr0))
    return;
  const range_operator *op = get_range_op_handler (vr, code, expr_type);
  if (!op)
    return;

  if (range_fold_unary_symbolics_p (vr, code, expr_type, vr0))
    return;

  value_range vr0_cst (*vr0);
  vr0_cst.normalize_addresses ();
  op->fold_range (*vr, expr_type, vr0_cst, value_range (expr_type));
}

/* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
   create a new SSA name N and return the assertion assignment
   'N = ASSERT_EXPR <V, V OP W>'.  */

static gimple *
build_assert_expr_for (tree cond, tree v)
{
  tree a;
  gassign *assertion;

  gcc_assert (TREE_CODE (v) == SSA_NAME
              && COMPARISON_CLASS_P (cond));

  a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
  assertion = gimple_build_assign (NULL_TREE, a);

  /* The new ASSERT_EXPR creates a new SSA name that replaces the
     operand of the ASSERT_EXPR.  Create it so the new name and the old one
     are registered in the replacement table so that we can fix the SSA web
     after adding all the ASSERT_EXPRs.  */
  tree new_def = create_new_def_for (v, assertion, NULL);
  /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
     given we have to be able to fully propagate those out to re-create
     valid SSA when removing the asserts.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;

  return assertion;
}


/* Return true if STMT's predicate expression involves floating
   point values.  */

static inline bool
fp_predicate (gimple *stmt)
{
  GIMPLE_CHECK (stmt, GIMPLE_COND);

  return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
}

/* If the range of values taken by OP can be inferred after STMT executes,
   return the comparison code (COMP_CODE_P) and value (VAL_P) that
   describes the inferred range.  Return true if a range could be
   inferred.  */

bool
infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
{
  *val_p = NULL_TREE;
  *comp_code_p = ERROR_MARK;

  /* Do not attempt to infer anything in names that flow through
     abnormal edges.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
    return false;

  /* If STMT is the last statement of a basic block with no normal
     successors, there is no point inferring anything about any of its
     operands.  We would not be able to find a proper insertion point
     for the assertion, anyway.  */
  if (stmt_ends_bb_p (stmt))
    {
      edge_iterator ei;
      edge e;

      FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
        if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
          break;
      if (e == NULL)
        return false;
    }

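  /* A dereference of OP (or another use that requires a non-null
     pointer) means that after STMT executes, OP != 0 must hold.  */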
  if (infer_nonnull_range (stmt, op))
    {
      *val_p = build_int_cst (TREE_TYPE (op), 0);
      *comp_code_p = NE_EXPR;
      return true;
    }

  return false;
}


void dump_asserts_for (FILE *, tree);
void debug_asserts_for (tree);
void dump_all_asserts (FILE *);
void debug_all_asserts (void);

/* Dump all the registered assertions for NAME to FILE.  */

void
dump_asserts_for (FILE *file, tree name)
{
  assert_locus *loc;

  fprintf (file, "Assertions to be inserted for ");
  print_generic_expr (file, name);
  fprintf (file, "\n");

  loc = asserts_for[SSA_NAME_VERSION (name)];
  while (loc)
    {
      fprintf (file, "\t");
      print_gimple_stmt (file, gsi_stmt (loc->si), 0);
      fprintf (file, "\n\tBB #%d", loc->bb->index);
      if (loc->e)
        {
          fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
                   loc->e->dest->index);
          dump_edge_info (file, loc->e, dump_flags, 0);
        }
      fprintf (file, "\n\tPREDICATE: ");
      print_generic_expr (file, loc->expr);
      fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
      print_generic_expr (file, loc->val);
      fprintf (file, "\n\n");
      loc = loc->next;
    }

  fprintf (file, "\n");
}


/* Dump all the registered assertions for NAME to stderr.  */

DEBUG_FUNCTION void
debug_asserts_for (tree name)
{
  dump_asserts_for (stderr, name);
}


/* Dump all the registered assertions for all the names to FILE.  */

void
dump_all_asserts (FILE *file)
{
  unsigned i;
  bitmap_iterator bi;

  fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
  EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
    dump_asserts_for (file, ssa_name (i));
  fprintf (file, "\n");
}


/* Dump all the registered assertions for all the names to stderr.  */

DEBUG_FUNCTION void
debug_all_asserts (void)
{
  dump_all_asserts (stderr);
}

/* Dump assert_info structure.  */

void
dump_assert_info (FILE *file, const assert_info &assert)
{
  fprintf (file, "Assert for: ");
  print_generic_expr (file, assert.name);
  fprintf (file, "\n\tPREDICATE: expr=[");
  print_generic_expr (file, assert.expr);
  fprintf (file, "] %s ", get_tree_code_name (assert.comp_code));
  fprintf (file, "val=[");
  print_generic_expr (file, assert.val);
  fprintf (file, "]\n\n");
}

DEBUG_FUNCTION void
debug (const assert_info &assert)
{
  dump_assert_info (stderr, assert);
}

/* Dump a vector of assert_info's.  */

void
dump_asserts_info (FILE *file, const vec<assert_info> &asserts)
{
  for (unsigned i = 0; i < asserts.length (); ++i)
    {
      dump_assert_info (file, asserts[i]);
      fprintf (file, "\n");
    }
}

DEBUG_FUNCTION void
debug (const vec<assert_info> &asserts)
{
  dump_asserts_info (stderr, asserts);
}

/* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS.  */

static void
add_assert_info (vec<assert_info> &asserts,
                 tree name, tree expr, enum tree_code comp_code, tree val)
{
  assert_info info;
  info.comp_code = comp_code;
  info.name = name;
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  info.val = val;
  info.expr = expr;
  asserts.safe_push (info);
  if (dump_enabled_p ())
    dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
                 "Adding assert for %T from %T %s %T\n",
                 name, expr, op_symbol_code (comp_code), val);
}

/* If NAME doesn't have an ASSERT_EXPR registered for asserting
   'EXPR COMP_CODE VAL' at a location that dominates block BB or
   E->DEST, then register this location as a possible insertion point
   for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.

   BB, E and SI provide the exact insertion point for the new
   ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
   on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
   BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
   must not be NULL.  */

static void
register_new_assert_for (tree name, tree expr,
                         enum tree_code comp_code,
                         tree val,
                         basic_block bb,
                         edge e,
                         gimple_stmt_iterator si)
{
  assert_locus *n, *loc, *last_loc;
  basic_block dest_bb;

  gcc_checking_assert (bb == NULL || e == NULL);

  if (e == NULL)
    gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
                         && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);

  /* Never build an assert comparing against an integer constant with
     TREE_OVERFLOW set.  This confuses our undefined overflow warning
     machinery.  */
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);

  /* The new assertion A will be inserted at BB or E.  We need to
     determine if the new location is dominated by a previously
     registered location for A.  If we are doing an edge insertion,
     assume that A will be inserted at E->DEST.  Note that this is not
     necessarily true.

     If E is a critical edge, it will be split.  But even if E is
     split, the new block will dominate the same set of blocks that
     E->DEST dominates.

     The reverse, however, is not true, blocks dominated by E->DEST
     will not be dominated by the new block created to split E.  So,
     if the insertion location is on a critical edge, we will not use
     the new location to move another assertion previously registered
     at a block dominated by E->DEST.  */
  dest_bb = (bb) ? bb : e->dest;

  /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
     VAL at a block dominating DEST_BB, then we don't need to insert a new
     one.  Similarly, if the same assertion already exists at a block
     dominated by DEST_BB and the new location is not on a critical
     edge, then update the existing location for the assertion (i.e.,
     move the assertion up in the dominance tree).

     Note, this is implemented as a simple linked list because there
     should not be more than a handful of assertions registered per
     name.  If this becomes a performance problem, a table hashed by
     COMP_CODE and VAL could be implemented.  */
  loc = asserts_for[SSA_NAME_VERSION (name)];
  last_loc = loc;
  while (loc)
    {
      if (loc->comp_code == comp_code
          && (loc->val == val
              || operand_equal_p (loc->val, val, 0))
          && (loc->expr == expr
              || operand_equal_p (loc->expr, expr, 0)))
        {
          /* If E is not a critical edge and DEST_BB
             dominates the existing location for the assertion, move
             the assertion up in the dominance tree by updating its
             location information.  */
          if ((e == NULL || !EDGE_CRITICAL_P (e))
              && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
            {
              loc->bb = dest_bb;
              loc->e = e;
              loc->si = si;
              return;
            }
        }

      /* Update the last node of the list and move to the next one.  */
      last_loc = loc;
      loc = loc->next;
    }

  /* If we didn't find an assertion already registered for
     NAME COMP_CODE VAL, add a new one at the end of the list of
     assertions associated with NAME.  */
  n = XNEW (struct assert_locus);
  n->bb = dest_bb;
  n->e = e;
  n->si = si;
  n->comp_code = comp_code;
  n->val = val;
  n->expr = expr;
  n->next = NULL;

  if (last_loc)
    last_loc->next = n;
  else
    asserts_for[SSA_NAME_VERSION (name)] = n;

  bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
}

/* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
   Extract a suitable test code and value and store them into *CODE_P and
   *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.

   If no extraction was possible, return FALSE, otherwise return TRUE.

   If INVERT is true, then we invert the result stored into *CODE_P.  */

static bool
extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
                                         tree cond_op0, tree cond_op1,
                                         bool invert, enum tree_code *code_p,
                                         tree *val_p)
{
  enum tree_code comp_code;
  tree val;

  /* We have a comparison of the form NAME COMP VAL
     or VAL COMP NAME.  */
  if (name == cond_op1)
    {
      /* If the predicate is of the form VAL COMP NAME, flip
         COMP around because we need to register NAME as the
         first operand in the predicate.  */
      comp_code = swap_tree_comparison (cond_code);
      val = cond_op0;
    }
  else if (name == cond_op0)
    {
      /* The comparison is of the form NAME COMP VAL, so the
         comparison code remains unchanged.  */
      comp_code = cond_code;
      val = cond_op1;
    }
  else
    gcc_unreachable ();

  /* Invert the comparison code as necessary.  */
  if (invert)
    comp_code = invert_tree_comparison (comp_code, 0);

  /* VRP only handles integral and pointer types.  */
  if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
      && ! POINTER_TYPE_P (TREE_TYPE (val)))
    return false;

  /* Do not register always-false predicates.
     FIXME: this works around a limitation in fold() when dealing with
     enumerations.  Given 'enum { N1, N2 } x;', fold will not
     fold 'if (x > N2)' to 'if (0)'.  */
  if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (val)))
    {
      tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
      tree max = TYPE_MAX_VALUE (TREE_TYPE (val));

      if (comp_code == GT_EXPR
          && (!max
              || compare_values (val, max) == 0))
        return false;

      if (comp_code == LT_EXPR
          && (!min
              || compare_values (val, min) == 0))
        return false;
    }
  *code_p = comp_code;
  *val_p = val;
  return true;
}

/* Find the smallest RES such that RES > VAL && (RES & MASK) == RES, if
   any; otherwise return VAL.  VAL and MASK must be zero-extended for
   precision PREC.  If SGNBIT is non-zero, first xor VAL with SGNBIT
   (to transform signed values into unsigned) and at the end xor
   SGNBIT back.  */

static wide_int
masked_increment (const wide_int &val_in, const wide_int &mask,
                  const wide_int &sgnbit, unsigned int prec)
{
  wide_int bit = wi::one (prec), res;
  unsigned int i;

  wide_int val = val_in ^ sgnbit;
  for (i = 0; i < prec; i++, bit += bit)
    {
      res = mask;
      if ((res & bit) == 0)
        continue;
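      /* BIT is set in MASK.  Round VAL + BIT down to a multiple of
         BIT, then clear any bits not present in MASK; if the result
         is still greater than VAL, it is a valid increment.  */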
      res = bit - 1;
      res = wi::bit_and_not (val + bit, res);
      res &= mask;
      if (wi::gtu_p (res, val))
        return res ^ sgnbit;
    }
  return val ^ sgnbit;
}

1703 /* Helper for overflow_comparison_p
1704
1705 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
1706 OP1's defining statement to see if it ultimately has the form
1707 OP0 CODE (OP0 PLUS INTEGER_CST)
1708
1709 If so, return TRUE indicating this is an overflow test and store into
1710 *NEW_CST an updated constant that can be used in a narrowed range test.
1711
1712 REVERSED indicates if the comparison was originally:
1713
1714 OP1 CODE' OP0.
1715
1716 This affects how we build the updated constant. */
1717
1718 static bool
overflow_comparison_p_1(enum tree_code code,tree op0,tree op1,bool follow_assert_exprs,bool reversed,tree * new_cst)1719 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
1720 bool follow_assert_exprs, bool reversed, tree *new_cst)
1721 {
1722 /* See if this is a relational operation between two SSA_NAMES with
1723 unsigned, overflow wrapping values. If so, check it more deeply. */
1724 if ((code == LT_EXPR || code == LE_EXPR
1725 || code == GE_EXPR || code == GT_EXPR)
1726 && TREE_CODE (op0) == SSA_NAME
1727 && TREE_CODE (op1) == SSA_NAME
1728 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
1729 && TYPE_UNSIGNED (TREE_TYPE (op0))
1730 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
1731 {
1732 gimple *op1_def = SSA_NAME_DEF_STMT (op1);
1733
1734 /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
1735 if (follow_assert_exprs)
1736 {
1737 while (gimple_assign_single_p (op1_def)
1738 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
1739 {
1740 op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
1741 if (TREE_CODE (op1) != SSA_NAME)
1742 break;
1743 op1_def = SSA_NAME_DEF_STMT (op1);
1744 }
1745 }
1746
1747 /* Now look at the defining statement of OP1 to see if it adds
1748 or subtracts a nonzero constant from another operand. */
1749 if (op1_def
1750 && is_gimple_assign (op1_def)
1751 && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
1752 && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
1753 && !integer_zerop (gimple_assign_rhs2 (op1_def)))
1754 {
1755 tree target = gimple_assign_rhs1 (op1_def);
1756
1757 /* If requested, follow ASSERT_EXPRs backwards for op0 looking
1758 for one where TARGET appears on the RHS. */
1759 if (follow_assert_exprs)
1760 {
1761 /* Now see if that "other operand" is op0, following the chain
1762 of ASSERT_EXPRs if necessary. */
1763 gimple *op0_def = SSA_NAME_DEF_STMT (op0);
1764 while (op0 != target
1765 && gimple_assign_single_p (op0_def)
1766 && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
1767 {
1768 op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
1769 if (TREE_CODE (op0) != SSA_NAME)
1770 break;
1771 op0_def = SSA_NAME_DEF_STMT (op0);
1772 }
1773 }
1774
1775 /* If we did not find our target SSA_NAME, then this is not
1776 an overflow test. */
1777 if (op0 != target)
1778 return false;
1779
1780 tree type = TREE_TYPE (op0);
1781 wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
1782 tree inc = gimple_assign_rhs2 (op1_def);
1783 if (reversed)
1784 *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
1785 else
1786 *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
1787 return true;
1788 }
1789 }
1790 return false;
1791 }
1792
1793 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
1794 OP1's defining statement to see if it ultimately has the form
1795 OP0 CODE (OP0 PLUS INTEGER_CST)
1796
1797 If so, return TRUE indicating this is an overflow test and store into
1798 *NEW_CST an updated constant that can be used in a narrowed range test.
1799
1800 These statements are left as-is in the IL to facilitate discovery of
1801 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
1802 the alternate range representation is often useful within VRP. */
1803
1804 bool
1805 overflow_comparison_p (tree_code code, tree name, tree val,
1806 bool use_equiv_p, tree *new_cst)
1807 {
1808 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
1809 return true;
1810 return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
1811 use_equiv_p, true, new_cst);
1812 }
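
/* As an illustration (the constants are assumed): for an unsigned
   char X, the overflow test X + 10 < X is true iff the addition
   wrapped, i.e. iff X >= 246.  It is therefore equivalent to the
   range test X > 245, where 245 is the type maximum 255 minus the
   added constant 10.  */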
1813
1814
1815 /* Try to register an edge assertion for SSA name NAME on edge E for
1816 the condition COND contributing to the conditional jump pointed to by BSI.
1817 Invert the condition COND if INVERT is true. */
1818
1819 static void
1820 register_edge_assert_for_2 (tree name, edge e,
1821 enum tree_code cond_code,
1822 tree cond_op0, tree cond_op1, bool invert,
1823 vec<assert_info> &asserts)
1824 {
1825 tree val;
1826 enum tree_code comp_code;
1827
1828 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
1829 cond_op0,
1830 cond_op1,
1831 invert, &comp_code, &val))
1832 return;
1833
1834 /* Queue the assert. */
1835 tree x;
1836 if (overflow_comparison_p (comp_code, name, val, false, &x))
1837 {
1838 enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
1839 ? GT_EXPR : LE_EXPR);
1840 add_assert_info (asserts, name, name, new_code, x);
1841 }
1842 add_assert_info (asserts, name, name, comp_code, val);
1843
1844 /* In the case of NAME <= CST and NAME being defined as
1845 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
1846 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
1847 This catches range and anti-range tests. */
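/* As an illustration (assuming 8-bit operands): from
   (unsigned) NAME2 + 3 <= 10 it follows that (unsigned) NAME2 is
   in [253, 255] (the wrapped values) or in [0, 7], i.e. NAME2 has
   the anti-range ~[8, 252].  Keeping the addition in the asserted
   expression below encodes exactly this test.  */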
1848 if ((comp_code == LE_EXPR
1849 || comp_code == GT_EXPR)
1850 && TREE_CODE (val) == INTEGER_CST
1851 && TYPE_UNSIGNED (TREE_TYPE (val)))
1852 {
1853 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
1854 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
1855
1856 /* Extract CST2 from the (optional) addition. */
1857 if (is_gimple_assign (def_stmt)
1858 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
1859 {
1860 name2 = gimple_assign_rhs1 (def_stmt);
1861 cst2 = gimple_assign_rhs2 (def_stmt);
1862 if (TREE_CODE (name2) == SSA_NAME
1863 && TREE_CODE (cst2) == INTEGER_CST)
1864 def_stmt = SSA_NAME_DEF_STMT (name2);
1865 }
1866
1867 /* Extract NAME3 from the (optional) sign-changing cast. */
1868 if (gimple_assign_cast_p (def_stmt))
1869 {
1870 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
1871 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
1872 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
1873 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
1874 name3 = gimple_assign_rhs1 (def_stmt);
1875 }
1876
1877 /* If name3 is used later, create an ASSERT_EXPR for it. */
1878 if (name3 != NULL_TREE
1879 && TREE_CODE (name3) == SSA_NAME
1880 && (cst2 == NULL_TREE
1881 || TREE_CODE (cst2) == INTEGER_CST)
1882 && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
1883 {
1884 tree tmp;
1885
1886 /* Build an expression for the range test. */
1887 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
1888 if (cst2 != NULL_TREE)
1889 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
1890 add_assert_info (asserts, name3, tmp, comp_code, val);
1891 }
1892
1893 /* If name2 is used later, create an ASSERT_EXPR for it. */
1894 if (name2 != NULL_TREE
1895 && TREE_CODE (name2) == SSA_NAME
1896 && TREE_CODE (cst2) == INTEGER_CST
1897 && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
1898 {
1899 tree tmp;
1900
1901 /* Build an expression for the range test. */
1902 tmp = name2;
1903 if (TREE_TYPE (name) != TREE_TYPE (name2))
1904 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
1905 if (cst2 != NULL_TREE)
1906 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
1907 add_assert_info (asserts, name2, tmp, comp_code, val);
1908 }
1909 }
1910
1911 /* In the case of post-in/decrement tests like if (i++) ... and uses
1912 of the in/decremented value on the edge, the extra name we want to
1913 assert for is not on the def chain of the name being compared.
1914 Instead it is in the set of its use stmts.
1915 Similar cases happen for conversions that were simplified through
1916 fold_{sign_changed,widened}_comparison. */
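/* For instance (an assumed example): for
     if (i++ != 0)
   where j = i + 1 is used on the edge, NAME is the old value of i
   and VAL is 0, so for the use stmt j = i + 1 we can record the
   assert j != 0 + 1, i.e. j != 1, on that edge.  */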
1917 if ((comp_code == NE_EXPR
1918 || comp_code == EQ_EXPR)
1919 && TREE_CODE (val) == INTEGER_CST)
1920 {
1921 imm_use_iterator ui;
1922 gimple *use_stmt;
1923 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
1924 {
1925 if (!is_gimple_assign (use_stmt))
1926 continue;
1927
1928 /* Only consider use stmts whose block dominates the edge source E->SRC. */
1929 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
1930 continue;
1931
1932 tree name2 = gimple_assign_lhs (use_stmt);
1933 if (TREE_CODE (name2) != SSA_NAME)
1934 continue;
1935
1936 enum tree_code code = gimple_assign_rhs_code (use_stmt);
1937 tree cst;
1938 if (code == PLUS_EXPR
1939 || code == MINUS_EXPR)
1940 {
1941 cst = gimple_assign_rhs2 (use_stmt);
1942 if (TREE_CODE (cst) != INTEGER_CST)
1943 continue;
1944 cst = int_const_binop (code, val, cst);
1945 }
1946 else if (CONVERT_EXPR_CODE_P (code))
1947 {
1948 /* For truncating conversions we cannot record
1949 an inequality. */
1950 if (comp_code == NE_EXPR
1951 && (TYPE_PRECISION (TREE_TYPE (name2))
1952 < TYPE_PRECISION (TREE_TYPE (name))))
1953 continue;
1954 cst = fold_convert (TREE_TYPE (name2), val);
1955 }
1956 else
1957 continue;
1958
1959 if (TREE_OVERFLOW_P (cst))
1960 cst = drop_tree_overflow (cst);
1961 add_assert_info (asserts, name2, name2, comp_code, cst);
1962 }
1963 }
1964
1965 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
1966 && TREE_CODE (val) == INTEGER_CST)
1967 {
1968 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
1969 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
1970 tree val2 = NULL_TREE;
1971 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
1972 wide_int mask = wi::zero (prec);
1973 unsigned int nprec = prec;
1974 enum tree_code rhs_code = ERROR_MARK;
1975
1976 if (is_gimple_assign (def_stmt))
1977 rhs_code = gimple_assign_rhs_code (def_stmt);
1978
1979 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
1980 assert that A != CST1 -+ CST2. */
1981 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
1982 && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
1983 {
1984 tree op0 = gimple_assign_rhs1 (def_stmt);
1985 tree op1 = gimple_assign_rhs2 (def_stmt);
1986 if (TREE_CODE (op0) == SSA_NAME
1987 && TREE_CODE (op1) == INTEGER_CST)
1988 {
1989 enum tree_code reverse_op = (rhs_code == PLUS_EXPR
1990 ? MINUS_EXPR : PLUS_EXPR);
1991 op1 = int_const_binop (reverse_op, val, op1);
1992 if (TREE_OVERFLOW (op1))
1993 op1 = drop_tree_overflow (op1);
1994 add_assert_info (asserts, op0, op0, comp_code, op1);
1995 }
1996 }
1997
1998 /* Add asserts for NAME cmp CST and NAME being defined
1999 as NAME = (int) NAME2. */
2000 if (!TYPE_UNSIGNED (TREE_TYPE (val))
2001 && (comp_code == LE_EXPR || comp_code == LT_EXPR
2002 || comp_code == GT_EXPR || comp_code == GE_EXPR)
2003 && gimple_assign_cast_p (def_stmt))
2004 {
2005 name2 = gimple_assign_rhs1 (def_stmt);
2006 if (CONVERT_EXPR_CODE_P (rhs_code)
2007 && TREE_CODE (name2) == SSA_NAME
2008 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2009 && TYPE_UNSIGNED (TREE_TYPE (name2))
2010 && prec == TYPE_PRECISION (TREE_TYPE (name2))
2011 && (comp_code == LE_EXPR || comp_code == GT_EXPR
2012 || !tree_int_cst_equal (val,
2013 TYPE_MIN_VALUE (TREE_TYPE (val)))))
2014 {
2015 tree tmp, cst;
2016 enum tree_code new_comp_code = comp_code;
2017
2018 cst = fold_convert (TREE_TYPE (name2),
2019 TYPE_MIN_VALUE (TREE_TYPE (val)));
2020 /* Build an expression for the range test. */
2021 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
2022 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
2023 fold_convert (TREE_TYPE (name2), val));
2024 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2025 {
2026 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
2027 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
2028 build_int_cst (TREE_TYPE (name2), 1));
2029 }
2030 add_assert_info (asserts, name2, tmp, new_comp_code, cst);
2031 }
2032 }
2033
2034 /* Add asserts for NAME cmp CST and NAME being defined as
2035 NAME = NAME2 >> CST2.
2036
2037 Extract CST2 from the right shift. */
2038 if (rhs_code == RSHIFT_EXPR)
2039 {
2040 name2 = gimple_assign_rhs1 (def_stmt);
2041 cst2 = gimple_assign_rhs2 (def_stmt);
2042 if (TREE_CODE (name2) == SSA_NAME
2043 && tree_fits_uhwi_p (cst2)
2044 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2045 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
2046 && type_has_mode_precision_p (TREE_TYPE (val)))
2047 {
2048 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
2049 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
2050 }
2051 }
2052 if (val2 != NULL_TREE
2053 && TREE_CODE (val2) == INTEGER_CST
2054 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
2055 TREE_TYPE (val),
2056 val2, cst2), val))
2057 {
2058 enum tree_code new_comp_code = comp_code;
2059 tree tmp, new_val;
2060
2061 tmp = name2;
2062 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
2063 {
2064 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
2065 {
2066 tree type = build_nonstandard_integer_type (prec, 1);
2067 tmp = build1 (NOP_EXPR, type, name2);
2068 val2 = fold_convert (type, val2);
2069 }
2070 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
2071 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
2072 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
2073 }
2074 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2075 {
2076 wide_int minval
2077 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2078 new_val = val2;
2079 if (minval == wi::to_wide (new_val))
2080 new_val = NULL_TREE;
2081 }
2082 else
2083 {
2084 wide_int maxval
2085 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2086 mask |= wi::to_wide (val2);
2087 if (wi::eq_p (mask, maxval))
2088 new_val = NULL_TREE;
2089 else
2090 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
2091 }
2092
2093 if (new_val)
2094 add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
2095 }
2096
2097 /* If we have a conversion that doesn't change the value of the source,
2098 simply register the same assert for it. */
2099 if (CONVERT_EXPR_CODE_P (rhs_code))
2100 {
2101 wide_int rmin, rmax;
2102 tree rhs1 = gimple_assign_rhs1 (def_stmt);
2103 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
2104 && TREE_CODE (rhs1) == SSA_NAME
2105 /* Make sure the relation preserves the upper/lower boundary of
2106 the range conservatively. */
2107 && (comp_code == NE_EXPR
2108 || comp_code == EQ_EXPR
2109 || (TYPE_SIGN (TREE_TYPE (name))
2110 == TYPE_SIGN (TREE_TYPE (rhs1)))
2111 || ((comp_code == LE_EXPR
2112 || comp_code == LT_EXPR)
2113 && !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
2114 || ((comp_code == GE_EXPR
2115 || comp_code == GT_EXPR)
2116 && TYPE_UNSIGNED (TREE_TYPE (rhs1))))
2117 /* And the conversion does not alter the value we compare
2118 against, and all values in rhs1 can be represented in
2119 the destination type. */
2120 && int_fits_type_p (val, TREE_TYPE (rhs1))
2121 && ((TYPE_PRECISION (TREE_TYPE (name))
2122 > TYPE_PRECISION (TREE_TYPE (rhs1)))
2123 || (get_range_info (rhs1, &rmin, &rmax) == VR_RANGE
2124 && wi::fits_to_tree_p
2125 (widest_int::from (rmin,
2126 TYPE_SIGN (TREE_TYPE (rhs1))),
2127 TREE_TYPE (name))
2128 && wi::fits_to_tree_p
2129 (widest_int::from (rmax,
2130 TYPE_SIGN (TREE_TYPE (rhs1))),
2131 TREE_TYPE (name)))))
2132 add_assert_info (asserts, rhs1, rhs1,
2133 comp_code, fold_convert (TREE_TYPE (rhs1), val));
2134 }
2135
2136 /* Add asserts for NAME cmp CST and NAME being defined as
2137 NAME = NAME2 & CST2.
2138
2139 Extract CST2 from the and.
2140
2141 Also handle
2142 NAME = (unsigned) NAME2;
2143 casts where NAME's type is unsigned and has smaller precision
2144 than NAME2's type as if it was NAME = NAME2 & MASK. */
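/* As an assumed example: for the test (X & 0xf0) == 0x40 the
   switch below computes the unsigned bounds MINV = (0x40 & 0xf0)
   = 0x40 and MAXV = 0x40 | ~0xf0 = 0x4f, so X is asserted to lie
   in [0x40, 0x4f].  */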
2145 names[0] = NULL_TREE;
2146 names[1] = NULL_TREE;
2147 cst2 = NULL_TREE;
2148 if (rhs_code == BIT_AND_EXPR
2149 || (CONVERT_EXPR_CODE_P (rhs_code)
2150 && INTEGRAL_TYPE_P (TREE_TYPE (val))
2151 && TYPE_UNSIGNED (TREE_TYPE (val))
2152 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
2153 > prec))
2154 {
2155 name2 = gimple_assign_rhs1 (def_stmt);
2156 if (rhs_code == BIT_AND_EXPR)
2157 cst2 = gimple_assign_rhs2 (def_stmt);
2158 else
2159 {
2160 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
2161 nprec = TYPE_PRECISION (TREE_TYPE (name2));
2162 }
2163 if (TREE_CODE (name2) == SSA_NAME
2164 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2165 && TREE_CODE (cst2) == INTEGER_CST
2166 && !integer_zerop (cst2)
2167 && (nprec > 1
2168 || TYPE_UNSIGNED (TREE_TYPE (val))))
2169 {
2170 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
2171 if (gimple_assign_cast_p (def_stmt2))
2172 {
2173 names[1] = gimple_assign_rhs1 (def_stmt2);
2174 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
2175 || TREE_CODE (names[1]) != SSA_NAME
2176 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
2177 || (TYPE_PRECISION (TREE_TYPE (name2))
2178 != TYPE_PRECISION (TREE_TYPE (names[1]))))
2179 names[1] = NULL_TREE;
2180 }
2181 names[0] = name2;
2182 }
2183 }
2184 if (names[0] || names[1])
2185 {
2186 wide_int minv, maxv, valv, cst2v;
2187 wide_int tem, sgnbit;
2188 bool valid_p = false, valn, cst2n;
2189 enum tree_code ccode = comp_code;
2190
2191 valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
2192 cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
2193 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
2194 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
2195 /* If CST2 doesn't have the most significant bit set,
2196 but VAL is negative, we have a comparison like
2197 if ((x & 0x123) > -4) (always true). Just give up. */
2198 if (!cst2n && valn)
2199 ccode = ERROR_MARK;
2200 if (cst2n)
2201 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
2202 else
2203 sgnbit = wi::zero (nprec);
2204 minv = valv & cst2v;
2205 switch (ccode)
2206 {
2207 case EQ_EXPR:
2208 /* Minimum unsigned value for equality is VAL & CST2
2209 (should be equal to VAL, otherwise we probably should
2210 have folded the comparison into false) and
2211 maximum unsigned value is VAL | ~CST2. */
2212 maxv = valv | ~cst2v;
2213 valid_p = true;
2214 break;
2215
2216 case NE_EXPR:
2217 tem = valv | ~cst2v;
2218 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
2219 if (valv == 0)
2220 {
2221 cst2n = false;
2222 sgnbit = wi::zero (nprec);
2223 goto gt_expr;
2224 }
2225 /* If (VAL | ~CST2) is all ones, handle it as
2226 (X & CST2) < VAL. */
2227 if (tem == -1)
2228 {
2229 cst2n = false;
2230 valn = false;
2231 sgnbit = wi::zero (nprec);
2232 goto lt_expr;
2233 }
2234 if (!cst2n && wi::neg_p (cst2v))
2235 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
2236 if (sgnbit != 0)
2237 {
2238 if (valv == sgnbit)
2239 {
2240 cst2n = true;
2241 valn = true;
2242 goto gt_expr;
2243 }
2244 if (tem == wi::mask (nprec - 1, false, nprec))
2245 {
2246 cst2n = true;
2247 goto lt_expr;
2248 }
2249 if (!cst2n)
2250 sgnbit = wi::zero (nprec);
2251 }
2252 break;
2253
2254 case GE_EXPR:
2255 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
2256 is VAL and maximum unsigned value is ~0. For signed
2257 comparison, if CST2 doesn't have most significant bit
2258 set, handle it similarly. If CST2 has MSB set,
2259 the minimum is the same, and maximum is ~0U/2. */
2260 if (minv != valv)
2261 {
2262 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
2263 VAL. */
2264 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2265 if (minv == valv)
2266 break;
2267 }
2268 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2269 valid_p = true;
2270 break;
2271
2272 case GT_EXPR:
2273 gt_expr:
2274 /* Find out smallest MINV where MINV > VAL
2275 && (MINV & CST2) == MINV, if any. If VAL is signed and
2276 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
2277 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2278 if (minv == valv)
2279 break;
2280 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2281 valid_p = true;
2282 break;
2283
2284 case LE_EXPR:
2285 /* Minimum unsigned value for <= is 0 and maximum
2286 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
2287 Otherwise, find smallest VAL2 where VAL2 > VAL
2288 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2289 as maximum.
2290 For signed comparison, if CST2 doesn't have most
2291 significant bit set, handle it similarly. If CST2 has
2292 MSB set, the maximum is the same and minimum is INT_MIN. */
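/* E.g. (assumed 8-bit unsigned values): for (X & 0xf0) <= 0x38,
   (0x38 & 0xf0) != 0x38, so masked_increment returns 0x40; then
   MAXV = (0x40 - 1) | ~0xf0 = 0x3f and MINV = 0, matching the
   fact that X & 0xf0 <= 0x38 iff X <= 0x3f.  */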
2293 if (minv == valv)
2294 maxv = valv;
2295 else
2296 {
2297 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
2298 if (maxv == valv)
2299 break;
2300 maxv -= 1;
2301 }
2302 maxv |= ~cst2v;
2303 minv = sgnbit;
2304 valid_p = true;
2305 break;
2306
2307 case LT_EXPR:
2308 lt_expr:
2309 /* Minimum unsigned value for < is 0 and maximum
2310 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
2311 Otherwise, find smallest VAL2 where VAL2 > VAL
2312 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2313 as maximum.
2314 For signed comparison, if CST2 doesn't have most
2315 significant bit set, handle it similarly. If CST2 has
2316 MSB set, the maximum is the same and minimum is INT_MIN. */
2317 if (minv == valv)
2318 {
2319 if (valv == sgnbit)
2320 break;
2321 maxv = valv;
2322 }
2323 else
2324 {
2325 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
2326 if (maxv == valv)
2327 break;
2328 }
2329 maxv -= 1;
2330 maxv |= ~cst2v;
2331 minv = sgnbit;
2332 valid_p = true;
2333 break;
2334
2335 default:
2336 break;
2337 }
2338 if (valid_p
2339 && (maxv - minv) != -1)
2340 {
2341 tree tmp, new_val, type;
2342 int i;
2343
2344 for (i = 0; i < 2; i++)
2345 if (names[i])
2346 {
2347 wide_int maxv2 = maxv;
2348 tmp = names[i];
2349 type = TREE_TYPE (names[i]);
2350 if (!TYPE_UNSIGNED (type))
2351 {
2352 type = build_nonstandard_integer_type (nprec, 1);
2353 tmp = build1 (NOP_EXPR, type, names[i]);
2354 }
2355 if (minv != 0)
2356 {
2357 tmp = build2 (PLUS_EXPR, type, tmp,
2358 wide_int_to_tree (type, -minv));
2359 maxv2 = maxv - minv;
2360 }
2361 new_val = wide_int_to_tree (type, maxv2);
2362 add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
2363 }
2364 }
2365 }
2366 }
2367 }
2368
2369 /* OP is an operand of a truth value expression which is known to have
2370 a particular value. Register any asserts for OP and for any
2371 operands in OP's defining statement.
2372
2373 If CODE is EQ_EXPR, then we want to register that OP is zero (false);
2374 if CODE is NE_EXPR, then we want to register that OP is nonzero (true). */
2375
2376 static void
2377 register_edge_assert_for_1 (tree op, enum tree_code code,
2378 edge e, vec<assert_info> &asserts)
2379 {
2380 gimple *op_def;
2381 tree val;
2382 enum tree_code rhs_code;
2383
2384 /* We only care about SSA_NAMEs. */
2385 if (TREE_CODE (op) != SSA_NAME)
2386 return;
2387
2388 /* We know that OP will have a zero or nonzero value. */
2389 val = build_int_cst (TREE_TYPE (op), 0);
2390 add_assert_info (asserts, op, op, code, val);
2391
2392 /* Now look at how OP is set. If it's set from a comparison,
2393 a truth operation or some bit operations, then we may be able
2394 to register information about the operands of that assignment. */
2395 op_def = SSA_NAME_DEF_STMT (op);
2396 if (gimple_code (op_def) != GIMPLE_ASSIGN)
2397 return;
2398
2399 rhs_code = gimple_assign_rhs_code (op_def);
2400
2401 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
2402 {
2403 bool invert = (code == EQ_EXPR ? true : false);
2404 tree op0 = gimple_assign_rhs1 (op_def);
2405 tree op1 = gimple_assign_rhs2 (op_def);
2406
2407 if (TREE_CODE (op0) == SSA_NAME)
2408 register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
2409 if (TREE_CODE (op1) == SSA_NAME)
2410 register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
2411 }
2412 else if ((code == NE_EXPR
2413 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
2414 || (code == EQ_EXPR
2415 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
2416 {
2417 /* Recurse on each operand. */
2418 tree op0 = gimple_assign_rhs1 (op_def);
2419 tree op1 = gimple_assign_rhs2 (op_def);
2420 if (TREE_CODE (op0) == SSA_NAME
2421 && has_single_use (op0))
2422 register_edge_assert_for_1 (op0, code, e, asserts);
2423 if (TREE_CODE (op1) == SSA_NAME
2424 && has_single_use (op1))
2425 register_edge_assert_for_1 (op1, code, e, asserts);
2426 }
2427 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
2428 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
2429 {
2430 /* Recurse, flipping CODE. */
2431 code = invert_tree_comparison (code, false);
2432 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
2433 }
2434 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
2435 {
2436 /* Recurse through the copy. */
2437 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
2438 }
2439 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
2440 {
2441 /* Recurse through the type conversion, unless it is a narrowing
2442 conversion or a conversion from a non-integral type. */
2443 tree rhs = gimple_assign_rhs1 (op_def);
2444 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2445 && (TYPE_PRECISION (TREE_TYPE (rhs))
2446 <= TYPE_PRECISION (TREE_TYPE (op))))
2447 register_edge_assert_for_1 (rhs, code, e, asserts);
2448 }
2449 }
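
/* An assumed example of the recursion above: given
     t1 = a != 0;
     t2 = b != 0;
     t = t1 & t2;
     if (t != 0) ...
   calling register_edge_assert_for_1 (t, NE_EXPR, e) records t != 0
   and then recurses through the BIT_AND_EXPR into t1 and t2, whose
   defining comparisons let us assert a != 0 and b != 0 on the true
   edge as well.  */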
2450
2451 /* Check if comparison
2452 NAME COND_OP INTEGER_CST
2453 has a form of
2454 (X & 11...100..0) COND_OP XX...X00...0
2455 Such comparison can yield assertions like
2456 X >= XX...X00...0
2457 X <= XX...X11...1
2458 in case of COND_OP being EQ_EXPR or
2459 X < XX...X00...0
2460 X > XX...X11...1
2461 in case of NE_EXPR. */
2462
2463 static bool
2464 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
2465 tree *new_name, tree *low, enum tree_code *low_code,
2466 tree *high, enum tree_code *high_code)
2467 {
2468 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2469
2470 if (!is_gimple_assign (def_stmt)
2471 || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
2472 return false;
2473
2474 tree t = gimple_assign_rhs1 (def_stmt);
2475 tree maskt = gimple_assign_rhs2 (def_stmt);
2476 if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
2477 return false;
2478
2479 wi::tree_to_wide_ref mask = wi::to_wide (maskt);
2480 wide_int inv_mask = ~mask;
2481 /* A zero or all-ones mask must have been folded away by now, so don't bother optimizing. */
2482 if (mask == 0 || inv_mask == 0)
2483 return false;
2484
2485 /* Assume VALT is INTEGER_CST. */
2486 wi::tree_to_wide_ref val = wi::to_wide (valt);
2487
2488 if ((inv_mask & (inv_mask + 1)) != 0
2489 || (val & mask) != val)
2490 return false;
2491
2492 bool is_range = cond_code == EQ_EXPR;
2493
2494 tree type = TREE_TYPE (t);
2495 wide_int min = wi::min_value (type),
2496 max = wi::max_value (type);
2497
2498 if (is_range)
2499 {
2500 *low_code = val == min ? ERROR_MARK : GE_EXPR;
2501 *high_code = val == max ? ERROR_MARK : LE_EXPR;
2502 }
2503 else
2504 {
2505 /* We can still generate an assertion if one of the alternatives
2506 is known to always be false. */
2507 if (val == min)
2508 {
2509 *low_code = (enum tree_code) 0;
2510 *high_code = GT_EXPR;
2511 }
2512 else if ((val | inv_mask) == max)
2513 {
2514 *low_code = LT_EXPR;
2515 *high_code = (enum tree_code) 0;
2516 }
2517 else
2518 return false;
2519 }
2520
2521 *new_name = t;
2522 *low = wide_int_to_tree (type, val);
2523 *high = wide_int_to_tree (type, val | inv_mask);
2524
2525 return true;
2526 }
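
/* For instance (an assumed example with 8-bit X): the comparison
   (X & 0xf8) == 0x40 passes the checks above and yields
   *LOW = 0x40 with GE_EXPR and *HIGH = 0x40 | 0x07 = 0x47 with
   LE_EXPR, i.e. the range test X in [0x40, 0x47].  */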
2527
2528 /* Try to register an edge assertion for SSA name NAME on edge E for
2529 the condition COND contributing to the conditional jump pointed to by
2530 SI. */
2531
2532 void
2533 register_edge_assert_for (tree name, edge e,
2534 enum tree_code cond_code, tree cond_op0,
2535 tree cond_op1, vec<assert_info> &asserts)
2536 {
2537 tree val;
2538 enum tree_code comp_code;
2539 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
2540
2541 /* Do not attempt to infer anything in names that flow through
2542 abnormal edges. */
2543 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
2544 return;
2545
2546 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
2547 cond_op0, cond_op1,
2548 is_else_edge,
2549 &comp_code, &val))
2550 return;
2551
2552 /* Register ASSERT_EXPRs for name. */
2553 register_edge_assert_for_2 (name, e, cond_code, cond_op0,
2554 cond_op1, is_else_edge, asserts);
2555
2556
2557 /* If COND is effectively an equality test of an SSA_NAME against
2558 the value zero or one, then we may be able to assert values
2559 for SSA_NAMEs which flow into COND. */
2560
2561 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
2562 statement of NAME we can assert both operands of the BIT_AND_EXPR
2563 have nonzero value. */
2564 if (((comp_code == EQ_EXPR && integer_onep (val))
2565 || (comp_code == NE_EXPR && integer_zerop (val))))
2566 {
2567 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2568
2569 if (is_gimple_assign (def_stmt)
2570 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
2571 {
2572 tree op0 = gimple_assign_rhs1 (def_stmt);
2573 tree op1 = gimple_assign_rhs2 (def_stmt);
2574 register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
2575 register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
2576 }
2577 }
2578
2579 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
2580 statement of NAME we can assert both operands of the BIT_IOR_EXPR
2581 have zero value. */
2582 if (((comp_code == EQ_EXPR && integer_zerop (val))
2583 || (comp_code == NE_EXPR && integer_onep (val))))
2584 {
2585 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2586
2587 /* For BIT_IOR_EXPR both operands necessarily have zero value
2588 only if NAME == 0, or if the type precision is one. */
2589 if (is_gimple_assign (def_stmt)
2590 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
2591 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
2592 || comp_code == EQ_EXPR)))
2593 {
2594 tree op0 = gimple_assign_rhs1 (def_stmt);
2595 tree op1 = gimple_assign_rhs2 (def_stmt);
2596 register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
2597 register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
2598 }
2599 }
2600
2601 /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
2602 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
2603 && TREE_CODE (val) == INTEGER_CST)
2604 {
2605 enum tree_code low_code, high_code;
2606 tree low, high;
2607 if (is_masked_range_test (name, val, comp_code, &name, &low,
2608 &low_code, &high, &high_code))
2609 {
2610 if (low_code != ERROR_MARK)
2611 register_edge_assert_for_2 (name, e, low_code, name,
2612 low, /*invert*/false, asserts);
2613 if (high_code != ERROR_MARK)
2614 register_edge_assert_for_2 (name, e, high_code, name,
2615 high, /*invert*/false, asserts);
2616 }
2617 }
2618 }
2619
2620 /* Finish found ASSERTS for E and register them at GSI. */
2621
2622 static void
2623 finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
2624 vec<assert_info> &asserts)
2625 {
2626 for (unsigned i = 0; i < asserts.length (); ++i)
2627 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
2628 reachable from E. */
2629 if (live_on_edge (e, asserts[i].name))
2630 register_new_assert_for (asserts[i].name, asserts[i].expr,
2631 asserts[i].comp_code, asserts[i].val,
2632 NULL, e, gsi);
2633 }
2634
2635
2636
2637 /* Determine whether the outgoing edges of BB should receive an
2638 ASSERT_EXPR for each of the operands of BB's LAST statement.
2639 The last statement of BB must be a COND_EXPR.
2640
2641 If any of the sub-graphs rooted at BB have an interesting use of
2642 the predicate operands, an assert location node is added to the
2643 list of assertions for the corresponding operands. */
2644
2645 static void
2646 find_conditional_asserts (basic_block bb, gcond *last)
2647 {
2648 gimple_stmt_iterator bsi;
2649 tree op;
2650 edge_iterator ei;
2651 edge e;
2652 ssa_op_iter iter;
2653
2654 bsi = gsi_for_stmt (last);
2655
2656 /* Look for uses of the operands in each of the sub-graphs
2657 rooted at BB. We need to check each of the outgoing edges
2658 separately, so that we know what kind of ASSERT_EXPR to
2659 insert. */
2660 FOR_EACH_EDGE (e, ei, bb->succs)
2661 {
2662 if (e->dest == bb)
2663 continue;
2664
2665 /* Register the necessary assertions for each operand in the
2666 conditional predicate. */
2667 auto_vec<assert_info, 8> asserts;
2668 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
2669 register_edge_assert_for (op, e,
2670 gimple_cond_code (last),
2671 gimple_cond_lhs (last),
2672 gimple_cond_rhs (last), asserts);
2673 finish_register_edge_assert_for (e, bsi, asserts);
2674 }
2675 }
2676
2677 struct case_info
2678 {
2679 tree expr;
2680 basic_block bb;
2681 };
2682
2683 /* Compare two case labels sorting first by the destination bb index
2684 and then by the case value. */
2685
2686 static int
2687 compare_case_labels (const void *p1, const void *p2)
2688 {
2689 const struct case_info *ci1 = (const struct case_info *) p1;
2690 const struct case_info *ci2 = (const struct case_info *) p2;
2691 int idx1 = ci1->bb->index;
2692 int idx2 = ci2->bb->index;
2693
2694 if (idx1 < idx2)
2695 return -1;
2696 else if (idx1 == idx2)
2697 {
2698 /* Make sure the default label is first in a group. */
2699 if (!CASE_LOW (ci1->expr))
2700 return -1;
2701 else if (!CASE_LOW (ci2->expr))
2702 return 1;
2703 else
2704 return tree_int_cst_compare (CASE_LOW (ci1->expr),
2705 CASE_LOW (ci2->expr));
2706 }
2707 else
2708 return 1;
2709 }
2710
2711 /* Determine whether the outgoing edges of BB should receive an
2712 ASSERT_EXPR for each of the operands of BB's LAST statement.
2713 The last statement of BB must be a SWITCH_EXPR.
2714
2715 If any of the sub-graphs rooted at BB have an interesting use of
2716 the predicate operands, an assert location node is added to the
2717 list of assertions for the corresponding operands. */
2718
2719 static void
2720 find_switch_asserts (basic_block bb, gswitch *last)
2721 {
2722 gimple_stmt_iterator bsi;
2723 tree op;
2724 edge e;
2725 struct case_info *ci;
2726 size_t n = gimple_switch_num_labels (last);
2727 #if GCC_VERSION >= 4000
2728 unsigned int idx;
2729 #else
2730 /* Work around GCC 3.4 bug (PR 37086). */
2731 volatile unsigned int idx;
2732 #endif
2733
2734 bsi = gsi_for_stmt (last);
2735 op = gimple_switch_index (last);
2736 if (TREE_CODE (op) != SSA_NAME)
2737 return;
2738
2739 /* Build a vector of case labels sorted by destination label. */
2740 ci = XNEWVEC (struct case_info, n);
2741 for (idx = 0; idx < n; ++idx)
2742 {
2743 ci[idx].expr = gimple_switch_label (last, idx);
2744 ci[idx].bb = label_to_block (cfun, CASE_LABEL (ci[idx].expr));
2745 }
2746 edge default_edge = find_edge (bb, ci[0].bb);
2747 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
2748
2749 for (idx = 0; idx < n; ++idx)
2750 {
2751 tree min, max;
2752 tree cl = ci[idx].expr;
2753 basic_block cbb = ci[idx].bb;
2754
2755 min = CASE_LOW (cl);
2756 max = CASE_HIGH (cl);
2757
2758 /* If there are multiple case labels with the same destination
2759 we need to combine them to a single value range for the edge. */
2760 if (idx + 1 < n && cbb == ci[idx + 1].bb)
2761 {
2762 /* Skip labels until the last of the group. */
2763 do {
2764 ++idx;
2765 } while (idx < n && cbb == ci[idx].bb);
2766 --idx;
2767
2768 /* Pick up the maximum of the case label range. */
2769 if (CASE_HIGH (ci[idx].expr))
2770 max = CASE_HIGH (ci[idx].expr);
2771 else
2772 max = CASE_LOW (ci[idx].expr);
2773 }
2774
2775 /* Can't extract a useful assertion out of a range that includes the
2776 default label. */
2777 if (min == NULL_TREE)
2778 continue;
2779
2780 /* Find the edge to register the assert expr on. */
2781 e = find_edge (bb, cbb);
2782
2783 /* Register the necessary assertions for the operand in the
2784 SWITCH_EXPR. */
2785 auto_vec<assert_info, 8> asserts;
2786 register_edge_assert_for (op, e,
2787 max ? GE_EXPR : EQ_EXPR,
2788 op, fold_convert (TREE_TYPE (op), min),
2789 asserts);
2790 if (max)
2791 register_edge_assert_for (op, e, LE_EXPR, op,
2792 fold_convert (TREE_TYPE (op), max),
2793 asserts);
2794 finish_register_edge_assert_for (e, bsi, asserts);
2795 }
2796
2797 XDELETEVEC (ci);
2798
2799 if (!live_on_edge (default_edge, op))
2800 return;
2801
2802 /* Now register, along the default edge, assertions that correspond to the
2803 anti-range of each label. */
2804 int insertion_limit = param_max_vrp_switch_assertions;
2805 if (insertion_limit == 0)
2806 return;
2807
2808 /* We can't do this if the default case shares a label with another case. */
2809 tree default_cl = gimple_switch_default_label (last);
2810 for (idx = 1; idx < n; idx++)
2811 {
2812 tree min, max;
2813 tree cl = gimple_switch_label (last, idx);
2814 if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
2815 continue;
2816
2817 min = CASE_LOW (cl);
2818 max = CASE_HIGH (cl);
2819
2820 /* Combine contiguous case ranges to reduce the number of assertions
2821 to insert. */
2822 for (idx = idx + 1; idx < n; idx++)
2823 {
2824 tree next_min, next_max;
2825 tree next_cl = gimple_switch_label (last, idx);
2826 if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
2827 break;
2828
2829 next_min = CASE_LOW (next_cl);
2830 next_max = CASE_HIGH (next_cl);
2831
2832 wide_int difference = (wi::to_wide (next_min)
2833 - wi::to_wide (max ? max : min));
2834 if (wi::eq_p (difference, 1))
2835 max = next_max ? next_max : next_min;
2836 else
2837 break;
2838 }
2839 idx--;
2840
2841 if (max == NULL_TREE)
2842 {
2843 /* Register the assertion OP != MIN. */
2844 auto_vec<assert_info, 8> asserts;
2845 min = fold_convert (TREE_TYPE (op), min);
2846 register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
2847 asserts);
2848 finish_register_edge_assert_for (default_edge, bsi, asserts);
2849 }
2850 else
2851 {
2852 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
2853 which will give OP the anti-range ~[MIN,MAX]. */
2854 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
2855 min = fold_convert (TREE_TYPE (uop), min);
2856 max = fold_convert (TREE_TYPE (uop), max);
2857
2858 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
2859 tree rhs = int_const_binop (MINUS_EXPR, max, min);
2860 register_new_assert_for (op, lhs, GT_EXPR, rhs,
2861 NULL, default_edge, bsi);
2862 }
2863
2864 if (--insertion_limit == 0)
2865 break;
2866 }
2867 }
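
/* An assumed example for the default-edge asserts above: given
     switch (x) { case 3: case 4: case 5: ... default: ... }
   the three contiguous labels are combined into [3, 5] and on the
   default edge we register (unsigned) x - 3 > 2, which gives x the
   anti-range ~[3, 5].  */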
2868
2869
2870 /* Traverse all the statements in block BB looking for statements that
2871 may generate useful assertions for the SSA names in their operands.
2872 If a statement produces a useful assertion A for name N_i, then the
2873 list of assertions already generated for N_i is scanned to
2874 determine if A is actually needed.
2875
2876 If N_i already had the assertion A at a location dominating the
2877 current location, then nothing needs to be done. Otherwise, the
2878 new location for A is recorded instead.
2879
2880 1- For every statement S in BB, all the variables used by S are
2881 added to bitmap FOUND_IN_SUBGRAPH.
2882
2883 2- If statement S uses an operand N in a way that exposes a known
2884 value range for N, then if N was not already generated by an
2885 ASSERT_EXPR, create a new assert location for N. For instance,
2886 if N is a pointer and the statement dereferences it, we can
2887 assume that N is not NULL.
2888
2889 3- COND_EXPRs are a special case of #2. We can derive range
2890 information from the predicate but need to insert different
2891 ASSERT_EXPRs for each of the sub-graphs rooted at the
2892 conditional block. If the last statement of BB is a conditional
2893 expression of the form 'X op Y', then
2894
2895 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
2896
2897 b) If the conditional is the only entry point to the sub-graph
2898 corresponding to the THEN_CLAUSE, recurse into it. On
2899 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
2900 an ASSERT_EXPR is added for the corresponding variable.
2901
2902 c) Repeat step (b) on the ELSE_CLAUSE.
2903
2904 d) Mark X and Y in FOUND_IN_SUBGRAPH.
2905
2906 For instance,
2907
2908 if (a == 9)
2909 b = a;
2910 else
2911 b = c + 1;
2912
2913 In this case, an assertion on the THEN clause is useful to
2914 determine that 'a' is always 9 on that edge. However, an assertion
2915 on the ELSE clause would be unnecessary.
2916
2917 4- If BB does not end in a conditional expression, then we recurse
2918 into BB's dominator children.
2919
2920 At the end of the recursive traversal, every SSA name will have a
2921 list of locations where ASSERT_EXPRs should be added. When a new
2922 location for name N is found, it is registered by calling
2923 register_new_assert_for. That function keeps track of all the
2924 registered assertions to prevent adding unnecessary assertions.
2925 For instance, if a pointer P_4 is dereferenced more than once in a
2926 dominator tree, only the location dominating all the dereference of
2927 P_4 will receive an ASSERT_EXPR. */
2928
2929 static void
2930 find_assert_locations_1 (basic_block bb, sbitmap live)
2931 {
2932 gimple *last;
2933
2934 last = last_stmt (bb);
2935
2936 /* If BB's last statement is a conditional statement involving integer
2937 operands, determine if we need to add ASSERT_EXPRs. */
2938 if (last
2939 && gimple_code (last) == GIMPLE_COND
2940 && !fp_predicate (last)
2941 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
2942 find_conditional_asserts (bb, as_a <gcond *> (last));
2943
2944 /* If BB's last statement is a switch statement involving integer
2945 operands, determine if we need to add ASSERT_EXPRs. */
2946 if (last
2947 && gimple_code (last) == GIMPLE_SWITCH
2948 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
2949 find_switch_asserts (bb, as_a <gswitch *> (last));
2950
2951 /* Traverse all the statements in BB marking used names and looking
2952 for statements that may infer assertions for their used operands. */
2953 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
2954 gsi_prev (&si))
2955 {
2956 gimple *stmt;
2957 tree op;
2958 ssa_op_iter i;
2959
2960 stmt = gsi_stmt (si);
2961
2962 if (is_gimple_debug (stmt))
2963 continue;
2964
2965 /* See if we can derive an assertion for any of STMT's operands. */
2966 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
2967 {
2968 tree value;
2969 enum tree_code comp_code;
2970
2971 /* If op is not live beyond this stmt, do not bother to insert
2972 asserts for it. */
2973 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
2974 continue;
2975
2976 /* If OP is used in such a way that we can infer a value
2977 range for it, and we don't find a previous assertion for
2978 it, create a new assertion location node for OP. */
2979 if (infer_value_range (stmt, op, &comp_code, &value))
2980 {
2981 /* If we are able to infer a nonzero value range for OP,
2982 then walk backwards through the use-def chain to see if OP
2983 was set via a typecast.
2984
2985 If so, then we can also infer a nonzero value range
2986 for the operand of the NOP_EXPR. */
2987 if (comp_code == NE_EXPR && integer_zerop (value))
2988 {
2989 tree t = op;
2990 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
2991
2992 while (is_gimple_assign (def_stmt)
2993 && CONVERT_EXPR_CODE_P
2994 (gimple_assign_rhs_code (def_stmt))
2995 && TREE_CODE
2996 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
2997 && POINTER_TYPE_P
2998 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
2999 {
3000 t = gimple_assign_rhs1 (def_stmt);
3001 def_stmt = SSA_NAME_DEF_STMT (t);
3002
3003 /* Note we want to register the assert for the
3004 operand of the NOP_EXPR after SI, not after the
3005 conversion. */
3006 if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
3007 register_new_assert_for (t, t, comp_code, value,
3008 bb, NULL, si);
3009 }
3010 }
3011
3012 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3013 }
3014 }
3015
3016 /* Update live. */
3017 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3018 bitmap_set_bit (live, SSA_NAME_VERSION (op));
3019 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3020 bitmap_clear_bit (live, SSA_NAME_VERSION (op));
3021 }
3022
3023 /* Traverse all PHI nodes in BB, updating live. */
3024 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3025 gsi_next (&si))
3026 {
3027 use_operand_p arg_p;
3028 ssa_op_iter i;
3029 gphi *phi = si.phi ();
3030 tree res = gimple_phi_result (phi);
3031
3032 if (virtual_operand_p (res))
3033 continue;
3034
3035 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3036 {
3037 tree arg = USE_FROM_PTR (arg_p);
3038 if (TREE_CODE (arg) == SSA_NAME)
3039 bitmap_set_bit (live, SSA_NAME_VERSION (arg));
3040 }
3041
3042 bitmap_clear_bit (live, SSA_NAME_VERSION (res));
3043 }
3044 }
3045
3046 /* Do an RPO walk over the function computing SSA name liveness
3047 on-the-fly and deciding on assert expressions to insert. */
3048
3049 static void
3050 find_assert_locations (void)
3051 {
3052 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3053 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3054 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3055 int rpo_cnt, i;
3056
3057 live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
3058 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3059 for (i = 0; i < rpo_cnt; ++i)
3060 bb_rpo[rpo[i]] = i;
3061
3062 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
3063 the order in which we compute liveness and insert asserts, we would
3064 otherwise fail to insert asserts into the loop latch. */
3065 loop_p loop;
3066 FOR_EACH_LOOP (loop, 0)
3067 {
3068 i = loop->latch->index;
3069 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
3070 for (gphi_iterator gsi = gsi_start_phis (loop->header);
3071 !gsi_end_p (gsi); gsi_next (&gsi))
3072 {
3073 gphi *phi = gsi.phi ();
3074 if (virtual_operand_p (gimple_phi_result (phi)))
3075 continue;
3076 tree arg = gimple_phi_arg_def (phi, j);
3077 if (TREE_CODE (arg) == SSA_NAME)
3078 {
3079 if (live[i] == NULL)
3080 {
3081 live[i] = sbitmap_alloc (num_ssa_names);
3082 bitmap_clear (live[i]);
3083 }
3084 bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
3085 }
3086 }
3087 }
3088
3089 for (i = rpo_cnt - 1; i >= 0; --i)
3090 {
3091 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
3092 edge e;
3093 edge_iterator ei;
3094
3095 if (!live[rpo[i]])
3096 {
3097 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
3098 bitmap_clear (live[rpo[i]]);
3099 }
3100
3101 /* Process BB and update the live information with uses in
3102 this block. */
3103 find_assert_locations_1 (bb, live[rpo[i]]);
3104
3105 /* Merge liveness into the predecessor blocks and free it. */
3106 if (!bitmap_empty_p (live[rpo[i]]))
3107 {
3108 int pred_rpo = i;
3109 FOR_EACH_EDGE (e, ei, bb->preds)
3110 {
3111 int pred = e->src->index;
3112 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
3113 continue;
3114
3115 if (!live[pred])
3116 {
3117 live[pred] = sbitmap_alloc (num_ssa_names);
3118 bitmap_clear (live[pred]);
3119 }
3120 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
3121
3122 if (bb_rpo[pred] < pred_rpo)
3123 pred_rpo = bb_rpo[pred];
3124 }
3125
3126 /* Record the RPO number of the last visited block that needs
3127 live information from this block. */
3128 last_rpo[rpo[i]] = pred_rpo;
3129 }
3130 else
3131 {
3132 sbitmap_free (live[rpo[i]]);
3133 live[rpo[i]] = NULL;
3134 }
3135
3136 /* We can free all successors' live bitmaps if all of their
3137 predecessors have been visited already. */
3138 FOR_EACH_EDGE (e, ei, bb->succs)
3139 if (last_rpo[e->dest->index] == i
3140 && live[e->dest->index])
3141 {
3142 sbitmap_free (live[e->dest->index]);
3143 live[e->dest->index] = NULL;
3144 }
3145 }
3146
3147 XDELETEVEC (rpo);
3148 XDELETEVEC (bb_rpo);
3149 XDELETEVEC (last_rpo);
3150 for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
3151 if (live[i])
3152 sbitmap_free (live[i]);
3153 XDELETEVEC (live);
3154 }
3155
3156 /* Create an ASSERT_EXPR for NAME and insert it in the location
3157 indicated by LOC. Return true if we made any edge insertions. */
3158
3159 static bool
3160 process_assert_insertions_for (tree name, assert_locus *loc)
3161 {
3162 /* Build the comparison expression NAME_i COMP_CODE VAL. */
3163 gimple *stmt;
3164 tree cond;
3165 gimple *assert_stmt;
3166 edge_iterator ei;
3167 edge e;
3168
3169 /* If we have X <=> X, do not insert an assert expr for that. */
3170 if (loc->expr == loc->val)
3171 return false;
3172
3173 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
3174 assert_stmt = build_assert_expr_for (cond, name);
3175 if (loc->e)
3176 {
3177 /* We have been asked to insert the assertion on an edge. This
3178 is used only by COND_EXPR and SWITCH_EXPR assertions. */
3179 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
3180 || (gimple_code (gsi_stmt (loc->si))
3181 == GIMPLE_SWITCH));
3182
3183 gsi_insert_on_edge (loc->e, assert_stmt);
3184 return true;
3185 }
3186
3187 /* If the stmt iterator points at the end then this is an insertion
3188 at the beginning of a block. */
3189 if (gsi_end_p (loc->si))
3190 {
3191 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
3192 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
3193 return false;
3194
3195 }
3196 /* Otherwise, we can insert right after LOC->SI, provided the
3197 statement is not the last statement in the block. */
3198 stmt = gsi_stmt (loc->si);
3199 if (!stmt_ends_bb_p (stmt))
3200 {
3201 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
3202 return false;
3203 }
3204
3205 /* If STMT must be the last statement in BB, we can only insert new
3206 assertions on the non-abnormal edge out of BB. Note that since
3207 STMT is not control flow, there may only be one non-abnormal/eh edge
3208 out of BB. */
3209 FOR_EACH_EDGE (e, ei, loc->bb->succs)
3210 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
3211 {
3212 gsi_insert_on_edge (e, assert_stmt);
3213 return true;
3214 }
3215
3216 gcc_unreachable ();
3217 }
3218
3219 /* Qsort helper for sorting assert locations. If stable is true, don't
3220 use iterative_hash_expr because it can be unstable for -fcompare-debug;
3221 on the other hand, some pointers might be NULL. */
3222
3223 template <bool stable>
3224 static int
3225 compare_assert_loc (const void *pa, const void *pb)
3226 {
3227 assert_locus * const a = *(assert_locus * const *)pa;
3228 assert_locus * const b = *(assert_locus * const *)pb;
3229
3230 /* If stable, some asserts might already have been optimized away;
3231 sort them last. */
3232 if (stable)
3233 {
3234 if (a == NULL)
3235 return b != NULL;
3236 else if (b == NULL)
3237 return -1;
3238 }
3239
3240 if (a->e == NULL && b->e != NULL)
3241 return 1;
3242 else if (a->e != NULL && b->e == NULL)
3243 return -1;
3244
3245 /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
3246 no need to test both a->e and b->e. */
3247
3248 /* Sort by destination index. */
3249 if (a->e == NULL)
3250 ;
3251 else if (a->e->dest->index > b->e->dest->index)
3252 return 1;
3253 else if (a->e->dest->index < b->e->dest->index)
3254 return -1;
3255
3256 /* Sort by comp_code. */
3257 if (a->comp_code > b->comp_code)
3258 return 1;
3259 else if (a->comp_code < b->comp_code)
3260 return -1;
3261
3262 hashval_t ha, hb;
3263
3264 /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
3265 uses DECL_UID of the VAR_DECL, so sorting might differ between
3266 -g and -g0. When doing the removal of redundant assert exprs
3267 and commonization to successors, this does not matter, but
3268 the final sort needs to be stable. */
3269 if (stable)
3270 {
3271 ha = 0;
3272 hb = 0;
3273 }
3274 else
3275 {
3276 ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
3277 hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
3278 }
3279
3280 /* Break the tie using hashing and source/bb index. */
3281 if (ha == hb)
3282 return (a->e != NULL
3283 ? a->e->src->index - b->e->src->index
3284 : a->bb->index - b->bb->index);
3285 return ha > hb ? 1 : -1;
3286 }
3287
3288 /* Process all the insertions registered for every name N_i registered
3289 in NEED_ASSERT_FOR. The list of assertions to be inserted are
3290 found in ASSERTS_FOR[i]. */
3291
3292 static void
3293 process_assert_insertions (void)
3294 {
3295 unsigned i;
3296 bitmap_iterator bi;
3297 bool update_edges_p = false;
3298 int num_asserts = 0;
3299
3300 if (dump_file && (dump_flags & TDF_DETAILS))
3301 dump_all_asserts (dump_file);
3302
3303 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3304 {
3305 assert_locus *loc = asserts_for[i];
3306 gcc_assert (loc);
3307
3308 auto_vec<assert_locus *, 16> asserts;
3309 for (; loc; loc = loc->next)
3310 asserts.safe_push (loc);
3311 asserts.qsort (compare_assert_loc<false>);
3312
3313 /* Push down common asserts to successors and remove redundant ones. */
3314 unsigned ecnt = 0;
3315 assert_locus *common = NULL;
3316 unsigned commonj = 0;
3317 for (unsigned j = 0; j < asserts.length (); ++j)
3318 {
3319 loc = asserts[j];
3320 if (! loc->e)
3321 common = NULL;
3322 else if (! common
3323 || loc->e->dest != common->e->dest
3324 || loc->comp_code != common->comp_code
3325 || ! operand_equal_p (loc->val, common->val, 0)
3326 || ! operand_equal_p (loc->expr, common->expr, 0))
3327 {
3328 commonj = j;
3329 common = loc;
3330 ecnt = 1;
3331 }
3332 else if (loc->e == asserts[j-1]->e)
3333 {
3334 /* Remove duplicate asserts. */
3335 if (commonj == j - 1)
3336 {
3337 commonj = j;
3338 common = loc;
3339 }
3340 free (asserts[j-1]);
3341 asserts[j-1] = NULL;
3342 }
3343 else
3344 {
3345 ecnt++;
3346 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
3347 {
3348 /* We have the same assertion on all incoming edges of a BB.
3349 Insert it at the beginning of that block. */
3350 loc->bb = loc->e->dest;
3351 loc->e = NULL;
3352 loc->si = gsi_none ();
3353 common = NULL;
3354 /* Clear the asserts that have been commoned. */
3355 for (; commonj != j; ++commonj)
3356 if (asserts[commonj])
3357 {
3358 free (asserts[commonj]);
3359 asserts[commonj] = NULL;
3360 }
3361 }
3362 }
3363 }
3364
3365 /* The asserts vector sorting above might be unstable for
3366 -fcompare-debug; sort again to ensure a stable sort. */
3367 asserts.qsort (compare_assert_loc<true>);
3368 for (unsigned j = 0; j < asserts.length (); ++j)
3369 {
3370 loc = asserts[j];
3371 if (! loc)
3372 break;
3373 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
3374 num_asserts++;
3375 free (loc);
3376 }
3377 }
3378
3379 if (update_edges_p)
3380 gsi_commit_edge_inserts ();
3381
3382 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
3383 num_asserts);
3384 }
3385
3386
3387 /* Traverse the flowgraph looking for conditional jumps to insert range
3388 expressions. These range expressions are meant to provide information
3389 to optimizations that need to reason in terms of value ranges. They
3390 will not be expanded into RTL. For instance, given:
3391
3392 x = ...
3393 y = ...
3394 if (x < y)
3395 y = x - 2;
3396 else
3397 x = y + 3;
3398
3399 this pass will transform the code into:
3400
3401 x = ...
3402 y = ...
3403 if (x < y)
3404 {
3405 x = ASSERT_EXPR <x, x < y>
3406 y = x - 2
3407 }
3408 else
3409 {
3410 y = ASSERT_EXPR <y, x >= y>
3411 x = y + 3
3412 }
3413
3414 The idea is that once copy and constant propagation have run, other
3415 optimizations will be able to determine what ranges of values can 'x'
3416 take in different paths of the code, simply by checking the reaching
3417 definition of 'x'. */
3418
3419 static void
3420 insert_range_assertions (void)
3421 {
3422 need_assert_for = BITMAP_ALLOC (NULL);
3423 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
3424
3425 calculate_dominance_info (CDI_DOMINATORS);
3426
3427 find_assert_locations ();
3428 if (!bitmap_empty_p (need_assert_for))
3429 {
3430 process_assert_insertions ();
3431 update_ssa (TODO_update_ssa_no_phi);
3432 }
3433
3434 if (dump_file && (dump_flags & TDF_DETAILS))
3435 {
3436 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
3437 dump_function_to_file (current_function_decl, dump_file, dump_flags);
3438 }
3439
3440 free (asserts_for);
3441 BITMAP_FREE (need_assert_for);
3442 }
3443
3444 class vrp_prop : public ssa_propagation_engine
3445 {
3446 public:
3447 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
3448 enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
3449
3450 void vrp_initialize (void);
3451 void vrp_finalize (bool);
3452 void check_all_array_refs (void);
3453 bool check_array_ref (location_t, tree, bool);
3454 bool check_mem_ref (location_t, tree, bool);
3455 void search_for_addr_array (tree, location_t);
3456
3457 class vr_values vr_values;
3458 /* Temporary delegator to minimize code churn. */
3459 const value_range_equiv *get_value_range (const_tree op)
3460 { return vr_values.get_value_range (op); }
3461 void set_def_to_varying (const_tree def)
3462 { vr_values.set_def_to_varying (def); }
3463 void set_defs_to_varying (gimple *stmt)
3464 { vr_values.set_defs_to_varying (stmt); }
3465 void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
3466 tree *output_p, value_range_equiv *vr)
3467 { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
3468 bool update_value_range (const_tree op, value_range_equiv *vr)
3469 { return vr_values.update_value_range (op, vr); }
3470 void extract_range_basic (value_range_equiv *vr, gimple *stmt)
3471 { vr_values.extract_range_basic (vr, stmt); }
3472 void extract_range_from_phi_node (gphi *phi, value_range_equiv *vr)
3473 { vr_values.extract_range_from_phi_node (phi, vr); }
3474 };

3475 /* Checks one ARRAY_REF in REF, located at LOCATION. Ignores flexible
3476 arrays and "struct" hacks. If VRP can determine that the
3477 array subscript is a constant, check if it is outside the valid
3478 range. If the array subscript is a RANGE, warn if it does
3479 not overlap the valid range.
3480 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR.
3481 Returns true if a warning has been issued. */
3482
3483 bool
3484 vrp_prop::check_array_ref (location_t location, tree ref,
3485 bool ignore_off_by_one)
3486 {
3487 if (TREE_NO_WARNING (ref))
3488 return false;
3489
3490 tree low_sub = TREE_OPERAND (ref, 1);
3491 tree up_sub = low_sub;
3492 tree up_bound = array_ref_up_bound (ref);
3493
3494 /* Referenced decl if one can be determined. */
3495 tree decl = NULL_TREE;
3496
3497 /* Set for accesses to interior zero-length arrays. */
3498 bool interior_zero_len = false;
3499
3500 tree up_bound_p1;
3501
3502 if (!up_bound
3503 || TREE_CODE (up_bound) != INTEGER_CST
3504 || (warn_array_bounds < 2
3505 && array_at_struct_end_p (ref)))
3506 {
3507 /* Accesses to trailing arrays via pointers may access storage
3508 beyond the type's array bounds. For such arrays, or for flexible
3509 array members, as well as for other arrays of an unknown size,
3510 replace the upper bound with a more permissive one that assumes
3511 the size of the largest object is PTRDIFF_MAX. */
3512 tree eltsize = array_ref_element_size (ref);
3513
3514 if (TREE_CODE (eltsize) != INTEGER_CST
3515 || integer_zerop (eltsize))
3516 {
3517 up_bound = NULL_TREE;
3518 up_bound_p1 = NULL_TREE;
3519 }
3520 else
3521 {
3522 tree ptrdiff_max = TYPE_MAX_VALUE (ptrdiff_type_node);
3523 tree maxbound = ptrdiff_max;
3524 tree arg = TREE_OPERAND (ref, 0);
3525
3526 const bool compref = TREE_CODE (arg) == COMPONENT_REF;
3527 if (compref)
3528 {
3529 /* Try to determine the size of the trailing array from
3530 its initializer (if it has one). */
3531 if (tree refsize = component_ref_size (arg, &interior_zero_len))
3532 if (TREE_CODE (refsize) == INTEGER_CST)
3533 maxbound = refsize;
3534 }
3535
3536 if (maxbound == ptrdiff_max)
3537 {
3538 /* Try to determine the size of the base object. Avoid
3539 COMPONENT_REF already tried above. Using its DECL_SIZE
3540 size wouldn't necessarily be correct if the reference is
3541 to its flexible array member initialized in a different
3542 translation unit. */
3543 poly_int64 off;
3544 if (tree base = get_addr_base_and_unit_offset (arg, &off))
3545 {
3546 if (!compref && DECL_P (base))
3547 if (tree basesize = DECL_SIZE_UNIT (base))
3548 if (TREE_CODE (basesize) == INTEGER_CST)
3549 {
3550 maxbound = basesize;
3551 decl = base;
3552 }
3553
3554 if (known_gt (off, 0))
3555 maxbound = wide_int_to_tree (sizetype,
3556 wi::sub (wi::to_wide (maxbound),
3557 off));
3558 }
3559 }
3560 else
3561 maxbound = fold_convert (sizetype, maxbound);
3562
3563 up_bound_p1 = int_const_binop (TRUNC_DIV_EXPR, maxbound, eltsize);
3564
3565 if (up_bound_p1 != NULL_TREE)
3566 up_bound = int_const_binop (MINUS_EXPR, up_bound_p1,
3567 build_int_cst (ptrdiff_type_node, 1));
3568 else
3569 up_bound = NULL_TREE;
3570 }
3571 }
3572 else
3573 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
3574 build_int_cst (TREE_TYPE (up_bound), 1));
3575
3576 tree low_bound = array_ref_low_bound (ref);
3577
3578 tree artype = TREE_TYPE (TREE_OPERAND (ref, 0));
3579
3580 bool warned = false;
3581
3582 /* Empty array: any subscript is out of bounds. */
3583 if (up_bound && tree_int_cst_equal (low_bound, up_bound_p1))
3584 warned = warning_at (location, OPT_Warray_bounds,
3585 "array subscript %E is outside array bounds of %qT",
3586 low_sub, artype);
3587
3588 const value_range_equiv *vr = NULL;
3589 if (TREE_CODE (low_sub) == SSA_NAME)
3590 {
3591 vr = get_value_range (low_sub);
3592 if (!vr->undefined_p () && !vr->varying_p ())
3593 {
3594 low_sub = vr->kind () == VR_RANGE ? vr->max () : vr->min ();
3595 up_sub = vr->kind () == VR_RANGE ? vr->min () : vr->max ();
3596 }
3597 }
3598
3599 if (warned)
3600 ; /* Do nothing. */
3601 else if (vr && vr->kind () == VR_ANTI_RANGE)
3602 {
3603 if (up_bound
3604 && TREE_CODE (up_sub) == INTEGER_CST
3605 && (ignore_off_by_one
3606 ? tree_int_cst_lt (up_bound, up_sub)
3607 : tree_int_cst_le (up_bound, up_sub))
3608 && TREE_CODE (low_sub) == INTEGER_CST
3609 && tree_int_cst_le (low_sub, low_bound))
3610 warned = warning_at (location, OPT_Warray_bounds,
3611 "array subscript [%E, %E] is outside "
3612 "array bounds of %qT",
3613 low_sub, up_sub, artype);
3614 }
3615 else if (up_bound
3616 && TREE_CODE (up_sub) == INTEGER_CST
3617 && (ignore_off_by_one
3618 ? !tree_int_cst_le (up_sub, up_bound_p1)
3619 : !tree_int_cst_le (up_sub, up_bound)))
3620 warned = warning_at (location, OPT_Warray_bounds,
3621 "array subscript %E is above array bounds of %qT",
3622 up_sub, artype);
3623 else if (TREE_CODE (low_sub) == INTEGER_CST
3624 && tree_int_cst_lt (low_sub, low_bound))
3625 warned = warning_at (location, OPT_Warray_bounds,
3626 "array subscript %E is below array bounds of %qT",
3627 low_sub, artype);
3628
3629 if (!warned && interior_zero_len)
3630 warned = warning_at (location, OPT_Wzero_length_bounds,
3631 (TREE_CODE (low_sub) == INTEGER_CST
3632 ? G_("array subscript %E is outside the bounds "
3633 "of an interior zero-length array %qT")
3634 : G_("array subscript %qE is outside the bounds "
3635 "of an interior zero-length array %qT")),
3636 low_sub, artype);
3637
3638 if (warned)
3639 {
3640 if (dump_file && (dump_flags & TDF_DETAILS))
3641 {
3642 fprintf (dump_file, "Array bound warning for ");
3643 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
3644 fprintf (dump_file, "\n");
3645 }
3646
3647 ref = decl ? decl : TREE_OPERAND (ref, 0);
3648
3649 tree rec = NULL_TREE;
3650 if (TREE_CODE (ref) == COMPONENT_REF)
3651 {
3652 /* For a reference to a member of a struct object also mention
3653 the object if it's known. It may be defined in a different
3654 function than the out-of-bounds access. */
3655 rec = TREE_OPERAND (ref, 0);
3656 if (!VAR_P (rec))
3657 rec = NULL_TREE;
3658 ref = TREE_OPERAND (ref, 1);
3659 }
3660
3661 if (DECL_P (ref))
3662 inform (DECL_SOURCE_LOCATION (ref), "while referencing %qD", ref);
3663 if (rec && DECL_P (rec))
3664 inform (DECL_SOURCE_LOCATION (rec), "defined here %qD", rec);
3665
3666 TREE_NO_WARNING (ref) = 1;
3667 }
3668
3669 return warned;
3670 }
3671
3672 /* Checks one MEM_REF in REF, located at LOCATION, for out-of-bounds
3673 references to string constants. If VRP can determine that the array
3674 subscript is a constant, check if it is outside valid range.
3675 If the array subscript is a RANGE, warn if it is non-overlapping
3676 with valid range.
3677 IGNORE_OFF_BY_ONE is true if the MEM_REF is inside an ADDR_EXPR
3678 (used to allow one-past-the-end indices for code that takes
3679 the address of the just-past-the-end element of an array).
3680 Returns true if a warning has been issued. */
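
/* One way such a MEM_REF can arise (a sketch; the exact GIMPLE may
   differ):

	const char *s = "abc";
	char f (void) { return s[7]; }

   once &"abc" has been forwarded into the load, the access becomes a
   MEM_REF at offset 7 into a 4-byte string constant, which is
   diagnosed here.  */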
3681
3682 bool
3683 vrp_prop::check_mem_ref (location_t location, tree ref,
3684 bool ignore_off_by_one)
3685 {
3686 if (TREE_NO_WARNING (ref))
3687 return false;
3688
3689 tree arg = TREE_OPERAND (ref, 0);
3690 /* The constant and variable offset of the reference. */
3691 tree cstoff = TREE_OPERAND (ref, 1);
3692 tree varoff = NULL_TREE;
3693
3694 const offset_int maxobjsize = tree_to_shwi (max_object_size ());
3695
3696 /* The array or string constant bounds in bytes. Initially set
3697 to [-MAXOBJSIZE - 1, MAXOBJSIZE] until a tighter bound is
3698 determined. */
3699 offset_int arrbounds[2] = { -maxobjsize - 1, maxobjsize };
3700
3701 /* The minimum and maximum intermediate offset. For a reference
3702 to be valid, not only must the final offset/subscript be in
3703 bounds, but all intermediate offsets must be as well.
3704 GCC may be able to deal gracefully with such out-of-bounds
3705 offsets so the checking is only enabled at -Warray-bounds=2
3706 where it may help detect bugs in uses of the intermediate
3707 offsets that could otherwise not be detectable. */
3708 offset_int ioff = wi::to_offset (fold_convert (ptrdiff_type_node, cstoff));
3709 offset_int extrema[2] = { 0, wi::abs (ioff) };
3710
3711 /* The range of the byte offset into the reference. */
3712 offset_int offrange[2] = { 0, 0 };
3713
3714 const value_range_equiv *vr = NULL;
3715
3716 /* Determine the offsets and increment OFFRANGE for the bounds of each.
3717 The loop computes the range of the final offset for expressions such
3718 as (A + i0 + ... + iN)[CSTOFF] where i0 through iN are SSA_NAMEs in
3719 some range. */
3720 const unsigned limit = param_ssa_name_def_chain_limit;
3721 for (unsigned n = 0; TREE_CODE (arg) == SSA_NAME && n < limit; ++n)
3722 {
3723 gimple *def = SSA_NAME_DEF_STMT (arg);
3724 if (!is_gimple_assign (def))
3725 break;
3726
3727 tree_code code = gimple_assign_rhs_code (def);
3728 if (code == POINTER_PLUS_EXPR)
3729 {
3730 arg = gimple_assign_rhs1 (def);
3731 varoff = gimple_assign_rhs2 (def);
3732 }
3733 else if (code == ASSERT_EXPR)
3734 {
3735 arg = TREE_OPERAND (gimple_assign_rhs1 (def), 0);
3736 continue;
3737 }
3738 else
3739 return false;
3740
3741 /* VAROFF should always be an SSA_NAME here (and not even
3742 an INTEGER_CST) but there's no point in taking chances. */
3743 if (TREE_CODE (varoff) != SSA_NAME)
3744 break;
3745
3746 vr = get_value_range (varoff);
3747 if (!vr || vr->undefined_p () || vr->varying_p ())
3748 break;
3749
3750 if (!vr->constant_p ())
3751 break;
3752
3753 if (vr->kind () == VR_RANGE)
3754 {
3755 offset_int min
3756 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->min ()));
3757 offset_int max
3758 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->max ()));
3759 if (min < max)
3760 {
3761 offrange[0] += min;
3762 offrange[1] += max;
3763 }
3764 else
3765 {
3766 /* When MIN >= MAX, the offset is effectively in a union
3767 of two ranges: [-MAXOBJSIZE -1, MAX] and [MIN, MAXOBJSIZE].
3768 Since there is no way to represent such a range across
3769 additions, conservatively add [-MAXOBJSIZE -1, MAXOBJSIZE]
3770 to OFFRANGE. */
3771 offrange[0] += arrbounds[0];
3772 offrange[1] += arrbounds[1];
3773 }
3774 }
3775 else
3776 {
3777 /* For an anti-range, analogously to the above, conservatively
3778 add [-MAXOBJSIZE -1, MAXOBJSIZE] to OFFRANGE. */
3779 offrange[0] += arrbounds[0];
3780 offrange[1] += arrbounds[1];
3781 }
3782
3783 /* Keep track of the minimum and maximum offset. */
3784 if (offrange[1] < 0 && offrange[1] < extrema[0])
3785 extrema[0] = offrange[1];
3786 if (offrange[0] > 0 && offrange[0] > extrema[1])
3787 extrema[1] = offrange[0];
3788
3789 if (offrange[0] < arrbounds[0])
3790 offrange[0] = arrbounds[0];
3791
3792 if (offrange[1] > arrbounds[1])
3793 offrange[1] = arrbounds[1];
3794 }
3795
3796 if (TREE_CODE (arg) == ADDR_EXPR)
3797 {
3798 arg = TREE_OPERAND (arg, 0);
3799 if (TREE_CODE (arg) != STRING_CST
3800 && TREE_CODE (arg) != PARM_DECL
3801 && TREE_CODE (arg) != VAR_DECL)
3802 return false;
3803 }
3804 else
3805 return false;
3806
3807 /* The type of the object being referred to. It can be an array,
3808 string literal, or a non-array type when the MEM_REF represents
3809 a reference/subscript via a pointer to an object that is not
3810 an element of an array. Incomplete types are excluded as well
3811 because their size is not known. */
3812 tree reftype = TREE_TYPE (arg);
3813 if (POINTER_TYPE_P (reftype)
3814 || !COMPLETE_TYPE_P (reftype)
3815 || TREE_CODE (TYPE_SIZE_UNIT (reftype)) != INTEGER_CST)
3816 return false;
3817
3818 /* Except in declared objects, references to trailing array members
3819 of structs and union objects are excluded because MEM_REF doesn't
3820 make it possible to identify the member where the reference
3821 originated. */
3822 if (RECORD_OR_UNION_TYPE_P (reftype)
3823 && (!VAR_P (arg)
3824 || (DECL_EXTERNAL (arg) && array_at_struct_end_p (ref))))
3825 return false;
3826
3827 arrbounds[0] = 0;
3828
3829 offset_int eltsize;
3830 if (TREE_CODE (reftype) == ARRAY_TYPE)
3831 {
3832 eltsize = wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (reftype)));
3833 if (tree dom = TYPE_DOMAIN (reftype))
3834 {
3835 tree bnds[] = { TYPE_MIN_VALUE (dom), TYPE_MAX_VALUE (dom) };
3836 if (TREE_CODE (arg) == COMPONENT_REF)
3837 {
3838 offset_int size = maxobjsize;
3839 if (tree fldsize = component_ref_size (arg))
3840 size = wi::to_offset (fldsize);
3841 arrbounds[1] = wi::lrshift (size, wi::floor_log2 (eltsize));
3842 }
3843 else if (array_at_struct_end_p (arg) || !bnds[0] || !bnds[1])
3844 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
3845 else
3846 arrbounds[1] = (wi::to_offset (bnds[1]) - wi::to_offset (bnds[0])
3847 + 1) * eltsize;
3848 }
3849 else
3850 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
3851
3852 /* Determine a tighter bound of the non-array element type. */
3853 tree eltype = TREE_TYPE (reftype);
3854 while (TREE_CODE (eltype) == ARRAY_TYPE)
3855 eltype = TREE_TYPE (eltype);
3856 eltsize = wi::to_offset (TYPE_SIZE_UNIT (eltype));
3857 }
3858 else
3859 {
3860 eltsize = 1;
3861 tree size = TYPE_SIZE_UNIT (reftype);
3862 if (VAR_P (arg))
3863 if (tree initsize = DECL_SIZE_UNIT (arg))
3864 if (tree_int_cst_lt (size, initsize))
3865 size = initsize;
3866
3867 arrbounds[1] = wi::to_offset (size);
3868 }
3869
3870 offrange[0] += ioff;
3871 offrange[1] += ioff;
3872
3873 /* Compute the more permissive upper bound when IGNORE_OFF_BY_ONE
3874 is set (when taking the address of the one-past-last element
3875 of an array) but always use the stricter bound in diagnostics. */
3876 offset_int ubound = arrbounds[1];
3877 if (ignore_off_by_one)
3878 ubound += 1;
3879
3880 if (arrbounds[0] == arrbounds[1]
3881 || offrange[0] >= ubound
3882 || offrange[1] < arrbounds[0])
3883 {
3884 /* Treat a reference to a non-array object as one to an array
3885 of a single element. */
3886 if (TREE_CODE (reftype) != ARRAY_TYPE)
3887 reftype = build_array_type_nelts (reftype, 1);
3888
3889 /* Extract the element type out of MEM_REF and use its size
3890 to compute the index to print in the diagnostic; arrays
3891 in MEM_REF don't mean anything. A type with no size like
3892 void is as good as having a size of 1. */
3893 tree type = TREE_TYPE (ref);
3894 while (TREE_CODE (type) == ARRAY_TYPE)
3895 type = TREE_TYPE (type);
3896 if (tree size = TYPE_SIZE_UNIT (type))
3897 {
3898 offrange[0] = offrange[0] / wi::to_offset (size);
3899 offrange[1] = offrange[1] / wi::to_offset (size);
3900 }
3901
3902 bool warned;
3903 if (offrange[0] == offrange[1])
3904 warned = warning_at (location, OPT_Warray_bounds,
3905 "array subscript %wi is outside array bounds "
3906 "of %qT",
3907 offrange[0].to_shwi (), reftype);
3908 else
3909 warned = warning_at (location, OPT_Warray_bounds,
3910 "array subscript [%wi, %wi] is outside "
3911 "array bounds of %qT",
3912 offrange[0].to_shwi (),
3913 offrange[1].to_shwi (), reftype);
3914 if (warned && DECL_P (arg))
3915 inform (DECL_SOURCE_LOCATION (arg), "while referencing %qD", arg);
3916
3917 if (warned)
3918 TREE_NO_WARNING (ref) = 1;
3919 return warned;
3920 }
3921
3922 if (warn_array_bounds < 2)
3923 return false;
3924
3925 /* At level 2 check also intermediate offsets. */
3926 int i = 0;
3927 if (extrema[i] < -arrbounds[1] || extrema[i = 1] > ubound)
3928 {
3929 HOST_WIDE_INT tmpidx = extrema[i].to_shwi () / eltsize.to_shwi ();
3930
3931 if (warning_at (location, OPT_Warray_bounds,
3932 "intermediate array offset %wi is outside array bounds "
3933 "of %qT", tmpidx, reftype))
3934 {
3935 TREE_NO_WARNING (ref) = 1;
3936 return true;
3937 }
3938 }
3939
3940 return false;
3941 }
3942
3943 /* Searches the expression T, located at LOCATION, for out-of-bounds
3944 ARRAY_REFs and MEM_REFs, calling check_array_ref or check_mem_ref on them. */
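
/* E.g. (a sketch):

	int a[10];
	int *p = &a[10];	<- OK, just-past-the-end address
	int *q = &a[11];	<- diagnosed, above the array bounds

   which is why IGNORE_OFF_BY_ONE is passed as true below.  */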
3945
3946 void
3947 vrp_prop::search_for_addr_array (tree t, location_t location)
3948 {
3949 /* Check each ARRAY_REF and MEM_REF in the reference chain. */
3950 do
3951 {
3952 bool warned = false;
3953 if (TREE_CODE (t) == ARRAY_REF)
3954 warned = check_array_ref (location, t, true /*ignore_off_by_one*/);
3955 else if (TREE_CODE (t) == MEM_REF)
3956 warned = check_mem_ref (location, t, true /*ignore_off_by_one*/);
3957
3958 if (warned)
3959 TREE_NO_WARNING (t) = true;
3960
3961 t = TREE_OPERAND (t, 0);
3962 }
3963 while (handled_component_p (t) || TREE_CODE (t) == MEM_REF);
3964
3965 if (TREE_CODE (t) != MEM_REF
3966 || TREE_CODE (TREE_OPERAND (t, 0)) != ADDR_EXPR
3967 || TREE_NO_WARNING (t))
3968 return;
3969
3970 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
3971 tree low_bound, up_bound, el_sz;
3972 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
3973 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
3974 || !TYPE_DOMAIN (TREE_TYPE (tem)))
3975 return;
3976
3977 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
3978 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
3979 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
3980 if (!low_bound
3981 || TREE_CODE (low_bound) != INTEGER_CST
3982 || !up_bound
3983 || TREE_CODE (up_bound) != INTEGER_CST
3984 || !el_sz
3985 || TREE_CODE (el_sz) != INTEGER_CST)
3986 return;
3987
3988 offset_int idx;
3989 if (!mem_ref_offset (t).is_constant (&idx))
3990 return;
3991
3992 bool warned = false;
3993 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
3994 if (idx < 0)
3995 {
3996 if (dump_file && (dump_flags & TDF_DETAILS))
3997 {
3998 fprintf (dump_file, "Array bound warning for ");
3999 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4000 fprintf (dump_file, "\n");
4001 }
4002 warned = warning_at (location, OPT_Warray_bounds,
4003 "array subscript %wi is below "
4004 "array bounds of %qT",
4005 idx.to_shwi (), TREE_TYPE (tem));
4006 }
4007 else if (idx > (wi::to_offset (up_bound)
4008 - wi::to_offset (low_bound) + 1))
4009 {
4010 if (dump_file && (dump_flags & TDF_DETAILS))
4011 {
4012 fprintf (dump_file, "Array bound warning for ");
4013 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4014 fprintf (dump_file, "\n");
4015 }
4016 warned = warning_at (location, OPT_Warray_bounds,
4017 "array subscript %wu is above "
4018 "array bounds of %qT",
4019 idx.to_uhwi (), TREE_TYPE (tem));
4020 }
4021
4022 if (warned)
4023 {
4024 if (DECL_P (t))
4025 inform (DECL_SOURCE_LOCATION (t), "while referencing %qD", t);
4026
4027 TREE_NO_WARNING (t) = 1;
4028 }
4029 }
4030
4031 /* walk_tree() callback that checks *TP for out-of-bounds accesses:
4032 calls check_array_ref on each ARRAY_REF and check_mem_ref on each
4033 MEM_REF found; for an ADDR_EXPR (inside which an array subscript
4034 one past the valid range is allowed) calls search_for_addr_array.
4035 The location is passed in DATA. */
4036
4037 static tree
4038 check_array_bounds (tree *tp, int *walk_subtree, void *data)
4039 {
4040 tree t = *tp;
4041 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
4042 location_t location;
4043
4044 if (EXPR_HAS_LOCATION (t))
4045 location = EXPR_LOCATION (t);
4046 else
4047 location = gimple_location (wi->stmt);
4048
4049 *walk_subtree = TRUE;
4050
4051 bool warned = false;
4052 vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
4053 if (TREE_CODE (t) == ARRAY_REF)
4054 warned = vrp_prop->check_array_ref (location, t, false/*ignore_off_by_one*/);
4055 else if (TREE_CODE (t) == MEM_REF)
4056 warned = vrp_prop->check_mem_ref (location, t, false /*ignore_off_by_one*/);
4057 else if (TREE_CODE (t) == ADDR_EXPR)
4058 {
4059 vrp_prop->search_for_addr_array (t, location);
4060 *walk_subtree = FALSE;
4061 }
4062 /* Propagate the no-warning bit to the outer expression. */
4063 if (warned)
4064 TREE_NO_WARNING (t) = true;
4065
4066 return NULL_TREE;
4067 }
4068
4069 /* A dom_walker subclass for use by vrp_prop::check_all_array_refs,
4070 to walk over all statements of all reachable BBs and call
4071 check_array_bounds on them. */
4072
4073 class check_array_bounds_dom_walker : public dom_walker
4074 {
4075 public:
4076 check_array_bounds_dom_walker (vrp_prop *prop)
4077 : dom_walker (CDI_DOMINATORS,
4078 /* Discover non-executable edges, preserving EDGE_EXECUTABLE
4079 flags, so that we can merge in information on
4080 non-executable edges from vrp_folder. */
4081 REACHABLE_BLOCKS_PRESERVING_FLAGS),
4082 m_prop (prop) {}
4083 ~check_array_bounds_dom_walker () {}
4084
4085 edge before_dom_children (basic_block) FINAL OVERRIDE;
4086
4087 private:
4088 vrp_prop *m_prop;
4089 };
4090
4091 /* Implementation of dom_walker::before_dom_children.
4092
4093 Walk over all statements of BB and call check_array_bounds on them,
4094 and determine if there's a unique successor edge. */
4095
4096 edge
4097 check_array_bounds_dom_walker::before_dom_children (basic_block bb)
4098 {
4099 gimple_stmt_iterator si;
4100 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4101 {
4102 gimple *stmt = gsi_stmt (si);
4103 struct walk_stmt_info wi;
4104 if (!gimple_has_location (stmt)
4105 || is_gimple_debug (stmt))
4106 continue;
4107
4108 memset (&wi, 0, sizeof (wi));
4109
4110 wi.info = m_prop;
4111
4112 walk_gimple_op (stmt, check_array_bounds, &wi);
4113 }
4114
4115 /* Determine if there's a unique successor edge, and if so, return
4116 that back to dom_walker, ensuring that we don't visit blocks that
4117 became unreachable during the VRP propagation
4118 (PR tree-optimization/83312). */
4119 return find_taken_edge (bb, NULL_TREE);
4120 }
4121
4122 /* Walk over all statements of all reachable BBs and call check_array_bounds
4123 on them. */
4124
4125 void
4126 vrp_prop::check_all_array_refs ()
4127 {
4128 check_array_bounds_dom_walker w (this);
4129 w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
4130 }
4131
4132 /* Return true if all imm uses of VAR are either in STMT, or
4133 feed (optionally through a chain of single imm uses) GIMPLE_COND
4134 in basic block COND_BB. */
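
/* E.g. in

	_4 = x_3 & 31;
	if (_4 != 0)

   the use of x_3 in the BIT_AND feeds the GIMPLE_COND through the
   single immediate use _4, so the predicate holds provided the only
   other use of x_3 is in STMT itself (a sketch of the pattern
   handled by maybe_set_nonzero_bits below).  */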
4135
4136 static bool
4137 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
4138 {
4139 use_operand_p use_p, use2_p;
4140 imm_use_iterator iter;
4141
4142 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
4143 if (USE_STMT (use_p) != stmt)
4144 {
4145 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
4146 if (is_gimple_debug (use_stmt))
4147 continue;
4148 while (is_gimple_assign (use_stmt)
4149 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
4150 && single_imm_use (gimple_assign_lhs (use_stmt),
4151 &use2_p, &use_stmt2))
4152 use_stmt = use_stmt2;
4153 if (gimple_code (use_stmt) != GIMPLE_COND
4154 || gimple_bb (use_stmt) != cond_bb)
4155 return false;
4156 }
4157 return true;
4158 }
4159
4160 /* Handle
4161 _4 = x_3 & 31;
4162 if (_4 != 0)
4163 goto <bb 6>;
4164 else
4165 goto <bb 7>;
4166 <bb 6>:
4167 __builtin_unreachable ();
4168 <bb 7>:
4169 x_5 = ASSERT_EXPR <x_3, ...>;
4170 If x_3 has no other immediate uses (checked by the caller) and
4171 VAR is the x_3 var from the ASSERT_EXPR, then we can clear the
4172 low 5 bits from x_3's non-zero bits mask. */
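
/* Worked instance of the above: if x_3's recorded nonzero bits were
   0xff and the edge proves (x_3 & 31) == 0, the new mask is
   0xff & ~0x1f == 0xe0.  */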
4173
4174 void
4175 maybe_set_nonzero_bits (edge e, tree var)
4176 {
4177 basic_block cond_bb = e->src;
4178 gimple *stmt = last_stmt (cond_bb);
4179 tree cst;
4180
4181 if (stmt == NULL
4182 || gimple_code (stmt) != GIMPLE_COND
4183 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
4184 ? EQ_EXPR : NE_EXPR)
4185 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
4186 || !integer_zerop (gimple_cond_rhs (stmt)))
4187 return;
4188
4189 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
4190 if (!is_gimple_assign (stmt)
4191 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
4192 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
4193 return;
4194 if (gimple_assign_rhs1 (stmt) != var)
4195 {
4196 gimple *stmt2;
4197
4198 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
4199 return;
4200 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
4201 if (!gimple_assign_cast_p (stmt2)
4202 || gimple_assign_rhs1 (stmt2) != var
4203 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
4204 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
4205 != TYPE_PRECISION (TREE_TYPE (var))))
4206 return;
4207 }
4208 cst = gimple_assign_rhs2 (stmt);
4209 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
4210 wi::to_wide (cst)));
4211 }
4212
4213 /* Convert range assertion expressions into the implied copies and
4214 copy propagate away the copies. Doing the trivial copy propagation
4215 here avoids the need to run the full copy propagation pass after
4216 VRP.
4217
4218 FIXME, this will eventually lead to copy propagation removing the
4219 names that had useful range information attached to them. For
4220 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
4221 then N_i will have the range [3, +INF].
4222
4223 However, by converting the assertion into the implied copy
4224 operation N_i = N_j, we will then copy-propagate N_j into the uses
4225 of N_i and lose the range information. We may want to hold on to
4226 ASSERT_EXPRs a little while longer as the ranges could be used in
4227 things like jump threading.
4228
4229 The problem with keeping ASSERT_EXPRs around is that passes after
4230 VRP need to handle them appropriately.
4231
4232 Another approach would be to make the range information a first
4233 class property of the SSA_NAME so that it can be queried from
4234 any pass. This is made somewhat more complex by the need for
4235 multiple ranges to be associated with one SSA_NAME. */
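
/* Concretely (a sketch), the loop below rewrites

	x_8 = ASSERT_EXPR <x_7, x_7 >= 10>;
	... uses of x_8 ...

   into plain uses of x_7, after first copying x_8's computed range
   info back to x_7 in the cases where that is known to be safe.  */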
4236
4237 static void
4238 remove_range_assertions (void)
4239 {
4240 basic_block bb;
4241 gimple_stmt_iterator si;
4242 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
4243 a basic block preceded by a GIMPLE_COND branching to it and to
4244 __builtin_trap, -1 if not yet checked, 0 otherwise. */
4245 int is_unreachable;
4246
4247 /* Note that the GSI iterator bump happens at the bottom of the
4248 loop and no bump is necessary if we're removing the statement
4249 referenced by the current GSI. */
4250 FOR_EACH_BB_FN (bb, cfun)
4251 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
4252 {
4253 gimple *stmt = gsi_stmt (si);
4254
4255 if (is_gimple_assign (stmt)
4256 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
4257 {
4258 tree lhs = gimple_assign_lhs (stmt);
4259 tree rhs = gimple_assign_rhs1 (stmt);
4260 tree var;
4261
4262 var = ASSERT_EXPR_VAR (rhs);
4263
4264 if (TREE_CODE (var) == SSA_NAME
4265 && !POINTER_TYPE_P (TREE_TYPE (lhs))
4266 && SSA_NAME_RANGE_INFO (lhs))
4267 {
4268 if (is_unreachable == -1)
4269 {
4270 is_unreachable = 0;
4271 if (single_pred_p (bb)
4272 && assert_unreachable_fallthru_edge_p
4273 (single_pred_edge (bb)))
4274 is_unreachable = 1;
4275 }
4276 /* Handle
4277 if (x_7 >= 10 && x_7 < 20)
4278 __builtin_unreachable ();
4279 x_8 = ASSERT_EXPR <x_7, ...>;
4280 if the only uses of x_7 are in the ASSERT_EXPR and
4281 in the condition. In that case, we can copy the
4282 range info from x_8 computed in this pass also
4283 for x_7. */
4284 if (is_unreachable
4285 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
4286 single_pred (bb)))
4287 {
4288 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
4289 SSA_NAME_RANGE_INFO (lhs)->get_min (),
4290 SSA_NAME_RANGE_INFO (lhs)->get_max ());
4291 maybe_set_nonzero_bits (single_pred_edge (bb), var);
4292 }
4293 }
4294
4295 /* Propagate the RHS into every use of the LHS. For SSA names
4296 also propagate abnormals as it merely restores the original
4297 IL in this case (a replace_uses_by would assert). */
4298 if (TREE_CODE (var) == SSA_NAME)
4299 {
4300 imm_use_iterator iter;
4301 use_operand_p use_p;
4302 gimple *use_stmt;
4303 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4304 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4305 SET_USE (use_p, var);
4306 }
4307 else
4308 replace_uses_by (lhs, var);
4309
4310 /* And finally, remove the copy, it is not needed. */
4311 gsi_remove (&si, true);
4312 release_defs (stmt);
4313 }
4314 else
4315 {
4316 if (!is_gimple_debug (gsi_stmt (si)))
4317 is_unreachable = 0;
4318 gsi_next (&si);
4319 }
4320 }
4321 }
4322
4323 /* Return true if STMT is interesting for VRP. */
4324
4325 bool
4326 stmt_interesting_for_vrp (gimple *stmt)
4327 {
4328 if (gimple_code (stmt) == GIMPLE_PHI)
4329 {
4330 tree res = gimple_phi_result (stmt);
4331 return (!virtual_operand_p (res)
4332 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
4333 || POINTER_TYPE_P (TREE_TYPE (res))));
4334 }
4335 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
4336 {
4337 tree lhs = gimple_get_lhs (stmt);
4338
4339 /* In general, assignments with virtual operands are not useful
4340 for deriving ranges, with the obvious exception of calls to
4341 builtin functions. */
4342 if (lhs && TREE_CODE (lhs) == SSA_NAME
4343 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
4344 || POINTER_TYPE_P (TREE_TYPE (lhs)))
4345 && (is_gimple_call (stmt)
4346 || !gimple_vuse (stmt)))
4347 return true;
4348 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
4349 switch (gimple_call_internal_fn (stmt))
4350 {
4351 case IFN_ADD_OVERFLOW:
4352 case IFN_SUB_OVERFLOW:
4353 case IFN_MUL_OVERFLOW:
4354 case IFN_ATOMIC_COMPARE_EXCHANGE:
4355 /* These internal calls return _Complex integer type,
4356 but are interesting to VRP nevertheless. */
4357 if (lhs && TREE_CODE (lhs) == SSA_NAME)
4358 return true;
4359 break;
4360 default:
4361 break;
4362 }
4363 }
4364 else if (gimple_code (stmt) == GIMPLE_COND
4365 || gimple_code (stmt) == GIMPLE_SWITCH)
4366 return true;
4367
4368 return false;
4369 }
4370
4371 /* Initialization required by ssa_propagate engine. */
4372
4373 void
4374 vrp_prop::vrp_initialize ()
4375 {
4376 basic_block bb;
4377
4378 FOR_EACH_BB_FN (bb, cfun)
4379 {
4380 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
4381 gsi_next (&si))
4382 {
4383 gphi *phi = si.phi ();
4384 if (!stmt_interesting_for_vrp (phi))
4385 {
4386 tree lhs = PHI_RESULT (phi);
4387 set_def_to_varying (lhs);
4388 prop_set_simulate_again (phi, false);
4389 }
4390 else
4391 prop_set_simulate_again (phi, true);
4392 }
4393
4394 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
4395 gsi_next (&si))
4396 {
4397 gimple *stmt = gsi_stmt (si);
4398
4399 /* If the statement is a control insn, then we always want to
4400 simulate it at least once. Failing to do so means that its
4401 outgoing edges will never get added. */
4402 if (stmt_ends_bb_p (stmt))
4403 prop_set_simulate_again (stmt, true);
4404 else if (!stmt_interesting_for_vrp (stmt))
4405 {
4406 set_defs_to_varying (stmt);
4407 prop_set_simulate_again (stmt, false);
4408 }
4409 else
4410 prop_set_simulate_again (stmt, true);
4411 }
4412 }
4413 }
4414
4415 /* Searches the case label vector of switch statement STMT for the index
4416 *IDX of the CASE_LABEL that includes the value VAL. The search is
4417 restricted to the range [START_IDX, n - 1] where n is the number of labels.
4418
4419 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
4420 returned.
4421
4422 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
4423 it is placed in IDX and false is returned.
4424
4425 If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
4426 returned. */
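
/* E.g. (a sketch) with case labels 2 and 5 ... 7, searching for
   VAL == 6 returns true with *IDX at the 5 ... 7 label; VAL == 3
   returns false with *IDX at that same (next larger) label; and
   VAL == 8 returns false with *IDX == n.  */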
4427
4428 bool
4429 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
4430 {
4431 size_t n = gimple_switch_num_labels (stmt);
4432 size_t low, high;
4433
4434 /* Find case label for minimum of the value range or the next one.
4435 At each iteration we are searching in [low, high - 1]. */
4436
4437 for (low = start_idx, high = n; high != low; )
4438 {
4439 tree t;
4440 int cmp;
4441 /* Note that i != high, so we never ask for n. */
4442 size_t i = (high + low) / 2;
4443 t = gimple_switch_label (stmt, i);
4444
4445 /* Cache the result of comparing CASE_LOW and val. */
4446 cmp = tree_int_cst_compare (CASE_LOW (t), val);
4447
4448 if (cmp == 0)
4449 {
4450 /* Ranges cannot be empty. */
4451 *idx = i;
4452 return true;
4453 }
4454 else if (cmp > 0)
4455 high = i;
4456 else
4457 {
4458 low = i + 1;
4459 if (CASE_HIGH (t) != NULL
4460 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
4461 {
4462 *idx = i;
4463 return true;
4464 }
4465 }
4466 }
4467
4468 *idx = high;
4469 return false;
4470 }
4471
4472 /* Searches the case label vector of switch statement STMT for the range
4473 of CASE_LABELs that is used for values between MIN and MAX. The first
4474 index is placed in MIN_IDX and the last in MAX_IDX. If the range of
4475 CASE_LABELs is empty then MAX_IDX < MIN_IDX.
4476 Returns true if the default label is not needed. */
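
/* E.g. (continuing the sketch above) with case labels 2, 5 ... 7
   and 9, the query [MIN, MAX] == [5, 7] selects only the 5 ... 7
   label and returns true, while [5, 9] selects the 5 ... 7 and 9
   labels but returns false, since the hole at 8 still needs the
   default label.  */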
4477
4478 bool
4479 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
4480 size_t *max_idx)
4481 {
4482 size_t i, j;
4483 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
4484 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
4485
4486 if (i == j
4487 && min_take_default
4488 && max_take_default)
4489 {
4490 /* Only the default case label reached.
4491 Return an empty range. */
4492 *min_idx = 1;
4493 *max_idx = 0;
4494 return false;
4495 }
4496 else
4497 {
4498 bool take_default = min_take_default || max_take_default;
4499 tree low, high;
4500 size_t k;
4501
4502 if (max_take_default)
4503 j--;
4504
4505 /* If the case label range is contiguous, we do not need
4506 the default case label. Verify that. */
4507 high = CASE_LOW (gimple_switch_label (stmt, i));
4508 if (CASE_HIGH (gimple_switch_label (stmt, i)))
4509 high = CASE_HIGH (gimple_switch_label (stmt, i));
4510 for (k = i + 1; k <= j; ++k)
4511 {
4512 low = CASE_LOW (gimple_switch_label (stmt, k));
4513 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
4514 {
4515 take_default = true;
4516 break;
4517 }
4518 high = low;
4519 if (CASE_HIGH (gimple_switch_label (stmt, k)))
4520 high = CASE_HIGH (gimple_switch_label (stmt, k));
4521 }
4522
4523 *min_idx = i;
4524 *max_idx = j;
4525 return !take_default;
4526 }
4527 }
4528
4529 /* Evaluate statement STMT. If the statement produces a useful range,
4530 return SSA_PROP_INTERESTING and record the SSA name with the
4531 interesting range into *OUTPUT_P.
4532
4533 If STMT is a conditional branch and we can determine its truth
4534 value, the taken edge is recorded in *TAKEN_EDGE_P.
4535
4536 If STMT produces a varying value, return SSA_PROP_VARYING. */
4537
4538 enum ssa_prop_result
4539 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
4540 {
4541 tree lhs = gimple_get_lhs (stmt);
4542 value_range_equiv vr;
4543 extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
4544
4545 if (*output_p)
4546 {
4547 if (update_value_range (*output_p, &vr))
4548 {
4549 if (dump_file && (dump_flags & TDF_DETAILS))
4550 {
4551 fprintf (dump_file, "Found new range for ");
4552 print_generic_expr (dump_file, *output_p);
4553 fprintf (dump_file, ": ");
4554 dump_value_range (dump_file, &vr);
4555 fprintf (dump_file, "\n");
4556 }
4557
4558 if (vr.varying_p ())
4559 return SSA_PROP_VARYING;
4560
4561 return SSA_PROP_INTERESTING;
4562 }
4563 return SSA_PROP_NOT_INTERESTING;
4564 }
4565
4566 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
4567 switch (gimple_call_internal_fn (stmt))
4568 {
4569 case IFN_ADD_OVERFLOW:
4570 case IFN_SUB_OVERFLOW:
4571 case IFN_MUL_OVERFLOW:
4572 case IFN_ATOMIC_COMPARE_EXCHANGE:
4573 /* These internal calls return _Complex integer type,
4574 which VRP does not track, but the immediate uses
4575 thereof might be interesting. */
4576 if (lhs && TREE_CODE (lhs) == SSA_NAME)
4577 {
4578 imm_use_iterator iter;
4579 use_operand_p use_p;
4580 enum ssa_prop_result res = SSA_PROP_VARYING;
4581
4582 set_def_to_varying (lhs);
4583
4584 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
4585 {
4586 gimple *use_stmt = USE_STMT (use_p);
4587 if (!is_gimple_assign (use_stmt))
4588 continue;
4589 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
4590 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
4591 continue;
4592 tree rhs1 = gimple_assign_rhs1 (use_stmt);
4593 tree use_lhs = gimple_assign_lhs (use_stmt);
4594 if (TREE_CODE (rhs1) != rhs_code
4595 || TREE_OPERAND (rhs1, 0) != lhs
4596 || TREE_CODE (use_lhs) != SSA_NAME
4597 || !stmt_interesting_for_vrp (use_stmt)
4598 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
4599 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
4600 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
4601 continue;
4602
4603 /* If there is a change in the value range for any of the
4604 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
4605 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
4606 or IMAGPART_EXPR immediate uses, but none of them have
4607 a change in their value ranges, return
4608 SSA_PROP_NOT_INTERESTING. If there are no
4609 {REAL,IMAG}PART_EXPR uses at all,
4610 return SSA_PROP_VARYING. */
4611 value_range_equiv new_vr;
4612 extract_range_basic (&new_vr, use_stmt);
4613 const value_range_equiv *old_vr = get_value_range (use_lhs);
4614 if (!old_vr->equal_p (new_vr, /*ignore_equivs=*/false))
4615 res = SSA_PROP_INTERESTING;
4616 else
4617 res = SSA_PROP_NOT_INTERESTING;
4618 new_vr.equiv_clear ();
4619 if (res == SSA_PROP_INTERESTING)
4620 {
4621 *output_p = lhs;
4622 return res;
4623 }
4624 }
4625
4626 return res;
4627 }
4628 break;
4629 default:
4630 break;
4631 }
4632
4633 /* All other statements produce nothing of interest for VRP, so mark
4634 their outputs varying and prevent further simulation. */
4635 set_defs_to_varying (stmt);
4636
4637 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
4638 }
4639
4640 void
4641 value_range_equiv::intersect (const value_range_equiv *other)
4642 {
4643 if (dump_file && (dump_flags & TDF_DETAILS))
4644 {
4645 fprintf (dump_file, "Intersecting\n ");
4646 dump_value_range (dump_file, this);
4647 fprintf (dump_file, "\nand\n ");
4648 dump_value_range (dump_file, other);
4649 fprintf (dump_file, "\n");
4650 }
4651
4652 /* If THIS is varying we want to pick up equivalences from OTHER.
4653 Just special-case this here rather than trying to fixup after the
4654 fact. */
4655 if (this->varying_p ())
4656 this->deep_copy (other);
4657 else
4658 {
4659 value_range tem = intersect_helper (this, other);
4660 this->update (tem.min (), tem.max (), tem.kind ());
4661
4662 /* If the result is VR_UNDEFINED there is no need to mess with
4663 equivalencies. */
4664 if (!undefined_p ())
4665 {
4666 /* The resulting set of equivalences for range intersection
4667 is the union of the two sets. */
4668 if (m_equiv && other->m_equiv && m_equiv != other->m_equiv)
4669 bitmap_ior_into (m_equiv, other->m_equiv);
4670 else if (other->m_equiv && !m_equiv)
4671 {
4672 /* All equivalence bitmaps are allocated from the same
4673 obstack. So we can use the obstack associated with
4674 VR to allocate this->m_equiv. */
4675 m_equiv = BITMAP_ALLOC (other->m_equiv->obstack);
4676 bitmap_copy (m_equiv, other->m_equiv);
4677 }
4678 }
4679 }
4680
4681 if (dump_file && (dump_flags & TDF_DETAILS))
4682 {
4683 fprintf (dump_file, "to\n ");
4684 dump_value_range (dump_file, this);
4685 fprintf (dump_file, "\n");
4686 }
4687 }
4688
4689 void
4690 value_range_equiv::union_ (const value_range_equiv *other)
4691 {
4692 if (dump_file && (dump_flags & TDF_DETAILS))
4693 {
4694 fprintf (dump_file, "Meeting\n ");
4695 dump_value_range (dump_file, this);
4696 fprintf (dump_file, "\nand\n ");
4697 dump_value_range (dump_file, other);
4698 fprintf (dump_file, "\n");
4699 }
4700
4701 /* If THIS is undefined we want to pick up equivalences from OTHER.
4702 Just special-case this here rather than trying to fixup after the fact. */
4703 if (this->undefined_p ())
4704 this->deep_copy (other);
4705 else
4706 {
4707 value_range tem = union_helper (this, other);
4708 this->update (tem.min (), tem.max (), tem.kind ());
4709
4710 /* The resulting set of equivalences is always the intersection of
4711 the two sets. */
4712 if (this->m_equiv && other->m_equiv && this->m_equiv != other->m_equiv)
4713 bitmap_and_into (this->m_equiv, other->m_equiv);
4714 else if (this->m_equiv && !other->m_equiv)
4715 bitmap_clear (this->m_equiv);
4716 }
4717
4718 if (dump_file && (dump_flags & TDF_DETAILS))
4719 {
4720 fprintf (dump_file, "to\n ");
4721 dump_value_range (dump_file, this);
4722 fprintf (dump_file, "\n");
4723 }
4724 }
4725
4726 /* Visit all arguments for PHI node PHI that flow through executable
4727 edges. If a valid value range can be derived from all the incoming
4728 value ranges, set a new range for the LHS of PHI. */
4729
4730 enum ssa_prop_result
4731 vrp_prop::visit_phi (gphi *phi)
4732 {
4733 tree lhs = PHI_RESULT (phi);
4734 value_range_equiv vr_result;
4735 extract_range_from_phi_node (phi, &vr_result);
4736 if (update_value_range (lhs, &vr_result))
4737 {
4738 if (dump_file && (dump_flags & TDF_DETAILS))
4739 {
4740 fprintf (dump_file, "Found new range for ");
4741 print_generic_expr (dump_file, lhs);
4742 fprintf (dump_file, ": ");
4743 dump_value_range (dump_file, &vr_result);
4744 fprintf (dump_file, "\n");
4745 }
4746
4747 if (vr_result.varying_p ())
4748 return SSA_PROP_VARYING;
4749
4750 return SSA_PROP_INTERESTING;
4751 }
4752
4753 /* Nothing changed, don't add outgoing edges. */
4754 return SSA_PROP_NOT_INTERESTING;
4755 }
4756
4757 class vrp_folder : public substitute_and_fold_engine
4758 {
4759 public:
4760 vrp_folder () : substitute_and_fold_engine (/* Fold all stmts. */ true) { }
4761 tree get_value (tree) FINAL OVERRIDE;
4762 bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
4763 bool fold_predicate_in (gimple_stmt_iterator *);
4764
4765 class vr_values *vr_values;
4766
4767 /* Delegators. */
4768 tree vrp_evaluate_conditional (tree_code code, tree op0,
4769 tree op1, gimple *stmt)
4770 { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
4771 bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
4772 { return vr_values->simplify_stmt_using_ranges (gsi); }
4773 tree op_with_constant_singleton_value_range (tree op)
4774 { return vr_values->op_with_constant_singleton_value_range (op); }
4775 };
4776
4777 /* If the statement pointed to by SI has a predicate whose value can be
4778 computed using the value range information computed by VRP, compute
4779 its value and return true. Otherwise, return false. */
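
/* E.g. (a sketch) if VRP has computed x_2's range as [10, 20], the
   predicate in

	if (x_2 > 5)

   is known to be true, so the GIMPLE_COND is folded to constant
   true below.  */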
4780
4781 bool
4782 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
4783 {
4784 bool assignment_p = false;
4785 tree val;
4786 gimple *stmt = gsi_stmt (*si);
4787
4788 if (is_gimple_assign (stmt)
4789 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
4790 {
4791 assignment_p = true;
4792 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
4793 gimple_assign_rhs1 (stmt),
4794 gimple_assign_rhs2 (stmt),
4795 stmt);
4796 }
4797 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
4798 val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
4799 gimple_cond_lhs (cond_stmt),
4800 gimple_cond_rhs (cond_stmt),
4801 stmt);
4802 else
4803 return false;
4804
4805 if (val)
4806 {
4807 if (assignment_p)
4808 val = fold_convert (gimple_expr_type (stmt), val);
4809
4810 if (dump_file)
4811 {
4812 fprintf (dump_file, "Folding predicate ");
4813 print_gimple_expr (dump_file, stmt, 0);
4814 fprintf (dump_file, " to ");
4815 print_generic_expr (dump_file, val);
4816 fprintf (dump_file, "\n");
4817 }
4818
4819 if (is_gimple_assign (stmt))
4820 gimple_assign_set_rhs_from_tree (si, val);
4821 else
4822 {
4823 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
4824 gcond *cond_stmt = as_a <gcond *> (stmt);
4825 if (integer_zerop (val))
4826 gimple_cond_make_false (cond_stmt);
4827 else if (integer_onep (val))
4828 gimple_cond_make_true (cond_stmt);
4829 else
4830 gcc_unreachable ();
4831 }
4832
4833 return true;
4834 }
4835
4836 return false;
4837 }
4838
4839 /* Callback for substitute_and_fold folding the stmt at *SI. */
4840
4841 bool
4842 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
4843 {
4844 if (fold_predicate_in (si))
4845 return true;
4846
4847 return simplify_stmt_using_ranges (si);
4848 }
4849
4850 /* If OP has a value range with a single constant value return that,
4851 otherwise return NULL_TREE. This returns OP itself if OP is a
4852 constant.
4853
4854 Implemented as a pure wrapper right now, but this will change. */
4855
4856 tree
4857 vrp_folder::get_value (tree op)
4858 {
4859 return op_with_constant_singleton_value_range (op);
4860 }
4861
4862 /* Return the LHS of any ASSERT_EXPR where OP appears as the first
4863 argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
4864 BB. If no such ASSERT_EXPR is found, return OP. */
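
/* E.g. (a sketch) given

	x_2 = ASSERT_EXPR <x_1, x_1 > 0>;	<- in a block dominating BB
	...
	if (x_1 == 5)				<- STMT, in BB

   OP == x_1 is mapped to x_2, whose recorded range is typically
   sharper than whatever is known about x_1 itself.  */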
4865
4866 static tree
4867 lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
4868 {
4869 imm_use_iterator imm_iter;
4870 gimple *use_stmt;
4871 use_operand_p use_p;
4872
4873 if (TREE_CODE (op) == SSA_NAME)
4874 {
4875 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
4876 {
4877 use_stmt = USE_STMT (use_p);
4878 if (use_stmt != stmt
4879 && gimple_assign_single_p (use_stmt)
4880 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
4881 && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
4882 && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
4883 return gimple_assign_lhs (use_stmt);
4884 }
4885 }
4886 return op;
4887 }
4888
4889 /* A hack to expose the current vr_values to the jump threading simplifier below. */
4890 static class vr_values *x_vr_values;
4891
4892 /* A trivial wrapper so that we can present the generic jump threading
4893 code with a simple API for simplifying statements. STMT is the
4894 statement we want to simplify, WITHIN_STMT provides the location
4895 for any overflow warnings. */
4896
4897 static tree
4898 simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
4899 class avail_exprs_stack *avail_exprs_stack,
4900 basic_block bb)
4901 {
4902 /* First see if the conditional is in the hash table. */
4903 tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
4904 if (cached_lhs && is_gimple_min_invariant (cached_lhs))
4905 return cached_lhs;
4906
4907 vr_values *vr_values = x_vr_values;
4908 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
4909 {
4910 tree op0 = gimple_cond_lhs (cond_stmt);
4911 op0 = lhs_of_dominating_assert (op0, bb, stmt);
4912
4913 tree op1 = gimple_cond_rhs (cond_stmt);
4914 op1 = lhs_of_dominating_assert (op1, bb, stmt);
4915
4916 return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
4917 op0, op1, within_stmt);
4918 }
4919
4920 /* We simplify a switch statement by trying to determine which case label
4921 will be taken. If we are successful then we return the corresponding
4922 CASE_LABEL_EXPR. */
4923 if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
4924 {
4925 tree op = gimple_switch_index (switch_stmt);
4926 if (TREE_CODE (op) != SSA_NAME)
4927 return NULL_TREE;
4928
4929 op = lhs_of_dominating_assert (op, bb, stmt);
4930
4931 const value_range_equiv *vr = vr_values->get_value_range (op);
4932 if (vr->undefined_p ()
4933 || vr->varying_p ()
4934 || vr->symbolic_p ())
4935 return NULL_TREE;
4936
4937 if (vr->kind () == VR_RANGE)
4938 {
4939 size_t i, j;
4940 /* Get the range of labels that contain a part of the operand's
4941 value range. */
4942 find_case_label_range (switch_stmt, vr->min (), vr->max (), &i, &j);
4943
4944 /* Is there only one such label? */
4945 if (i == j)
4946 {
4947 tree label = gimple_switch_label (switch_stmt, i);
4948
4949 /* The i'th label will be taken only if the value range of the
4950 operand is entirely within the bounds of this label. */
4951 if (CASE_HIGH (label) != NULL_TREE
4952 ? (tree_int_cst_compare (CASE_LOW (label), vr->min ()) <= 0
4953 && tree_int_cst_compare (CASE_HIGH (label),
4954 vr->max ()) >= 0)
4955 : (tree_int_cst_equal (CASE_LOW (label), vr->min ())
4956 && tree_int_cst_equal (vr->min (), vr->max ())))
4957 return label;
4958 }
4959
4960 /* If there are no such labels then the default label will be
4961 taken. */
4962 if (i > j)
4963 return gimple_switch_label (switch_stmt, 0);
4964 }
4965
4966 if (vr->kind () == VR_ANTI_RANGE)
4967 {
4968 unsigned n = gimple_switch_num_labels (switch_stmt);
4969 tree min_label = gimple_switch_label (switch_stmt, 1);
4970 tree max_label = gimple_switch_label (switch_stmt, n - 1);
4971
4972 /* The default label will be taken only if the anti-range of the
4973 operand is entirely outside the bounds of all the (non-default)
4974 case labels. */
4975 if (tree_int_cst_compare (vr->min (), CASE_LOW (min_label)) <= 0
4976 && (CASE_HIGH (max_label) != NULL_TREE
4977 ? tree_int_cst_compare (vr->max (),
4978 CASE_HIGH (max_label)) >= 0
4979 : tree_int_cst_compare (vr->max (),
4980 CASE_LOW (max_label)) >= 0))
4981 return gimple_switch_label (switch_stmt, 0);
4982 }
4983
4984 return NULL_TREE;
4985 }
4986
4987 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
4988 {
4989 tree lhs = gimple_assign_lhs (assign_stmt);
4990 if (TREE_CODE (lhs) == SSA_NAME
4991 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
4992 || POINTER_TYPE_P (TREE_TYPE (lhs)))
4993 && stmt_interesting_for_vrp (stmt))
4994 {
4995 edge dummy_e;
4996 tree dummy_tree;
4997 value_range_equiv new_vr;
4998 vr_values->extract_range_from_stmt (stmt, &dummy_e,
4999 &dummy_tree, &new_vr);
5000 tree singleton;
5001 if (new_vr.singleton_p (&singleton))
5002 return singleton;
5003 }
5004 }
5005
5006 return NULL_TREE;
5007 }
5008
5009 class vrp_dom_walker : public dom_walker
5010 {
5011 public:
5012 vrp_dom_walker (cdi_direction direction,
5013 class const_and_copies *const_and_copies,
5014 class avail_exprs_stack *avail_exprs_stack)
5015 : dom_walker (direction, REACHABLE_BLOCKS),
5016 m_const_and_copies (const_and_copies),
5017 m_avail_exprs_stack (avail_exprs_stack),
5018 m_dummy_cond (NULL) {}
5019
5020 virtual edge before_dom_children (basic_block);
5021 virtual void after_dom_children (basic_block);
5022
5023 class vr_values *vr_values;
5024
5025 private:
5026 class const_and_copies *m_const_and_copies;
5027 class avail_exprs_stack *m_avail_exprs_stack;
5028
5029 gcond *m_dummy_cond;
5030
5031 };
5032
5033 /* Called before processing dominator children of BB. We want to look
5034 at ASSERT_EXPRs and record information from them in the appropriate
5035 tables.
5036
5037 We could look at other statements here. It's not seen as likely
5038 to significantly increase the jump threads we discover. */
5039
5040 edge
5041 vrp_dom_walker::before_dom_children (basic_block bb)
5042 {
5043 gimple_stmt_iterator gsi;
5044
5045 m_avail_exprs_stack->push_marker ();
5046 m_const_and_copies->push_marker ();
5047 for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5048 {
5049 gimple *stmt = gsi_stmt (gsi);
5050 if (gimple_assign_single_p (stmt)
5051 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
5052 {
5053 tree rhs1 = gimple_assign_rhs1 (stmt);
5054 tree cond = TREE_OPERAND (rhs1, 1);
5055 tree inverted = invert_truthvalue (cond);
5056 vec<cond_equivalence> p;
5057 p.create (3);
5058 record_conditions (&p, cond, inverted);
5059 for (unsigned int i = 0; i < p.length (); i++)
5060 m_avail_exprs_stack->record_cond (&p[i]);
5061
5062 tree lhs = gimple_assign_lhs (stmt);
5063 m_const_and_copies->record_const_or_copy (lhs,
5064 TREE_OPERAND (rhs1, 0));
5065 p.release ();
5066 continue;
5067 }
5068 break;
5069 }
5070 return NULL;
5071 }
5072
5073 /* Called after processing dominator children of BB. This is where we
5074 actually call into the threader. */
5075 void
5076 vrp_dom_walker::after_dom_children (basic_block bb)
5077 {
5078 if (!m_dummy_cond)
5079 m_dummy_cond = gimple_build_cond (NE_EXPR,
5080 integer_zero_node, integer_zero_node,
5081 NULL, NULL);
5082
5083 x_vr_values = vr_values;
5084 thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
5085 m_avail_exprs_stack, NULL,
5086 simplify_stmt_for_jump_threading);
5087 x_vr_values = NULL;
5088
5089 m_avail_exprs_stack->pop_to_marker ();
5090 m_const_and_copies->pop_to_marker ();
5091 }
5092
5093 /* Blocks which have more than one predecessor and more than
5094 one successor present jump threading opportunities, i.e.,
5095 when the block is reached from a specific predecessor, we
5096 may be able to determine which of the outgoing edges will
5097 be traversed. When this optimization applies, we are able
5098 to avoid conditionals at runtime and we may expose secondary
5099 optimization opportunities.
5100
5101 This routine is effectively a driver for the generic jump
5102 threading code. It basically just presents the generic code
5103 with edges that may be suitable for jump threading.
5104
5105 Unlike DOM, we do not iterate VRP if jump threading was successful.
5106 While iterating may expose new opportunities for VRP, it is expected
5107 those opportunities would be very limited and the compile time cost
5108 to expose those opportunities would be significant.
5109
5110 As jump threading opportunities are discovered, they are registered
5111 for later realization. */
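
/* E.g. (a sketch) in

	B1: if (a_1 > 10) goto B3; else goto B2;
	...
	B3: if (a_1 > 5) ...

   when B3 is entered from B1, a_1 is in [11, +INF], so the second
   condition is known true and the path through B3 can be threaded
   directly to B3's true successor.  */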
5112
5113 static void
5114 identify_jump_threads (class vr_values *vr_values)
5115 {
5116 /* Ugh. When substituting values earlier in this pass we can
5117 wipe the dominance information. So rebuild the dominator
5118 information as we need it within the jump threading code. */
5119 calculate_dominance_info (CDI_DOMINATORS);
5120
5121 /* We do not allow VRP information to be used for jump threading
5122 across a back edge in the CFG. Otherwise it becomes too
5123 difficult to avoid eliminating loop exit tests. Of course
5124 EDGE_DFS_BACK is not accurate at this time so we have to
5125 recompute it. */
5126 mark_dfs_back_edges ();
5127
5128 /* Allocate our unwinder stack to unwind any temporary equivalences
5129 that might be recorded. */
5130 const_and_copies *equiv_stack = new const_and_copies ();
5131
5132 hash_table<expr_elt_hasher> *avail_exprs
5133 = new hash_table<expr_elt_hasher> (1024);
5134 avail_exprs_stack *avail_exprs_stack
5135 = new class avail_exprs_stack (avail_exprs);
5136
5137 vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
5138 walker.vr_values = vr_values;
5139 walker.walk (cfun->cfg->x_entry_block_ptr);
5140
5141 /* We do not actually update the CFG or SSA graphs at this point as
5142 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
5143 handle ASSERT_EXPRs gracefully. */
5144 delete equiv_stack;
5145 delete avail_exprs;
5146 delete avail_exprs_stack;
5147 }
5148
5149 /* Traverse all the blocks folding conditionals with known ranges. */
5150
5151 void
5152 vrp_prop::vrp_finalize (bool warn_array_bounds_p)
5153 {
5154 size_t i;
5155
5156 /* We have completed propagating through the lattice. */
5157 vr_values.set_lattice_propagation_complete ();
5158
5159 if (dump_file)
5160 {
5161 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
5162 vr_values.dump_all_value_ranges (dump_file);
5163 fprintf (dump_file, "\n");
5164 }
5165
5166 /* Record computed ranges on SSA_NAMEs: non-null for pointers, range info for the rest. */
5167 for (i = 0; i < num_ssa_names; i++)
5168 {
5169 tree name = ssa_name (i);
5170 if (!name)
5171 continue;
5172
5173 const value_range_equiv *vr = get_value_range (name);
5174 if (!name || !vr->constant_p ())
5175 continue;
5176
5177 if (POINTER_TYPE_P (TREE_TYPE (name))
5178 && range_includes_zero_p (vr) == 0)
5179 set_ptr_nonnull (name);
5180 else if (!POINTER_TYPE_P (TREE_TYPE (name)))
5181 set_range_info (name, *vr);
5182 }
5183
5184 /* If we're checking array refs, we want to merge information on
5185 the executability of each edge between vrp_folder and the
5186 check_array_bounds_dom_walker: each can clear the
5187 EDGE_EXECUTABLE flag on edges, in different ways.
5188
5189 Hence, if we're going to call check_all_array_refs, set
5190 the flag on every edge now, rather than in
5191 check_array_bounds_dom_walker's ctor; vrp_folder may clear
5192 it from some edges. */
5193 if (warn_array_bounds && warn_array_bounds_p)
5194 set_all_edges_as_executable (cfun);
5195
5196 class vrp_folder vrp_folder;
5197 vrp_folder.vr_values = &vr_values;
5198 vrp_folder.substitute_and_fold ();
5199
5200 if (warn_array_bounds && warn_array_bounds_p)
5201 check_all_array_refs ();
5202 }
5203
/* Main entry point to VRP (Value Range Propagation).  This pass is
   loosely based on J. R. C. Patterson, ``Accurate Static Branch
   Prediction by Value Range Propagation,'' in SIGPLAN Conference on
   Programming Language Design and Implementation, pp. 67-78, 1995.
   Also available at http://citeseer.ist.psu.edu/patterson95accurate.html

   This is essentially an SSA-CCP pass modified to deal with ranges
   instead of constants.

   While propagating ranges, we may find that two or more SSA names
   have equivalent, though distinct, ranges.  For instance,

     1	x_9 = p_3->a;
     2	p_4 = ASSERT_EXPR <p_3, p_3 != 0>
     3	if (p_4 == q_2)
     4	  p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
     5	endif
     6	if (q_2)

   In the code above, pointer p_5 has range [q_2, q_2], but from the
   code we can also determine that p_5 cannot be NULL and, if q_2 had
   a non-varying range, p_5's range should also be compatible with it.

   These equivalences are created by two expressions: ASSERT_EXPR and
   copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
   result of another assertion, we can use the fact that p_5 and
   p_4 are equivalent when evaluating p_5's range.

   Together with value ranges, we also propagate these equivalences
   between names so that we can take advantage of information from
   multiple ranges when doing final replacement.  Note that this
   equivalence relation is transitive but not symmetric.

   In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
   cannot assert that q_2 is equivalent to p_5 because q_2 may be used
   in contexts where that assertion does not hold (e.g., in line 6).

   TODO: the main difference between this pass and Patterson's is that
   we do not propagate edge probabilities.  We only compute whether
   edges can be taken or not.  That is, instead of having a spectrum
   of jump probabilities between 0 and 1, we only deal with 0, 1 and
   DON'T KNOW.  In the future, it may be worthwhile to propagate
   probabilities to aid branch prediction.  */
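
/* As an illustrative sketch (not taken from the paper): if VRP
   computes the range [0, 5] for x_2, then for

     if (x_2 > 10)

   the true edge can never be taken (probability 0) and the false edge
   is always taken (probability 1).  With a range such as [0, 20] the
   answer would be DON'T KNOW and neither outgoing edge could be
   pruned.  */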

static unsigned int
execute_vrp (bool warn_array_bounds_p)
{
  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();

  /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
     Inserting assertions may split edges which will invalidate
     EDGE_DFS_BACK.  */
  insert_range_assertions ();

  threadedge_initialize_values ();

  /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
  mark_dfs_back_edges ();

  class vrp_prop vrp_prop;
  vrp_prop.vrp_initialize ();
  vrp_prop.ssa_propagate ();
  vrp_prop.vrp_finalize (warn_array_bounds_p);

  /* We must identify jump threading opportunities before we release
     the data structures built by VRP.  */
  identify_jump_threads (&vrp_prop.vr_values);

  /* A comparison of an SSA_NAME against a constant where the SSA_NAME
     was set by a type conversion can often be rewritten to use the
     RHS of the type conversion.

     However, doing so inhibits jump threading through the comparison,
     so that transformation is not performed until after jump threading
     is complete.  */
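  /* For instance (a hypothetical GIMPLE fragment):

       _1 = (int) c_2;
       if (_1 == 42)

     may be rewritten to test c_2 directly when the conversion cannot
     change the outcome of the comparison.  Doing this earlier would
     have hidden the equivalence between _1 and c_2 from the jump
     threader.  */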
  basic_block bb;
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple *last = last_stmt (bb);
      if (last && gimple_code (last) == GIMPLE_COND)
	vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
    }

  free_numbers_of_iterations_estimates (cfun);

  /* ASSERT_EXPRs must be removed before finalizing jump threads
     as finalizing jump threads calls the CFG cleanup code which
     does not properly handle ASSERT_EXPRs.  */
  remove_range_assertions ();

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  /* We identified all the jump threading opportunities earlier, but could
     not transform the CFG at that time.  This routine transforms the
     CFG and arranges for the dominator tree to be rebuilt if necessary.

     Note the SSA graph update will occur during the normal TODO
     processing by the pass manager.  */
  thread_through_all_blocks (false);

  vrp_prop.vr_values.cleanup_edges_and_switches ();
  threadedge_finalize_values ();

  scev_finalize ();
  loop_optimizer_finalize ();
  return 0;
}

namespace {

const pass_data pass_data_vrp =
{
  GIMPLE_PASS, /* type */
  "vrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_vrp : public gimple_opt_pass
{
public:
  pass_vrp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_vrp (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      warn_array_bounds_p = param;
    }
  virtual bool gate (function *) { return flag_tree_vrp != 0; }
  virtual unsigned int execute (function *)
    { return execute_vrp (warn_array_bounds_p); }

private:
  bool warn_array_bounds_p;
}; // class pass_vrp

} // anon namespace

gimple_opt_pass *
make_pass_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt);
}



/* Worker for determine_value_range.  */

static void
determine_value_range_1 (value_range *vr, tree expr)
{
  if (BINARY_CLASS_P (expr))
    {
      value_range vr0, vr1;
      determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
      determine_value_range_1 (&vr1, TREE_OPERAND (expr, 1));
      range_fold_binary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
			      &vr0, &vr1);
    }
  else if (UNARY_CLASS_P (expr))
    {
      value_range vr0;
      determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
      range_fold_unary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
			     &vr0, TREE_TYPE (TREE_OPERAND (expr, 0)));
    }
  else if (TREE_CODE (expr) == INTEGER_CST)
    vr->set (expr);
  else
    {
      value_range_kind kind;
      wide_int min, max;
      /* For SSA names try to extract range info computed by VRP.  Otherwise
	 fall back to varying.  */
      if (TREE_CODE (expr) == SSA_NAME
	  && INTEGRAL_TYPE_P (TREE_TYPE (expr))
	  && (kind = get_range_info (expr, &min, &max)) != VR_VARYING)
	vr->set (wide_int_to_tree (TREE_TYPE (expr), min),
		 wide_int_to_tree (TREE_TYPE (expr), max),
		 kind);
      else
	vr->set_varying (TREE_TYPE (expr));
    }
}

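/* As an illustrative sketch: for the expression x_1 + 3, where VRP
   recorded the range [0, 10] for x_1, the recursion above computes
   [0, 10] for the SSA name, the singleton [3, 3] for the constant,
   and folds them into [3, 13] for the sum (assuming the addition
   cannot overflow in its type).  */
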
/* Compute a value-range for EXPR.  If a constant range can be
   determined, store it in *MIN and *MAX and return its kind;
   otherwise return VR_VARYING and leave *MIN and *MAX untouched.  */

value_range_kind
determine_value_range (tree expr, wide_int *min, wide_int *max)
{
  value_range vr;
  determine_value_range_1 (&vr, expr);
  if (vr.constant_p ())
    {
      *min = wi::to_wide (vr.min ());
      *max = wi::to_wide (vr.max ());
      return vr.kind ();
    }

  return VR_VARYING;
}
