1 /* Support routines for Value Range Propagation (VRP). 2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 3 Free Software Foundation, Inc. 4 Contributed by Diego Novillo <dnovillo@redhat.com>. 5 6 This file is part of GCC. 7 8 GCC is free software; you can redistribute it and/or modify 9 it under the terms of the GNU General Public License as published by 10 the Free Software Foundation; either version 3, or (at your option) 11 any later version. 12 13 GCC is distributed in the hope that it will be useful, 14 but WITHOUT ANY WARRANTY; without even the implied warranty of 15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 GNU General Public License for more details. 17 18 You should have received a copy of the GNU General Public License 19 along with GCC; see the file COPYING3. If not see 20 <http://www.gnu.org/licenses/>. */ 21 22 #include "config.h" 23 #include "system.h" 24 #include "coretypes.h" 25 #include "tm.h" 26 #include "ggc.h" 27 #include "flags.h" 28 #include "tree.h" 29 #include "basic-block.h" 30 #include "tree-flow.h" 31 #include "tree-pass.h" 32 #include "tree-dump.h" 33 #include "timevar.h" 34 #include "diagnostic.h" 35 #include "toplev.h" 36 #include "intl.h" 37 #include "cfgloop.h" 38 #include "tree-scalar-evolution.h" 39 #include "tree-ssa-propagate.h" 40 #include "tree-chrec.h" 41 42 43 /* Set of SSA names found live during the RPO traversal of the function 44 for still active basic-blocks. */ 45 static sbitmap *live; 46 47 /* Return true if the SSA name NAME is live on the edge E. */ 48 49 static bool 50 live_on_edge (edge e, tree name) 51 { 52 return (live[e->dest->index] 53 && TEST_BIT (live[e->dest->index], SSA_NAME_VERSION (name))); 54 } 55 56 /* Local functions. */ 57 static int compare_values (tree val1, tree val2); 58 static int compare_values_warnv (tree val1, tree val2, bool *); 59 static void vrp_meet (value_range_t *, value_range_t *); 60 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code, 61 tree, tree, bool, bool *, 62 bool *); 63 64 /* Location information for ASSERT_EXPRs. Each instance of this 65 structure describes an ASSERT_EXPR for an SSA name. Since a single 66 SSA name may have more than one assertion associated with it, these 67 locations are kept in a linked list attached to the corresponding 68 SSA name. */ 69 struct assert_locus_d 70 { 71 /* Basic block where the assertion would be inserted. */ 72 basic_block bb; 73 74 /* Some assertions need to be inserted on an edge (e.g., assertions 75 generated by COND_EXPRs). In those cases, BB will be NULL. */ 76 edge e; 77 78 /* Pointer to the statement that generated this assertion. */ 79 gimple_stmt_iterator si; 80 81 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */ 82 enum tree_code comp_code; 83 84 /* Value being compared against. */ 85 tree val; 86 87 /* Expression to compare. */ 88 tree expr; 89 90 /* Next node in the linked list. */ 91 struct assert_locus_d *next; 92 }; 93 94 typedef struct assert_locus_d *assert_locus_t; 95 96 /* If bit I is present, it means that SSA name N_i has a list of 97 assertions that should be inserted in the IL. */ 98 static bitmap need_assert_for; 99 100 /* Array of locations lists where to insert assertions. ASSERTS_FOR[I] 101 holds a list of ASSERT_LOCUS_T nodes that describe where 102 ASSERT_EXPRs for SSA name N_I should be inserted. */ 103 static assert_locus_t *asserts_for; 104 105 /* Value range array. After propagation, VR_VALUE[I] holds the range 106 of values that SSA name N_I may take. 
*/ 107 static value_range_t **vr_value; 108 109 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the 110 number of executable edges we saw the last time we visited the 111 node. */ 112 static int *vr_phi_edge_counts; 113 114 typedef struct { 115 gimple stmt; 116 tree vec; 117 } switch_update; 118 119 static VEC (edge, heap) *to_remove_edges; 120 DEF_VEC_O(switch_update); 121 DEF_VEC_ALLOC_O(switch_update, heap); 122 static VEC (switch_update, heap) *to_update_switch_stmts; 123 124 125 /* Return the maximum value for TYPE. */ 126 127 static inline tree 128 vrp_val_max (const_tree type) 129 { 130 if (!INTEGRAL_TYPE_P (type)) 131 return NULL_TREE; 132 133 return TYPE_MAX_VALUE (type); 134 } 135 136 /* Return the minimum value for TYPE. */ 137 138 static inline tree 139 vrp_val_min (const_tree type) 140 { 141 if (!INTEGRAL_TYPE_P (type)) 142 return NULL_TREE; 143 144 return TYPE_MIN_VALUE (type); 145 } 146 147 /* Return whether VAL is equal to the maximum value of its type. This 148 will be true for a positive overflow infinity. We can't do a 149 simple equality comparison with TYPE_MAX_VALUE because C typedefs 150 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not == 151 to the integer constant with the same value in the type. */ 152 153 static inline bool 154 vrp_val_is_max (const_tree val) 155 { 156 tree type_max = vrp_val_max (TREE_TYPE (val)); 157 return (val == type_max 158 || (type_max != NULL_TREE 159 && operand_equal_p (val, type_max, 0))); 160 } 161 162 /* Return whether VAL is equal to the minimum value of its type. This 163 will be true for a negative overflow infinity. */ 164 165 static inline bool 166 vrp_val_is_min (const_tree val) 167 { 168 tree type_min = vrp_val_min (TREE_TYPE (val)); 169 return (val == type_min 170 || (type_min != NULL_TREE 171 && operand_equal_p (val, type_min, 0))); 172 } 173 174 175 /* Return whether TYPE should use an overflow infinity distinct from 176 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to 177 represent a signed overflow during VRP computations. An infinity 178 is distinct from a half-range, which will go from some number to 179 TYPE_{MIN,MAX}_VALUE. */ 180 181 static inline bool 182 needs_overflow_infinity (const_tree type) 183 { 184 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type); 185 } 186 187 /* Return whether TYPE can support our overflow infinity 188 representation: we use the TREE_OVERFLOW flag, which only exists 189 for constants. If TYPE doesn't support this, we don't optimize 190 cases which would require signed overflow--we drop them to 191 VARYING. */ 192 193 static inline bool 194 supports_overflow_infinity (const_tree type) 195 { 196 tree min = vrp_val_min (type), max = vrp_val_max (type); 197 #ifdef ENABLE_CHECKING 198 gcc_assert (needs_overflow_infinity (type)); 199 #endif 200 return (min != NULL_TREE 201 && CONSTANT_CLASS_P (min) 202 && max != NULL_TREE 203 && CONSTANT_CLASS_P (max)); 204 } 205 206 /* VAL is the maximum or minimum value of a type. Return a 207 corresponding overflow infinity. */ 208 209 static inline tree 210 make_overflow_infinity (tree val) 211 { 212 #ifdef ENABLE_CHECKING 213 gcc_assert (val != NULL_TREE && CONSTANT_CLASS_P (val)); 214 #endif 215 val = copy_node (val); 216 TREE_OVERFLOW (val) = 1; 217 return val; 218 } 219 220 /* Return a negative overflow infinity for TYPE. 
*/ 221 222 static inline tree 223 negative_overflow_infinity (tree type) 224 { 225 #ifdef ENABLE_CHECKING 226 gcc_assert (supports_overflow_infinity (type)); 227 #endif 228 return make_overflow_infinity (vrp_val_min (type)); 229 } 230 231 /* Return a positive overflow infinity for TYPE. */ 232 233 static inline tree 234 positive_overflow_infinity (tree type) 235 { 236 #ifdef ENABLE_CHECKING 237 gcc_assert (supports_overflow_infinity (type)); 238 #endif 239 return make_overflow_infinity (vrp_val_max (type)); 240 } 241 242 /* Return whether VAL is a negative overflow infinity. */ 243 244 static inline bool 245 is_negative_overflow_infinity (const_tree val) 246 { 247 return (needs_overflow_infinity (TREE_TYPE (val)) 248 && CONSTANT_CLASS_P (val) 249 && TREE_OVERFLOW (val) 250 && vrp_val_is_min (val)); 251 } 252 253 /* Return whether VAL is a positive overflow infinity. */ 254 255 static inline bool 256 is_positive_overflow_infinity (const_tree val) 257 { 258 return (needs_overflow_infinity (TREE_TYPE (val)) 259 && CONSTANT_CLASS_P (val) 260 && TREE_OVERFLOW (val) 261 && vrp_val_is_max (val)); 262 } 263 264 /* Return whether VAL is a positive or negative overflow infinity. */ 265 266 static inline bool 267 is_overflow_infinity (const_tree val) 268 { 269 return (needs_overflow_infinity (TREE_TYPE (val)) 270 && CONSTANT_CLASS_P (val) 271 && TREE_OVERFLOW (val) 272 && (vrp_val_is_min (val) || vrp_val_is_max (val))); 273 } 274 275 /* Return whether STMT has a constant rhs that is_overflow_infinity. */ 276 277 static inline bool 278 stmt_overflow_infinity (gimple stmt) 279 { 280 if (is_gimple_assign (stmt) 281 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) == 282 GIMPLE_SINGLE_RHS) 283 return is_overflow_infinity (gimple_assign_rhs1 (stmt)); 284 return false; 285 } 286 287 /* If VAL is now an overflow infinity, return VAL. Otherwise, return 288 the same value with TREE_OVERFLOW clear. This can be used to avoid 289 confusing a regular value with an overflow value. */ 290 291 static inline tree 292 avoid_overflow_infinity (tree val) 293 { 294 if (!is_overflow_infinity (val)) 295 return val; 296 297 if (vrp_val_is_max (val)) 298 return vrp_val_max (TREE_TYPE (val)); 299 else 300 { 301 #ifdef ENABLE_CHECKING 302 gcc_assert (vrp_val_is_min (val)); 303 #endif 304 return vrp_val_min (TREE_TYPE (val)); 305 } 306 } 307 308 309 /* Return true if ARG is marked with the nonnull attribute in the 310 current function signature. */ 311 312 static bool 313 nonnull_arg_p (const_tree arg) 314 { 315 tree t, attrs, fntype; 316 unsigned HOST_WIDE_INT arg_num; 317 318 gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg))); 319 320 /* The static chain decl is always non null. */ 321 if (arg == cfun->static_chain_decl) 322 return true; 323 324 fntype = TREE_TYPE (current_function_decl); 325 attrs = lookup_attribute ("nonnull", TYPE_ATTRIBUTES (fntype)); 326 327 /* If "nonnull" wasn't specified, we know nothing about the argument. */ 328 if (attrs == NULL_TREE) 329 return false; 330 331 /* If "nonnull" applies to all the arguments, then ARG is non-null. */ 332 if (TREE_VALUE (attrs) == NULL_TREE) 333 return true; 334 335 /* Get the position number for ARG in the function signature. */ 336 for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl); 337 t; 338 t = TREE_CHAIN (t), arg_num++) 339 { 340 if (t == arg) 341 break; 342 } 343 344 gcc_assert (t == arg); 345 346 /* Now see if ARG_NUM is mentioned in the nonnull list. 
*/
  for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
    {
      if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
        return true;
    }

  return false;
}


/* Set value range VR to VR_VARYING.  */

static inline void
set_value_range_to_varying (value_range_t *vr)
{
  vr->type = VR_VARYING;
  vr->min = vr->max = NULL_TREE;
  if (vr->equiv)
    bitmap_clear (vr->equiv);
}


/* Set value range VR to {T, MIN, MAX, EQUIV}.  */

static void
set_value_range (value_range_t *vr, enum value_range_type t, tree min,
                 tree max, bitmap equiv)
{
#if defined ENABLE_CHECKING
  /* Check the validity of the range.  */
  if (t == VR_RANGE || t == VR_ANTI_RANGE)
    {
      int cmp;

      gcc_assert (min && max);

      if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
        gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));

      cmp = compare_values (min, max);
      gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);

      if (needs_overflow_infinity (TREE_TYPE (min)))
        gcc_assert (!is_overflow_infinity (min)
                    || !is_overflow_infinity (max));
    }

  if (t == VR_UNDEFINED || t == VR_VARYING)
    gcc_assert (min == NULL_TREE && max == NULL_TREE);

  if (t == VR_UNDEFINED || t == VR_VARYING)
    gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
#endif

  vr->type = t;
  vr->min = min;
  vr->max = max;

  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.  */
  if (vr->equiv == NULL
      && equiv != NULL)
    vr->equiv = BITMAP_ALLOC (NULL);

  if (equiv != vr->equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
        bitmap_copy (vr->equiv, equiv);
      else
        bitmap_clear (vr->equiv);
    }
}


/* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
   This means adjusting T, MIN and MAX to represent the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as the anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */

static void
set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
                                  tree min, tree max, bitmap equiv)
{
  /* Nothing to canonicalize for symbolic or unknown or varying ranges.  */
  if ((t != VR_RANGE
       && t != VR_ANTI_RANGE)
      || TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      set_value_range (vr, t, min, max, equiv);
      return;
    }

  /* MIN and MAX are in the wrong order; to swap them we also need to
     adjust them and flip the VR type.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one = build_int_cst (TREE_TYPE (min), 1);
      tree tmp = int_const_binop (PLUS_EXPR, max, one, 0);
      max = int_const_binop (MINUS_EXPR, min, one, 0);
      min = tmp;

      /* There's one corner case: if we had [C+1, C] before, we now have
         that again.  But this represents an empty value range, so drop
         to varying in this case.  */
      if (tree_int_cst_lt (max, min))
        {
          set_value_range_to_varying (vr);
          return;
        }

      t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  /* Anti-ranges that can be represented as ranges should be so.
*/ 466 if (t == VR_ANTI_RANGE) 467 { 468 bool is_min = vrp_val_is_min (min); 469 bool is_max = vrp_val_is_max (max); 470 471 if (is_min && is_max) 472 { 473 /* We cannot deal with empty ranges, drop to varying. */ 474 set_value_range_to_varying (vr); 475 return; 476 } 477 else if (is_min 478 /* As a special exception preserve non-null ranges. */ 479 && !(TYPE_UNSIGNED (TREE_TYPE (min)) 480 && integer_zerop (max))) 481 { 482 tree one = build_int_cst (TREE_TYPE (max), 1); 483 min = int_const_binop (PLUS_EXPR, max, one, 0); 484 max = vrp_val_max (TREE_TYPE (max)); 485 t = VR_RANGE; 486 } 487 else if (is_max) 488 { 489 tree one = build_int_cst (TREE_TYPE (min), 1); 490 max = int_const_binop (MINUS_EXPR, min, one, 0); 491 min = vrp_val_min (TREE_TYPE (min)); 492 t = VR_RANGE; 493 } 494 } 495 496 set_value_range (vr, t, min, max, equiv); 497 } 498 499 /* Copy value range FROM into value range TO. */ 500 501 static inline void 502 copy_value_range (value_range_t *to, value_range_t *from) 503 { 504 set_value_range (to, from->type, from->min, from->max, from->equiv); 505 } 506 507 /* Set value range VR to a single value. This function is only called 508 with values we get from statements, and exists to clear the 509 TREE_OVERFLOW flag so that we don't think we have an overflow 510 infinity when we shouldn't. */ 511 512 static inline void 513 set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv) 514 { 515 gcc_assert (is_gimple_min_invariant (val)); 516 val = avoid_overflow_infinity (val); 517 set_value_range (vr, VR_RANGE, val, val, equiv); 518 } 519 520 /* Set value range VR to a non-negative range of type TYPE. 521 OVERFLOW_INFINITY indicates whether to use an overflow infinity 522 rather than TYPE_MAX_VALUE; this should be true if we determine 523 that the range is nonnegative based on the assumption that signed 524 overflow does not occur. */ 525 526 static inline void 527 set_value_range_to_nonnegative (value_range_t *vr, tree type, 528 bool overflow_infinity) 529 { 530 tree zero; 531 532 if (overflow_infinity && !supports_overflow_infinity (type)) 533 { 534 set_value_range_to_varying (vr); 535 return; 536 } 537 538 zero = build_int_cst (type, 0); 539 set_value_range (vr, VR_RANGE, zero, 540 (overflow_infinity 541 ? positive_overflow_infinity (type) 542 : TYPE_MAX_VALUE (type)), 543 vr->equiv); 544 } 545 546 /* Set value range VR to a non-NULL range of type TYPE. */ 547 548 static inline void 549 set_value_range_to_nonnull (value_range_t *vr, tree type) 550 { 551 tree zero = build_int_cst (type, 0); 552 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv); 553 } 554 555 556 /* Set value range VR to a NULL range of type TYPE. */ 557 558 static inline void 559 set_value_range_to_null (value_range_t *vr, tree type) 560 { 561 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv); 562 } 563 564 565 /* Set value range VR to a range of a truthvalue of type TYPE. */ 566 567 static inline void 568 set_value_range_to_truthvalue (value_range_t *vr, tree type) 569 { 570 if (TYPE_PRECISION (type) == 1) 571 set_value_range_to_varying (vr); 572 else 573 set_value_range (vr, VR_RANGE, 574 build_int_cst (type, 0), build_int_cst (type, 1), 575 vr->equiv); 576 } 577 578 579 /* Set value range VR to VR_UNDEFINED. 
*/ 580 581 static inline void 582 set_value_range_to_undefined (value_range_t *vr) 583 { 584 vr->type = VR_UNDEFINED; 585 vr->min = vr->max = NULL_TREE; 586 if (vr->equiv) 587 bitmap_clear (vr->equiv); 588 } 589 590 591 /* If abs (min) < abs (max), set VR to [-max, max], if 592 abs (min) >= abs (max), set VR to [-min, min]. */ 593 594 static void 595 abs_extent_range (value_range_t *vr, tree min, tree max) 596 { 597 int cmp; 598 599 gcc_assert (TREE_CODE (min) == INTEGER_CST); 600 gcc_assert (TREE_CODE (max) == INTEGER_CST); 601 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min))); 602 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min))); 603 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min); 604 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max); 605 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max)) 606 { 607 set_value_range_to_varying (vr); 608 return; 609 } 610 cmp = compare_values (min, max); 611 if (cmp == -1) 612 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max); 613 else if (cmp == 0 || cmp == 1) 614 { 615 max = min; 616 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min); 617 } 618 else 619 { 620 set_value_range_to_varying (vr); 621 return; 622 } 623 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL); 624 } 625 626 627 /* Return value range information for VAR. 628 629 If we have no values ranges recorded (ie, VRP is not running), then 630 return NULL. Otherwise create an empty range if none existed for VAR. */ 631 632 static value_range_t * 633 get_value_range (const_tree var) 634 { 635 value_range_t *vr; 636 tree sym; 637 unsigned ver = SSA_NAME_VERSION (var); 638 639 /* If we have no recorded ranges, then return NULL. */ 640 if (! vr_value) 641 return NULL; 642 643 vr = vr_value[ver]; 644 if (vr) 645 return vr; 646 647 /* Create a default value range. */ 648 vr_value[ver] = vr = XCNEW (value_range_t); 649 650 /* Defer allocating the equivalence set. */ 651 vr->equiv = NULL; 652 653 /* If VAR is a default definition, the variable can take any value 654 in VAR's type. */ 655 sym = SSA_NAME_VAR (var); 656 if (SSA_NAME_IS_DEFAULT_DEF (var)) 657 { 658 /* Try to use the "nonnull" attribute to create ~[0, 0] 659 anti-ranges for pointers. Note that this is only valid with 660 default definitions of PARM_DECLs. */ 661 if (TREE_CODE (sym) == PARM_DECL 662 && POINTER_TYPE_P (TREE_TYPE (sym)) 663 && nonnull_arg_p (sym)) 664 set_value_range_to_nonnull (vr, TREE_TYPE (sym)); 665 else 666 set_value_range_to_varying (vr); 667 } 668 669 return vr; 670 } 671 672 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */ 673 674 static inline bool 675 vrp_operand_equal_p (const_tree val1, const_tree val2) 676 { 677 if (val1 == val2) 678 return true; 679 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0)) 680 return false; 681 if (is_overflow_infinity (val1)) 682 return is_overflow_infinity (val2); 683 return true; 684 } 685 686 /* Return true, if the bitmaps B1 and B2 are equal. */ 687 688 static inline bool 689 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2) 690 { 691 return (b1 == b2 692 || (b1 && b2 693 && bitmap_equal_p (b1, b2))); 694 } 695 696 /* Update the value range and equivalence set for variable VAR to 697 NEW_VR. Return true if NEW_VR is different from VAR's previous 698 value. 699 700 NOTE: This function assumes that NEW_VR is a temporary value range 701 object created for the sole purpose of updating VAR's range. The 702 storage used by the equivalence set from NEW_VR will be freed by 703 this function. 
   Do not call update_value_range when NEW_VR
   is the range object associated with another SSA name.  */

static inline bool
update_value_range (const_tree var, value_range_t *new_vr)
{
  value_range_t *old_vr;
  bool is_new;

  /* Update the value range, if necessary.  */
  old_vr = get_value_range (var);
  is_new = old_vr->type != new_vr->type
           || !vrp_operand_equal_p (old_vr->min, new_vr->min)
           || !vrp_operand_equal_p (old_vr->max, new_vr->max)
           || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);

  if (is_new)
    set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
                     new_vr->equiv);

  BITMAP_FREE (new_vr->equiv);

  return is_new;
}


/* Add VAR and VAR's equivalence set to EQUIV.  This is the central
   point where equivalence processing can be turned on/off.  */

static void
add_equivalence (bitmap *equiv, const_tree var)
{
  unsigned ver = SSA_NAME_VERSION (var);
  value_range_t *vr = vr_value[ver];

  if (*equiv == NULL)
    *equiv = BITMAP_ALLOC (NULL);
  bitmap_set_bit (*equiv, ver);
  if (vr && vr->equiv)
    bitmap_ior_into (*equiv, vr->equiv);
}


/* Return true if VR is ~[0, 0].  */

static inline bool
range_is_nonnull (value_range_t *vr)
{
  return vr->type == VR_ANTI_RANGE
         && integer_zerop (vr->min)
         && integer_zerop (vr->max);
}


/* Return true if VR is [0, 0].  */

static inline bool
range_is_null (value_range_t *vr)
{
  return vr->type == VR_RANGE
         && integer_zerop (vr->min)
         && integer_zerop (vr->max);
}

/* Return true if max and min of VR are INTEGER_CST.  It's not necessarily
   a singleton.  */

static inline bool
range_int_cst_p (value_range_t *vr)
{
  return (vr->type == VR_RANGE
          && TREE_CODE (vr->max) == INTEGER_CST
          && TREE_CODE (vr->min) == INTEGER_CST
          && !TREE_OVERFLOW (vr->max)
          && !TREE_OVERFLOW (vr->min));
}

/* Return true if VR is an INTEGER_CST singleton.  */

static inline bool
range_int_cst_singleton_p (value_range_t *vr)
{
  return (range_int_cst_p (vr)
          && tree_int_cst_equal (vr->min, vr->max));
}

/* Return true if value range VR involves at least one symbol.  */

static inline bool
symbolic_range_p (value_range_t *vr)
{
  return (!is_gimple_min_invariant (vr->min)
          || !is_gimple_min_invariant (vr->max));
}

/* Return true if value range VR uses an overflow infinity.  */

static inline bool
overflow_infinity_range_p (value_range_t *vr)
{
  return (vr->type == VR_RANGE
          && (is_overflow_infinity (vr->min)
              || is_overflow_infinity (vr->max)));
}

/* Return false if we cannot make a valid comparison based on VR;
   this will be the case if it uses an overflow infinity and overflow
   is not undefined (i.e., -fno-strict-overflow is in effect).
   Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
   uses an overflow infinity.
*/

static bool
usable_range_p (value_range_t *vr, bool *strict_overflow_p)
{
  gcc_assert (vr->type == VR_RANGE);
  if (is_overflow_infinity (vr->min))
    {
      *strict_overflow_p = true;
      if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
        return false;
    }
  if (is_overflow_infinity (vr->max))
    {
      *strict_overflow_p = true;
      if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
        return false;
    }
  return true;
}


/* Like tree_expr_nonnegative_warnv_p, but this function uses value
   ranges obtained so far.  */

static bool
vrp_expr_computes_nonnegative (tree expr, bool *strict_overflow_p)
{
  return (tree_expr_nonnegative_warnv_p (expr, strict_overflow_p)
          || (TREE_CODE (expr) == SSA_NAME
              && ssa_name_nonnegative_p (expr)));
}

/* Return true if the result of assignment STMT is known to be non-negative.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  switch (get_gimple_rhs_class (code))
    {
    case GIMPLE_UNARY_RHS:
      return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
                                             gimple_expr_type (stmt),
                                             gimple_assign_rhs1 (stmt),
                                             strict_overflow_p);
    case GIMPLE_BINARY_RHS:
      return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
                                              gimple_expr_type (stmt),
                                              gimple_assign_rhs1 (stmt),
                                              gimple_assign_rhs2 (stmt),
                                              strict_overflow_p);
    case GIMPLE_SINGLE_RHS:
      return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
                                              strict_overflow_p);
    case GIMPLE_INVALID_RHS:
      gcc_unreachable ();
    default:
      gcc_unreachable ();
    }
}

/* Return true if the return value of call STMT is known to be non-negative.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  tree arg0 = gimple_call_num_args (stmt) > 0 ?
    gimple_call_arg (stmt, 0) : NULL_TREE;
  tree arg1 = gimple_call_num_args (stmt) > 1 ?
    gimple_call_arg (stmt, 1) : NULL_TREE;

  return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
                                        gimple_call_fndecl (stmt),
                                        arg0,
                                        arg1,
                                        strict_overflow_p);
}

/* Return true if STMT is known to compute a non-negative value.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
    case GIMPLE_CALL:
      return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
    default:
      gcc_unreachable ();
    }
}

/* Return true if the result of assignment STMT is known to be non-zero.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  switch (get_gimple_rhs_class (code))
    {
    case GIMPLE_UNARY_RHS:
      return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
                                         gimple_expr_type (stmt),
                                         gimple_assign_rhs1 (stmt),
                                         strict_overflow_p);
    case GIMPLE_BINARY_RHS:
      return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
                                          gimple_expr_type (stmt),
                                          gimple_assign_rhs1 (stmt),
                                          gimple_assign_rhs2 (stmt),
                                          strict_overflow_p);
    case GIMPLE_SINGLE_RHS:
      return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
                                          strict_overflow_p);
    case GIMPLE_INVALID_RHS:
      gcc_unreachable ();
    default:
      gcc_unreachable ();
    }
}

/* Return true if STMT is known to compute a non-zero value.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
    case GIMPLE_CALL:
      return gimple_alloca_call_p (stmt);
    default:
      gcc_unreachable ();
    }
}

/* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
   obtained so far.  */

static bool
vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
{
  if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
    return true;

  /* If we have an expression of the form &X->a, then the expression
     is nonnull if X is nonnull.  */
  if (is_gimple_assign (stmt)
      && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
    {
      tree expr = gimple_assign_rhs1 (stmt);
      tree base = get_base_address (TREE_OPERAND (expr, 0));

      if (base != NULL_TREE
          && TREE_CODE (base) == INDIRECT_REF
          && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
        {
          value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
          if (range_is_nonnull (vr))
            return true;
        }
    }

  return false;
}

/* Returns true if EXPR is a valid value (as expected by compare_values) --
   a gimple invariant, or SSA_NAME +- CST.  */

static bool
valid_value_p (tree expr)
{
  if (TREE_CODE (expr) == SSA_NAME)
    return true;

  if (TREE_CODE (expr) == PLUS_EXPR
      || TREE_CODE (expr) == MINUS_EXPR)
    return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
            && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);

  return is_gimple_min_invariant (expr);
}

/* Return
   1 if VAL < VAL2
   0 if !(VAL < VAL2)
   -2 if those are incomparable.  */
static inline int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.
*/ 1022 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST) 1023 { 1024 if (TYPE_UNSIGNED (TREE_TYPE (val))) 1025 return INT_CST_LT_UNSIGNED (val, val2); 1026 else 1027 { 1028 if (INT_CST_LT (val, val2)) 1029 return 1; 1030 } 1031 } 1032 else 1033 { 1034 tree tcmp; 1035 1036 fold_defer_overflow_warnings (); 1037 1038 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2); 1039 1040 fold_undefer_and_ignore_overflow_warnings (); 1041 1042 if (!tcmp 1043 || TREE_CODE (tcmp) != INTEGER_CST) 1044 return -2; 1045 1046 if (!integer_zerop (tcmp)) 1047 return 1; 1048 } 1049 1050 /* val >= val2, not considering overflow infinity. */ 1051 if (is_negative_overflow_infinity (val)) 1052 return is_negative_overflow_infinity (val2) ? 0 : 1; 1053 else if (is_positive_overflow_infinity (val2)) 1054 return is_positive_overflow_infinity (val) ? 0 : 1; 1055 1056 return 0; 1057 } 1058 1059 /* Compare two values VAL1 and VAL2. Return 1060 1061 -2 if VAL1 and VAL2 cannot be compared at compile-time, 1062 -1 if VAL1 < VAL2, 1063 0 if VAL1 == VAL2, 1064 +1 if VAL1 > VAL2, and 1065 +2 if VAL1 != VAL2 1066 1067 This is similar to tree_int_cst_compare but supports pointer values 1068 and values that cannot be compared at compile time. 1069 1070 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to 1071 true if the return value is only valid if we assume that signed 1072 overflow is undefined. */ 1073 1074 static int 1075 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p) 1076 { 1077 if (val1 == val2) 1078 return 0; 1079 1080 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or 1081 both integers. */ 1082 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1)) 1083 == POINTER_TYPE_P (TREE_TYPE (val2))); 1084 /* Convert the two values into the same type. This is needed because 1085 sizetype causes sign extension even for unsigned types. */ 1086 val2 = fold_convert (TREE_TYPE (val1), val2); 1087 STRIP_USELESS_TYPE_CONVERSION (val2); 1088 1089 if ((TREE_CODE (val1) == SSA_NAME 1090 || TREE_CODE (val1) == PLUS_EXPR 1091 || TREE_CODE (val1) == MINUS_EXPR) 1092 && (TREE_CODE (val2) == SSA_NAME 1093 || TREE_CODE (val2) == PLUS_EXPR 1094 || TREE_CODE (val2) == MINUS_EXPR)) 1095 { 1096 tree n1, c1, n2, c2; 1097 enum tree_code code1, code2; 1098 1099 /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME', 1100 return -1 or +1 accordingly. If VAL1 and VAL2 don't use the 1101 same name, return -2. */ 1102 if (TREE_CODE (val1) == SSA_NAME) 1103 { 1104 code1 = SSA_NAME; 1105 n1 = val1; 1106 c1 = NULL_TREE; 1107 } 1108 else 1109 { 1110 code1 = TREE_CODE (val1); 1111 n1 = TREE_OPERAND (val1, 0); 1112 c1 = TREE_OPERAND (val1, 1); 1113 if (tree_int_cst_sgn (c1) == -1) 1114 { 1115 if (is_negative_overflow_infinity (c1)) 1116 return -2; 1117 c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1); 1118 if (!c1) 1119 return -2; 1120 code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR; 1121 } 1122 } 1123 1124 if (TREE_CODE (val2) == SSA_NAME) 1125 { 1126 code2 = SSA_NAME; 1127 n2 = val2; 1128 c2 = NULL_TREE; 1129 } 1130 else 1131 { 1132 code2 = TREE_CODE (val2); 1133 n2 = TREE_OPERAND (val2, 0); 1134 c2 = TREE_OPERAND (val2, 1); 1135 if (tree_int_cst_sgn (c2) == -1) 1136 { 1137 if (is_negative_overflow_infinity (c2)) 1138 return -2; 1139 c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2); 1140 if (!c2) 1141 return -2; 1142 code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR; 1143 } 1144 } 1145 1146 /* Both values must use the same name. 
*/ 1147 if (n1 != n2) 1148 return -2; 1149 1150 if (code1 == SSA_NAME 1151 && code2 == SSA_NAME) 1152 /* NAME == NAME */ 1153 return 0; 1154 1155 /* If overflow is defined we cannot simplify more. */ 1156 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1))) 1157 return -2; 1158 1159 if (strict_overflow_p != NULL 1160 && (code1 == SSA_NAME || !TREE_NO_WARNING (val1)) 1161 && (code2 == SSA_NAME || !TREE_NO_WARNING (val2))) 1162 *strict_overflow_p = true; 1163 1164 if (code1 == SSA_NAME) 1165 { 1166 if (code2 == PLUS_EXPR) 1167 /* NAME < NAME + CST */ 1168 return -1; 1169 else if (code2 == MINUS_EXPR) 1170 /* NAME > NAME - CST */ 1171 return 1; 1172 } 1173 else if (code1 == PLUS_EXPR) 1174 { 1175 if (code2 == SSA_NAME) 1176 /* NAME + CST > NAME */ 1177 return 1; 1178 else if (code2 == PLUS_EXPR) 1179 /* NAME + CST1 > NAME + CST2, if CST1 > CST2 */ 1180 return compare_values_warnv (c1, c2, strict_overflow_p); 1181 else if (code2 == MINUS_EXPR) 1182 /* NAME + CST1 > NAME - CST2 */ 1183 return 1; 1184 } 1185 else if (code1 == MINUS_EXPR) 1186 { 1187 if (code2 == SSA_NAME) 1188 /* NAME - CST < NAME */ 1189 return -1; 1190 else if (code2 == PLUS_EXPR) 1191 /* NAME - CST1 < NAME + CST2 */ 1192 return -1; 1193 else if (code2 == MINUS_EXPR) 1194 /* NAME - CST1 > NAME - CST2, if CST1 < CST2. Notice that 1195 C1 and C2 are swapped in the call to compare_values. */ 1196 return compare_values_warnv (c2, c1, strict_overflow_p); 1197 } 1198 1199 gcc_unreachable (); 1200 } 1201 1202 /* We cannot compare non-constants. */ 1203 if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)) 1204 return -2; 1205 1206 if (!POINTER_TYPE_P (TREE_TYPE (val1))) 1207 { 1208 /* We cannot compare overflowed values, except for overflow 1209 infinities. */ 1210 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2)) 1211 { 1212 if (strict_overflow_p != NULL) 1213 *strict_overflow_p = true; 1214 if (is_negative_overflow_infinity (val1)) 1215 return is_negative_overflow_infinity (val2) ? 0 : -1; 1216 else if (is_negative_overflow_infinity (val2)) 1217 return 1; 1218 else if (is_positive_overflow_infinity (val1)) 1219 return is_positive_overflow_infinity (val2) ? 0 : 1; 1220 else if (is_positive_overflow_infinity (val2)) 1221 return -1; 1222 return -2; 1223 } 1224 1225 return tree_int_cst_compare (val1, val2); 1226 } 1227 else 1228 { 1229 tree t; 1230 1231 /* First see if VAL1 and VAL2 are not the same. */ 1232 if (val1 == val2 || operand_equal_p (val1, val2, 0)) 1233 return 0; 1234 1235 /* If VAL1 is a lower address than VAL2, return -1. */ 1236 if (operand_less_p (val1, val2) == 1) 1237 return -1; 1238 1239 /* If VAL1 is a higher address than VAL2, return +1. */ 1240 if (operand_less_p (val2, val1) == 1) 1241 return 1; 1242 1243 /* If VAL1 is different than VAL2, return +2. 1244 For integer constants we either have already returned -1 or 1 1245 or they are equivalent. We still might succeed in proving 1246 something about non-trivial operands. */ 1247 if (TREE_CODE (val1) != INTEGER_CST 1248 || TREE_CODE (val2) != INTEGER_CST) 1249 { 1250 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2); 1251 if (t && integer_onep (t)) 1252 return 2; 1253 } 1254 1255 return -2; 1256 } 1257 } 1258 1259 /* Compare values like compare_values_warnv, but treat comparisons of 1260 nonconstants which rely on undefined overflow as incomparable. 
*/

static int
compare_values (tree val1, tree val2)
{
  bool sop;
  int ret;

  sop = false;
  ret = compare_values_warnv (val1, val2, &sop);
  if (sop
      && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
    ret = -2;
  return ret;
}


/* Return 1 if VAL is inside value range VR (VR->MIN <= VAL <= VR->MAX),
          0 if VAL is not inside VR,
         -2 if we cannot tell either way.

   FIXME, the current semantics of this function are a bit quirky
          when taken in the context of VRP.  In here we do not care
          about VR's type.  If VR is the anti-range ~[3, 5] the call
          value_inside_range (4, VR) will return 1.

          This is counter-intuitive in a strict sense, but the callers
          currently expect this.  They are calling the function
          merely to determine whether VR->MIN <= VAL <= VR->MAX.  The
          callers are applying the VR_RANGE/VR_ANTI_RANGE semantics
          themselves.

          This also applies to value_ranges_intersect_p and
          range_includes_zero_p.  The semantics of VR_RANGE and
          VR_ANTI_RANGE should be encoded here, but that also means
          adapting the users of these functions to the new semantics.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

static inline int
value_inside_range (tree val, value_range_t * vr)
{
  int cmp1, cmp2;

  cmp1 = operand_less_p (val, vr->min);
  if (cmp1 == -2)
    return -2;
  if (cmp1 == 1)
    return 0;

  cmp2 = operand_less_p (vr->max, val);
  if (cmp2 == -2)
    return -2;

  return !cmp2;
}


/* Return true if value ranges VR0 and VR1 have a non-empty
   intersection.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

static inline bool
value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
{
  /* The value ranges do not intersect if the maximum of the first range is
     less than the minimum of the second range or vice versa.
     When those relations are unknown, we can't do any better.  */
  if (operand_less_p (vr0->max, vr1->min) != 0)
    return false;
  if (operand_less_p (vr1->max, vr0->min) != 0)
    return false;
  return true;
}


/* Return true if VR includes the value zero, false otherwise.  FIXME,
   currently this will return false for an anti-range like ~[-4, 3].
   This will be wrong when the semantics of value_inside_range are
   modified (currently the users of this function expect these
   semantics).  */

static inline bool
range_includes_zero_p (value_range_t *vr)
{
  tree zero;

  gcc_assert (vr->type != VR_UNDEFINED
              && vr->type != VR_VARYING
              && !symbolic_range_p (vr));

  zero = build_int_cst (TREE_TYPE (vr->min), 0);
  return (value_inside_range (zero, vr) == 1);
}

/* Return true if T, an SSA_NAME, is known to be nonnegative.  Return
   false otherwise or if no value range information is available.  */

bool
ssa_name_nonnegative_p (const_tree t)
{
  value_range_t *vr = get_value_range (t);

  if (INTEGRAL_TYPE_P (TREE_TYPE (t))
      && TYPE_UNSIGNED (TREE_TYPE (t)))
    return true;

  if (!vr)
    return false;

  /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
     which would return a useful value should be encoded as a VR_RANGE.
*/ 1376 if (vr->type == VR_RANGE) 1377 { 1378 int result = compare_values (vr->min, integer_zero_node); 1379 1380 return (result == 0 || result == 1); 1381 } 1382 return false; 1383 } 1384 1385 /* If OP has a value range with a single constant value return that, 1386 otherwise return NULL_TREE. This returns OP itself if OP is a 1387 constant. */ 1388 1389 static tree 1390 op_with_constant_singleton_value_range (tree op) 1391 { 1392 value_range_t *vr; 1393 1394 if (is_gimple_min_invariant (op)) 1395 return op; 1396 1397 if (TREE_CODE (op) != SSA_NAME) 1398 return NULL_TREE; 1399 1400 vr = get_value_range (op); 1401 if (vr->type == VR_RANGE 1402 && operand_equal_p (vr->min, vr->max, 0) 1403 && is_gimple_min_invariant (vr->min)) 1404 return vr->min; 1405 1406 return NULL_TREE; 1407 } 1408 1409 1410 /* Extract value range information from an ASSERT_EXPR EXPR and store 1411 it in *VR_P. */ 1412 1413 static void 1414 extract_range_from_assert (value_range_t *vr_p, tree expr) 1415 { 1416 tree var, cond, limit, min, max, type; 1417 value_range_t *var_vr, *limit_vr; 1418 enum tree_code cond_code; 1419 1420 var = ASSERT_EXPR_VAR (expr); 1421 cond = ASSERT_EXPR_COND (expr); 1422 1423 gcc_assert (COMPARISON_CLASS_P (cond)); 1424 1425 /* Find VAR in the ASSERT_EXPR conditional. */ 1426 if (var == TREE_OPERAND (cond, 0) 1427 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR 1428 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR) 1429 { 1430 /* If the predicate is of the form VAR COMP LIMIT, then we just 1431 take LIMIT from the RHS and use the same comparison code. */ 1432 cond_code = TREE_CODE (cond); 1433 limit = TREE_OPERAND (cond, 1); 1434 cond = TREE_OPERAND (cond, 0); 1435 } 1436 else 1437 { 1438 /* If the predicate is of the form LIMIT COMP VAR, then we need 1439 to flip around the comparison code to create the proper range 1440 for VAR. */ 1441 cond_code = swap_tree_comparison (TREE_CODE (cond)); 1442 limit = TREE_OPERAND (cond, 0); 1443 cond = TREE_OPERAND (cond, 1); 1444 } 1445 1446 limit = avoid_overflow_infinity (limit); 1447 1448 type = TREE_TYPE (var); 1449 gcc_assert (limit != var); 1450 1451 /* For pointer arithmetic, we only keep track of pointer equality 1452 and inequality. */ 1453 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR) 1454 { 1455 set_value_range_to_varying (vr_p); 1456 return; 1457 } 1458 1459 /* If LIMIT is another SSA name and LIMIT has a range of its own, 1460 try to use LIMIT's range to avoid creating symbolic ranges 1461 unnecessarily. */ 1462 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL; 1463 1464 /* LIMIT's range is only interesting if it has any useful information. */ 1465 if (limit_vr 1466 && (limit_vr->type == VR_UNDEFINED 1467 || limit_vr->type == VR_VARYING 1468 || symbolic_range_p (limit_vr))) 1469 limit_vr = NULL; 1470 1471 /* Initially, the new range has the same set of equivalences of 1472 VAR's range. This will be revised before returning the final 1473 value. Since assertions may be chained via mutually exclusive 1474 predicates, we will need to trim the set of equivalences before 1475 we are done. */ 1476 gcc_assert (vr_p->equiv == NULL); 1477 add_equivalence (&vr_p->equiv, var); 1478 1479 /* Extract a new range based on the asserted comparison for VAR and 1480 LIMIT's value range. Notice that if LIMIT has an anti-range, we 1481 will only use it for equality comparisons (EQ_EXPR). 
   For any
   other kind of assertion, we cannot derive a range from LIMIT's
   anti-range that can be used to describe the new range.  For
   instance, ASSERT_EXPR <x_2, x_2 <= b_4>.  If b_4 is ~[2, 10],
   then b_4 takes on the ranges [-INF, 1] and [11, +INF].  There is
   no single range for x_2 that could describe LE_EXPR, so we might
   as well build the range [b_4, +INF] for it.
   One special case we handle is extracting a range from a
   range test encoded as (unsigned)var + CST <= limit.  */
  if (TREE_CODE (cond) == NOP_EXPR
      || TREE_CODE (cond) == PLUS_EXPR)
    {
      if (TREE_CODE (cond) == PLUS_EXPR)
        {
          min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
                             TREE_OPERAND (cond, 1));
          max = int_const_binop (PLUS_EXPR, limit, min, 0);
          cond = TREE_OPERAND (cond, 0);
        }
      else
        {
          min = build_int_cst (TREE_TYPE (var), 0);
          max = limit;
        }

      /* Make sure not to set TREE_OVERFLOW on the final type
         conversion.  We are willingly interpreting large positive
         unsigned values as negative signed values here.  */
      min = force_fit_type_double (TREE_TYPE (var), TREE_INT_CST_LOW (min),
                                   TREE_INT_CST_HIGH (min), 0, false);
      max = force_fit_type_double (TREE_TYPE (var), TREE_INT_CST_LOW (max),
                                   TREE_INT_CST_HIGH (max), 0, false);

      /* We can transform a max, min range to an anti-range or
         vice-versa.  Use set_and_canonicalize_value_range which does
         this for us.  */
      if (cond_code == LE_EXPR)
        set_and_canonicalize_value_range (vr_p, VR_RANGE,
                                          min, max, vr_p->equiv);
      else if (cond_code == GT_EXPR)
        set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
                                          min, max, vr_p->equiv);
      else
        gcc_unreachable ();
    }
  else if (cond_code == EQ_EXPR)
    {
      enum value_range_type range_type;

      if (limit_vr)
        {
          range_type = limit_vr->type;
          min = limit_vr->min;
          max = limit_vr->max;
        }
      else
        {
          range_type = VR_RANGE;
          min = limit;
          max = limit;
        }

      set_value_range (vr_p, range_type, min, max, vr_p->equiv);

      /* When asserting the equality VAR == LIMIT and LIMIT is another
         SSA name, the new range will also inherit the equivalence set
         from LIMIT.  */
      if (TREE_CODE (limit) == SSA_NAME)
        add_equivalence (&vr_p->equiv, limit);
    }
  else if (cond_code == NE_EXPR)
    {
      /* As described above, when LIMIT's range is an anti-range and
         this assertion is an inequality (NE_EXPR), then we cannot
         derive anything from the anti-range.  For instance, if
         LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
         not imply that VAR's range is [0, 0].  So, in the case of
         anti-ranges, we just assert the inequality using LIMIT and
         not its anti-range.

         If LIMIT_VR is a range, we can only use it to build a new
         anti-range if LIMIT_VR is a single-valued range.  For
         instance, if LIMIT_VR is [0, 1], the predicate
         VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
         Rather, it means that for value 0 VAR should be ~[0, 0]
         and for value 1, VAR should be ~[1, 1].  We cannot
         represent these ranges.

         The only situation in which we can build a valid
         anti-range is when LIMIT_VR is a single-valued range
         (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX).  In that case,
         build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX].
*/ 1573 if (limit_vr 1574 && limit_vr->type == VR_RANGE 1575 && compare_values (limit_vr->min, limit_vr->max) == 0) 1576 { 1577 min = limit_vr->min; 1578 max = limit_vr->max; 1579 } 1580 else 1581 { 1582 /* In any other case, we cannot use LIMIT's range to build a 1583 valid anti-range. */ 1584 min = max = limit; 1585 } 1586 1587 /* If MIN and MAX cover the whole range for their type, then 1588 just use the original LIMIT. */ 1589 if (INTEGRAL_TYPE_P (type) 1590 && vrp_val_is_min (min) 1591 && vrp_val_is_max (max)) 1592 min = max = limit; 1593 1594 set_value_range (vr_p, VR_ANTI_RANGE, min, max, vr_p->equiv); 1595 } 1596 else if (cond_code == LE_EXPR || cond_code == LT_EXPR) 1597 { 1598 min = TYPE_MIN_VALUE (type); 1599 1600 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE) 1601 max = limit; 1602 else 1603 { 1604 /* If LIMIT_VR is of the form [N1, N2], we need to build the 1605 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for 1606 LT_EXPR. */ 1607 max = limit_vr->max; 1608 } 1609 1610 /* If the maximum value forces us to be out of bounds, simply punt. 1611 It would be pointless to try and do anything more since this 1612 all should be optimized away above us. */ 1613 if ((cond_code == LT_EXPR 1614 && compare_values (max, min) == 0) 1615 || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max))) 1616 set_value_range_to_varying (vr_p); 1617 else 1618 { 1619 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */ 1620 if (cond_code == LT_EXPR) 1621 { 1622 tree one = build_int_cst (TREE_TYPE (max), 1); 1623 max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max, one); 1624 if (EXPR_P (max)) 1625 TREE_NO_WARNING (max) = 1; 1626 } 1627 1628 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv); 1629 } 1630 } 1631 else if (cond_code == GE_EXPR || cond_code == GT_EXPR) 1632 { 1633 max = TYPE_MAX_VALUE (type); 1634 1635 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE) 1636 min = limit; 1637 else 1638 { 1639 /* If LIMIT_VR is of the form [N1, N2], we need to build the 1640 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for 1641 GT_EXPR. */ 1642 min = limit_vr->min; 1643 } 1644 1645 /* If the minimum value forces us to be out of bounds, simply punt. 1646 It would be pointless to try and do anything more since this 1647 all should be optimized away above us. */ 1648 if ((cond_code == GT_EXPR 1649 && compare_values (min, max) == 0) 1650 || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min))) 1651 set_value_range_to_varying (vr_p); 1652 else 1653 { 1654 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */ 1655 if (cond_code == GT_EXPR) 1656 { 1657 tree one = build_int_cst (TREE_TYPE (min), 1); 1658 min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min, one); 1659 if (EXPR_P (min)) 1660 TREE_NO_WARNING (min) = 1; 1661 } 1662 1663 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv); 1664 } 1665 } 1666 else 1667 gcc_unreachable (); 1668 1669 /* If VAR already had a known range, it may happen that the new 1670 range we have computed and VAR's range are not compatible. For 1671 instance, 1672 1673 if (p_5 == NULL) 1674 p_6 = ASSERT_EXPR <p_5, p_5 == NULL>; 1675 x_7 = p_6->fld; 1676 p_8 = ASSERT_EXPR <p_6, p_6 != NULL>; 1677 1678 While the above comes from a faulty program, it will cause an ICE 1679 later because p_8 and p_6 will have incompatible ranges and at 1680 the same time will be considered equivalent. 
   A similar situation
   would arise from

        if (i_5 > 10)
          i_6 = ASSERT_EXPR <i_5, i_5 > 10>;
        if (i_5 < 5)
          i_7 = ASSERT_EXPR <i_6, i_6 < 5>;

   Again i_6 and i_7 will have incompatible ranges.  It would be
   pointless to try and do anything with i_7's range because
   anything dominated by 'if (i_5 < 5)' will be optimized away.
   Note that, due to the way in which simulation proceeds, the
   statement i_7 = ASSERT_EXPR <...> would never be visited because
   the conditional 'if (i_5 < 5)' always evaluates to false.  However,
   this extra check does not hurt and may protect against future
   changes to VRP that may get into a situation similar to the
   NULL pointer dereference example.

   Note that these compatibility tests are only needed when dealing
   with ranges or a mix of range and anti-range.  If VAR_VR and VR_P
   are both anti-ranges, they will always be compatible, because two
   anti-ranges will always have a non-empty intersection.  */

  var_vr = get_value_range (var);

  /* We may need to make adjustments when VR_P and VAR_VR are numeric
     ranges or anti-ranges.  */
  if (vr_p->type == VR_VARYING
      || vr_p->type == VR_UNDEFINED
      || var_vr->type == VR_VARYING
      || var_vr->type == VR_UNDEFINED
      || symbolic_range_p (vr_p)
      || symbolic_range_p (var_vr))
    return;

  if (var_vr->type == VR_RANGE && vr_p->type == VR_RANGE)
    {
      /* If the two ranges have a non-empty intersection, we can
         refine the resulting range.  Since the assert expression
         creates an equivalency and at the same time it asserts a
         predicate, we can take the intersection of the two ranges to
         get better precision.  */
      if (value_ranges_intersect_p (var_vr, vr_p))
        {
          /* Use the larger of the two minimums.  */
          if (compare_values (vr_p->min, var_vr->min) == -1)
            min = var_vr->min;
          else
            min = vr_p->min;

          /* Use the smaller of the two maximums.  */
          if (compare_values (vr_p->max, var_vr->max) == 1)
            max = var_vr->max;
          else
            max = vr_p->max;

          set_value_range (vr_p, vr_p->type, min, max, vr_p->equiv);
        }
      else
        {
          /* The two ranges do not intersect, set the new range to
             VARYING, because we will not be able to do anything
             meaningful with it.  */
          set_value_range_to_varying (vr_p);
        }
    }
  else if ((var_vr->type == VR_RANGE && vr_p->type == VR_ANTI_RANGE)
           || (var_vr->type == VR_ANTI_RANGE && vr_p->type == VR_RANGE))
    {
      /* A range and an anti-range will cancel each other only if
         their ends are the same.  For instance, in the example above,
         p_8's range ~[0, 0] and p_6's range [0, 0] are incompatible,
         so VR_P should be set to VR_VARYING.  */
      if (compare_values (var_vr->min, vr_p->min) == 0
          && compare_values (var_vr->max, vr_p->max) == 0)
        set_value_range_to_varying (vr_p);
      else
        {
          tree min, max, anti_min, anti_max, real_min, real_max;
          int cmp;

          /* We want to compute the logical AND of the two ranges;
             there are three cases to consider.


             1. The VR_ANTI_RANGE range is completely within the
                VR_RANGE and the endpoints of the ranges are
                different.  In that case the resulting range
                should be whichever range is more precise.
                Typically that will be the VR_RANGE.

             2. The VR_ANTI_RANGE is completely disjoint from
                the VR_RANGE.  In this case the resulting range
                should be the VR_RANGE.

             3. There is some overlap between the VR_ANTI_RANGE
                and the VR_RANGE.

                3a. If the high limit of the VR_ANTI_RANGE resides
                    within the VR_RANGE, then the result is a new
                    VR_RANGE starting at the high limit of the
                    VR_ANTI_RANGE + 1 and extending to the
                    high limit of the original VR_RANGE.

                3b. If the low limit of the VR_ANTI_RANGE resides
                    within the VR_RANGE, then the result is a new
                    VR_RANGE starting at the low limit of the original
                    VR_RANGE and extending to the low limit of the
                    VR_ANTI_RANGE - 1.  */
          if (vr_p->type == VR_ANTI_RANGE)
            {
              anti_min = vr_p->min;
              anti_max = vr_p->max;
              real_min = var_vr->min;
              real_max = var_vr->max;
            }
          else
            {
              anti_min = var_vr->min;
              anti_max = var_vr->max;
              real_min = vr_p->min;
              real_max = vr_p->max;
            }


          /* Case 1, VR_ANTI_RANGE completely within VR_RANGE,
             not including any endpoints.  */
          if (compare_values (anti_max, real_max) == -1
              && compare_values (anti_min, real_min) == 1)
            {
              /* If the range covers the whole valid range of
                 the type, keep the anti-range.  */
              if (!vrp_val_is_min (real_min)
                  || !vrp_val_is_max (real_max))
                set_value_range (vr_p, VR_RANGE, real_min,
                                 real_max, vr_p->equiv);
            }
          /* Case 2, VR_ANTI_RANGE completely disjoint from
             VR_RANGE.  */
          else if (compare_values (anti_min, real_max) == 1
                   || compare_values (anti_max, real_min) == -1)
            {
              set_value_range (vr_p, VR_RANGE, real_min,
                               real_max, vr_p->equiv);
            }
          /* Case 3a, the anti-range extends into the low
             part of the real range.  Thus creating a new
             low for the real range.  */
          else if (((cmp = compare_values (anti_max, real_min)) == 1
                    || cmp == 0)
                   && compare_values (anti_max, real_max) == -1)
            {
              gcc_assert (!is_positive_overflow_infinity (anti_max));
              if (needs_overflow_infinity (TREE_TYPE (anti_max))
                  && vrp_val_is_max (anti_max))
                {
                  if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
                    {
                      set_value_range_to_varying (vr_p);
                      return;
                    }
                  min = positive_overflow_infinity (TREE_TYPE (var_vr->min));
                }
              else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
                min = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min),
                                   anti_max,
                                   build_int_cst (TREE_TYPE (var_vr->min), 1));
              else
                min = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (var_vr->min),
                                   anti_max, size_int (1));
              max = real_max;
              set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
            }
          /* Case 3b, the anti-range extends into the high
             part of the real range.  Thus creating a new
             high for the real range.
*/ 1856 else if (compare_values (anti_min, real_min) == 1 1857 && ((cmp = compare_values (anti_min, real_max)) == -1 1858 || cmp == 0)) 1859 { 1860 gcc_assert (!is_negative_overflow_infinity (anti_min)); 1861 if (needs_overflow_infinity (TREE_TYPE (anti_min)) 1862 && vrp_val_is_min (anti_min)) 1863 { 1864 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min))) 1865 { 1866 set_value_range_to_varying (vr_p); 1867 return; 1868 } 1869 max = negative_overflow_infinity (TREE_TYPE (var_vr->min)); 1870 } 1871 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min))) 1872 max = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min), 1873 anti_min, 1874 build_int_cst (TREE_TYPE (var_vr->min), 1)); 1875 else 1876 max = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (var_vr->min), 1877 anti_min, 1878 size_int (-1)); 1879 min = real_min; 1880 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv); 1881 } 1882 } 1883 } 1884 } 1885 1886 1887 /* Extract range information from SSA name VAR and store it in VR. If 1888 VAR has an interesting range, use it. Otherwise, create the 1889 range [VAR, VAR] and return it. This is useful in situations where 1890 we may have conditionals testing values of VARYING names. For 1891 instance, 1892 1893 x_3 = y_5; 1894 if (x_3 > y_5) 1895 ... 1896 1897 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is 1898 always false. */ 1899 1900 static void 1901 extract_range_from_ssa_name (value_range_t *vr, tree var) 1902 { 1903 value_range_t *var_vr = get_value_range (var); 1904 1905 if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING) 1906 copy_value_range (vr, var_vr); 1907 else 1908 set_value_range (vr, VR_RANGE, var, var, NULL); 1909 1910 add_equivalence (&vr->equiv, var); 1911 } 1912 1913 1914 /* Wrapper around int_const_binop. If the operation overflows and we 1915 are not using wrapping arithmetic, then adjust the result to be 1916 -INF or +INF depending on CODE, VAL1 and VAL2. This can return 1917 NULL_TREE if we need to use an overflow infinity representation but 1918 the type does not support it. */ 1919 1920 static tree 1921 vrp_int_const_binop (enum tree_code code, tree val1, tree val2) 1922 { 1923 tree res; 1924 1925 res = int_const_binop (code, val1, val2, 0); 1926 1927 /* If we are using unsigned arithmetic, operate symbolically 1928 on -INF and +INF as int_const_binop only handles signed overflow. */ 1929 if (TYPE_UNSIGNED (TREE_TYPE (val1))) 1930 { 1931 int checkz = compare_values (res, val1); 1932 bool overflow = false; 1933 1934 /* Ensure that res = val1 [+*] val2 >= val1 1935 or that res = val1 - val2 <= val1. */ 1936 if ((code == PLUS_EXPR 1937 && !(checkz == 1 || checkz == 0)) 1938 || (code == MINUS_EXPR 1939 && !(checkz == 0 || checkz == -1))) 1940 { 1941 overflow = true; 1942 } 1943 /* Checking for multiplication overflow is done by dividing the 1944 output of the multiplication by the first input of the 1945 multiplication. If the result of that division operation is 1946 not equal to the second input of the multiplication, then the 1947 multiplication overflowed. 
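For instance, with 32-bit unsigned operands, 65536 * 65536 wraps to 0, and 0 / 65536 == 0 differs from 65536, so the overflow is detected.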
*/ 1948 else if (code == MULT_EXPR && !integer_zerop (val1)) 1949 { 1950 tree tmp = int_const_binop (TRUNC_DIV_EXPR, 1951 res, 1952 val1, 0); 1953 int check = compare_values (tmp, val2); 1954 1955 if (check != 0) 1956 overflow = true; 1957 } 1958 1959 if (overflow) 1960 { 1961 res = copy_node (res); 1962 TREE_OVERFLOW (res) = 1; 1963 } 1964 1965 } 1966 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1))) 1967 /* If the signed operation wraps then int_const_binop has done 1968 everything we want. */ 1969 ; 1970 else if ((TREE_OVERFLOW (res) 1971 && !TREE_OVERFLOW (val1) 1972 && !TREE_OVERFLOW (val2)) 1973 || is_overflow_infinity (val1) 1974 || is_overflow_infinity (val2)) 1975 { 1976 /* If the operation overflowed but neither VAL1 nor VAL2 are 1977 overflown, return -INF or +INF depending on the operation 1978 and the combination of signs of the operands. */ 1979 int sgn1 = tree_int_cst_sgn (val1); 1980 int sgn2 = tree_int_cst_sgn (val2); 1981 1982 if (needs_overflow_infinity (TREE_TYPE (res)) 1983 && !supports_overflow_infinity (TREE_TYPE (res))) 1984 return NULL_TREE; 1985 1986 /* We have to punt on adding infinities of different signs, 1987 since we can't tell what the sign of the result should be. 1988 Likewise for subtracting infinities of the same sign. */ 1989 if (((code == PLUS_EXPR && sgn1 != sgn2) 1990 || (code == MINUS_EXPR && sgn1 == sgn2)) 1991 && is_overflow_infinity (val1) 1992 && is_overflow_infinity (val2)) 1993 return NULL_TREE; 1994 1995 /* Don't try to handle division or shifting of infinities. */ 1996 if ((code == TRUNC_DIV_EXPR 1997 || code == FLOOR_DIV_EXPR 1998 || code == CEIL_DIV_EXPR 1999 || code == EXACT_DIV_EXPR 2000 || code == ROUND_DIV_EXPR 2001 || code == RSHIFT_EXPR) 2002 && (is_overflow_infinity (val1) 2003 || is_overflow_infinity (val2))) 2004 return NULL_TREE; 2005 2006 /* Notice that we only need to handle the restricted set of 2007 operations handled by extract_range_from_binary_expr. 2008 Among them, only multiplication, addition and subtraction 2009 can yield overflow without overflown operands because we 2010 are working with integral types only... except in the 2011 case VAL1 = -INF and VAL2 = -1 which overflows to +INF 2012 for division too. */ 2013 2014 /* For multiplication, the sign of the overflow is given 2015 by the comparison of the signs of the operands. */ 2016 if ((code == MULT_EXPR && sgn1 == sgn2) 2017 /* For addition, the operands must be of the same sign 2018 to yield an overflow. Its sign is therefore that 2019 of one of the operands, for example the first. For 2020 infinite operands X + -INF is negative, not positive. */ 2021 || (code == PLUS_EXPR 2022 && (sgn1 >= 0 2023 ? !is_negative_overflow_infinity (val2) 2024 : is_positive_overflow_infinity (val2))) 2025 /* For subtraction, non-infinite operands must be of 2026 different signs to yield an overflow. Its sign is 2027 therefore that of the first operand or the opposite of 2028 that of the second operand. A first operand of 0 counts 2029 as positive here, for the corner case 0 - (-INF), which 2030 overflows, but must yield +INF. For infinite operands 0 2031 - INF is negative, not positive. */ 2032 || (code == MINUS_EXPR 2033 && (sgn1 >= 0 2034 ? !is_positive_overflow_infinity (val2) 2035 : is_negative_overflow_infinity (val2))) 2036 /* We only get in here with positive shift count, so the 2037 overflow direction is the same as the sign of val1. 2038 Actually rshift does not overflow at all, but we only 2039 handle the case of shifting overflowed -INF and +INF.
*/ 2040 || (code == RSHIFT_EXPR 2041 && sgn1 >= 0) 2042 /* For division, the only case is -INF / -1 = +INF. */ 2043 || code == TRUNC_DIV_EXPR 2044 || code == FLOOR_DIV_EXPR 2045 || code == CEIL_DIV_EXPR 2046 || code == EXACT_DIV_EXPR 2047 || code == ROUND_DIV_EXPR) 2048 return (needs_overflow_infinity (TREE_TYPE (res)) 2049 ? positive_overflow_infinity (TREE_TYPE (res)) 2050 : TYPE_MAX_VALUE (TREE_TYPE (res))); 2051 else 2052 return (needs_overflow_infinity (TREE_TYPE (res)) 2053 ? negative_overflow_infinity (TREE_TYPE (res)) 2054 : TYPE_MIN_VALUE (TREE_TYPE (res))); 2055 } 2056 2057 return res; 2058 } 2059 2060 2061 /* Extract range information from a binary expression EXPR based on 2062 the ranges of each of its operands and the expression code. */ 2063 2064 static void 2065 extract_range_from_binary_expr (value_range_t *vr, 2066 enum tree_code code, 2067 tree expr_type, tree op0, tree op1) 2068 { 2069 enum value_range_type type; 2070 tree min, max; 2071 int cmp; 2072 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; 2073 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; 2074 2075 /* Not all binary expressions can be applied to ranges in a 2076 meaningful way. Handle only arithmetic operations. */ 2077 if (code != PLUS_EXPR 2078 && code != MINUS_EXPR 2079 && code != POINTER_PLUS_EXPR 2080 && code != MULT_EXPR 2081 && code != TRUNC_DIV_EXPR 2082 && code != FLOOR_DIV_EXPR 2083 && code != CEIL_DIV_EXPR 2084 && code != EXACT_DIV_EXPR 2085 && code != ROUND_DIV_EXPR 2086 && code != TRUNC_MOD_EXPR 2087 && code != RSHIFT_EXPR 2088 && code != MIN_EXPR 2089 && code != MAX_EXPR 2090 && code != BIT_AND_EXPR 2091 && code != BIT_IOR_EXPR 2092 && code != TRUTH_AND_EXPR 2093 && code != TRUTH_OR_EXPR) 2094 { 2095 /* We can still do constant propagation here. */ 2096 tree const_op0 = op_with_constant_singleton_value_range (op0); 2097 tree const_op1 = op_with_constant_singleton_value_range (op1); 2098 if (const_op0 || const_op1) 2099 { 2100 tree tem = fold_binary (code, expr_type, 2101 const_op0 ? const_op0 : op0, 2102 const_op1 ? const_op1 : op1); 2103 if (tem 2104 && is_gimple_min_invariant (tem) 2105 && !is_overflow_infinity (tem)) 2106 { 2107 set_value_range (vr, VR_RANGE, tem, tem, NULL); 2108 return; 2109 } 2110 } 2111 set_value_range_to_varying (vr); 2112 return; 2113 } 2114 2115 /* Get value ranges for each operand. For constant operands, create 2116 a new value range with the operand to simplify processing. */ 2117 if (TREE_CODE (op0) == SSA_NAME) 2118 vr0 = *(get_value_range (op0)); 2119 else if (is_gimple_min_invariant (op0)) 2120 set_value_range_to_value (&vr0, op0, NULL); 2121 else 2122 set_value_range_to_varying (&vr0); 2123 2124 if (TREE_CODE (op1) == SSA_NAME) 2125 vr1 = *(get_value_range (op1)); 2126 else if (is_gimple_min_invariant (op1)) 2127 set_value_range_to_value (&vr1, op1, NULL); 2128 else 2129 set_value_range_to_varying (&vr1); 2130 2131 /* If either range is UNDEFINED, so is the result. */ 2132 if (vr0.type == VR_UNDEFINED || vr1.type == VR_UNDEFINED) 2133 { 2134 set_value_range_to_undefined (vr); 2135 return; 2136 } 2137 2138 /* The type of the resulting value range defaults to VR0.TYPE. */ 2139 type = vr0.type; 2140 2141 /* Refuse to operate on VARYING ranges, ranges of different kinds 2142 and symbolic ranges. As an exception, we allow BIT_AND_EXPR 2143 because we may be able to derive a useful range even if one of 2144 the operands is VR_VARYING or symbolic range. Similarly for 2145 divisions. 
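For instance, x & 0xff is known to lie in [0, 255] even when the range of x is VR_VARYING.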
TODO, we may be able to derive anti-ranges in 2146 some cases. */ 2147 if (code != BIT_AND_EXPR 2148 && code != TRUTH_AND_EXPR 2149 && code != TRUTH_OR_EXPR 2150 && code != TRUNC_DIV_EXPR 2151 && code != FLOOR_DIV_EXPR 2152 && code != CEIL_DIV_EXPR 2153 && code != EXACT_DIV_EXPR 2154 && code != ROUND_DIV_EXPR 2155 && code != TRUNC_MOD_EXPR 2156 && (vr0.type == VR_VARYING 2157 || vr1.type == VR_VARYING 2158 || vr0.type != vr1.type 2159 || symbolic_range_p (&vr0) 2160 || symbolic_range_p (&vr1))) 2161 { 2162 set_value_range_to_varying (vr); 2163 return; 2164 } 2165 2166 /* Now evaluate the expression to determine the new range. */ 2167 if (POINTER_TYPE_P (expr_type) 2168 || POINTER_TYPE_P (TREE_TYPE (op0)) 2169 || POINTER_TYPE_P (TREE_TYPE (op1))) 2170 { 2171 if (code == MIN_EXPR || code == MAX_EXPR) 2172 { 2173 /* For MIN/MAX expressions with pointers, we only care about 2174 nullness: if both are non-null, then the result is nonnull. 2175 If both are null, then the result is null. Otherwise the 2176 result is varying. */ 2177 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1)) 2178 set_value_range_to_nonnull (vr, expr_type); 2179 else if (range_is_null (&vr0) && range_is_null (&vr1)) 2180 set_value_range_to_null (vr, expr_type); 2181 else 2182 set_value_range_to_varying (vr); 2183 2184 return; 2185 } 2186 gcc_assert (code == POINTER_PLUS_EXPR); 2187 /* For pointer types, we are really only interested in asserting 2188 whether the expression evaluates to non-NULL. */ 2189 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1)) 2190 set_value_range_to_nonnull (vr, expr_type); 2191 else if (range_is_null (&vr0) && range_is_null (&vr1)) 2192 set_value_range_to_null (vr, expr_type); 2193 else 2194 set_value_range_to_varying (vr); 2195 2196 return; 2197 } 2198 2199 /* For integer ranges, apply the operation to each end of the 2200 range and see what we end up with. */ 2201 if (code == TRUTH_AND_EXPR 2202 || code == TRUTH_OR_EXPR) 2203 { 2204 /* If one of the operands is zero, we know that the whole 2205 expression evaluates to zero. */ 2206 if (code == TRUTH_AND_EXPR 2207 && ((vr0.type == VR_RANGE 2208 && integer_zerop (vr0.min) 2209 && integer_zerop (vr0.max)) 2210 || (vr1.type == VR_RANGE 2211 && integer_zerop (vr1.min) 2212 && integer_zerop (vr1.max)))) 2213 { 2214 type = VR_RANGE; 2215 min = max = build_int_cst (expr_type, 0); 2216 } 2217 /* If one of the operands is one, we know that the whole 2218 expression evaluates to one. */ 2219 else if (code == TRUTH_OR_EXPR 2220 && ((vr0.type == VR_RANGE 2221 && integer_onep (vr0.min) 2222 && integer_onep (vr0.max)) 2223 || (vr1.type == VR_RANGE 2224 && integer_onep (vr1.min) 2225 && integer_onep (vr1.max)))) 2226 { 2227 type = VR_RANGE; 2228 min = max = build_int_cst (expr_type, 1); 2229 } 2230 else if (vr0.type != VR_VARYING 2231 && vr1.type != VR_VARYING 2232 && vr0.type == vr1.type 2233 && !symbolic_range_p (&vr0) 2234 && !overflow_infinity_range_p (&vr0) 2235 && !symbolic_range_p (&vr1) 2236 && !overflow_infinity_range_p (&vr1)) 2237 { 2238 /* Boolean expressions cannot be folded with int_const_binop. */ 2239 min = fold_binary (code, expr_type, vr0.min, vr1.min); 2240 max = fold_binary (code, expr_type, vr0.max, vr1.max); 2241 } 2242 else 2243 { 2244 /* The result of a TRUTH_*_EXPR is always true or false.
*/ 2245 set_value_range_to_truthvalue (vr, expr_type); 2246 return; 2247 } 2248 } 2249 else if (code == PLUS_EXPR 2250 || code == MIN_EXPR 2251 || code == MAX_EXPR) 2252 { 2253 /* If we have a PLUS_EXPR with two VR_ANTI_RANGEs, drop to 2254 VR_VARYING. It would take more effort to compute a precise 2255 range for such a case. For example, if we have op0 == 1 and 2256 op1 == -1 with their ranges both being ~[0,0], we would have 2257 op0 + op1 == 0, so we cannot claim that the sum is in ~[0,0]. 2258 Note that we are guaranteed to have vr0.type == vr1.type at 2259 this point. */ 2260 if (vr0.type == VR_ANTI_RANGE) 2261 { 2262 if (code == PLUS_EXPR) 2263 { 2264 set_value_range_to_varying (vr); 2265 return; 2266 } 2267 /* For MIN_EXPR and MAX_EXPR with two VR_ANTI_RANGEs, 2268 the resulting VR_ANTI_RANGE is the same - intersection 2269 of the two ranges. */ 2270 min = vrp_int_const_binop (MAX_EXPR, vr0.min, vr1.min); 2271 max = vrp_int_const_binop (MIN_EXPR, vr0.max, vr1.max); 2272 } 2273 else 2274 { 2275 /* For operations that make the resulting range directly 2276 proportional to the original ranges, apply the operation to 2277 the same end of each range. */ 2278 min = vrp_int_const_binop (code, vr0.min, vr1.min); 2279 max = vrp_int_const_binop (code, vr0.max, vr1.max); 2280 } 2281 2282 /* If both additions overflowed the range kind is still correct. 2283 This happens regularly with subtracting something in unsigned 2284 arithmetic. 2285 ??? See PR30318 for all the cases we do not handle. */ 2286 if (code == PLUS_EXPR 2287 && (TREE_OVERFLOW (min) && !is_overflow_infinity (min)) 2288 && (TREE_OVERFLOW (max) && !is_overflow_infinity (max))) 2289 { 2290 min = build_int_cst_wide (TREE_TYPE (min), 2291 TREE_INT_CST_LOW (min), 2292 TREE_INT_CST_HIGH (min)); 2293 max = build_int_cst_wide (TREE_TYPE (max), 2294 TREE_INT_CST_LOW (max), 2295 TREE_INT_CST_HIGH (max)); 2296 } 2297 } 2298 else if (code == MULT_EXPR 2299 || code == TRUNC_DIV_EXPR 2300 || code == FLOOR_DIV_EXPR 2301 || code == CEIL_DIV_EXPR 2302 || code == EXACT_DIV_EXPR 2303 || code == ROUND_DIV_EXPR 2304 || code == RSHIFT_EXPR) 2305 { 2306 tree val[4]; 2307 size_t i; 2308 bool sop; 2309 2310 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs, 2311 drop to VR_VARYING. It would take more effort to compute a 2312 precise range for such a case. For example, if we have 2313 op0 == 65536 and op1 == 65536 with their ranges both being 2314 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so 2315 we cannot claim that the product is in ~[0,0]. Note that we 2316 are guaranteed to have vr0.type == vr1.type at this 2317 point. */ 2318 if (code == MULT_EXPR 2319 && vr0.type == VR_ANTI_RANGE 2320 && !TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0))) 2321 { 2322 set_value_range_to_varying (vr); 2323 return; 2324 } 2325 2326 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1], 2327 then drop to VR_VARYING. Outside of this range we get undefined 2328 behavior from the shift operation. We cannot even trust 2329 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl 2330 shifts, and the operation at the tree level may be widened. 
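For instance, with a 32-bit EXPR_TYPE a shift count known to lie in [0, 31] is acceptable, but a count range such as [0, 32], or one not known to be non-negative, drops the result to VR_VARYING.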
*/ 2331 if (code == RSHIFT_EXPR) 2332 { 2333 if (vr1.type == VR_ANTI_RANGE 2334 || !vrp_expr_computes_nonnegative (op1, &sop) 2335 || (operand_less_p 2336 (build_int_cst (TREE_TYPE (vr1.max), 2337 TYPE_PRECISION (expr_type) - 1), 2338 vr1.max) != 0)) 2339 { 2340 set_value_range_to_varying (vr); 2341 return; 2342 } 2343 } 2344 2345 else if ((code == TRUNC_DIV_EXPR 2346 || code == FLOOR_DIV_EXPR 2347 || code == CEIL_DIV_EXPR 2348 || code == EXACT_DIV_EXPR 2349 || code == ROUND_DIV_EXPR) 2350 && (vr0.type != VR_RANGE || symbolic_range_p (&vr0))) 2351 { 2352 /* For division, if op1 has VR_RANGE but op0 does not, something 2353 can be deduced just from that range. Say [min, max] / [4, max] 2354 gives the range [min / 4, max / 4]. */ 2355 if (vr1.type == VR_RANGE 2356 && !symbolic_range_p (&vr1) 2357 && !range_includes_zero_p (&vr1)) 2358 { 2359 vr0.type = type = VR_RANGE; 2360 vr0.min = vrp_val_min (TREE_TYPE (op0)); 2361 vr0.max = vrp_val_max (TREE_TYPE (op1)); 2362 } 2363 else 2364 { 2365 set_value_range_to_varying (vr); 2366 return; 2367 } 2368 } 2369 2370 /* For divisions, if op0 is VR_RANGE, we can deduce a range 2371 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can 2372 include 0. */ 2373 if ((code == TRUNC_DIV_EXPR 2374 || code == FLOOR_DIV_EXPR 2375 || code == CEIL_DIV_EXPR 2376 || code == EXACT_DIV_EXPR 2377 || code == ROUND_DIV_EXPR) 2378 && vr0.type == VR_RANGE 2379 && (vr1.type != VR_RANGE 2380 || symbolic_range_p (&vr1) 2381 || range_includes_zero_p (&vr1))) 2382 { 2383 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0); 2384 int cmp; 2385 2386 sop = false; 2387 min = NULL_TREE; 2388 max = NULL_TREE; 2389 if (vrp_expr_computes_nonnegative (op1, &sop) && !sop) 2390 { 2391 /* For unsigned division or when divisor is known 2392 to be non-negative, the range has to cover 2393 all numbers from 0 to max for positive max 2394 and all numbers from min to 0 for negative min. */ 2395 cmp = compare_values (vr0.max, zero); 2396 if (cmp == -1) 2397 max = zero; 2398 else if (cmp == 0 || cmp == 1) 2399 max = vr0.max; 2400 else 2401 type = VR_VARYING; 2402 cmp = compare_values (vr0.min, zero); 2403 if (cmp == 1) 2404 min = zero; 2405 else if (cmp == 0 || cmp == -1) 2406 min = vr0.min; 2407 else 2408 type = VR_VARYING; 2409 } 2410 else 2411 { 2412 /* Otherwise the range is -max .. max or min .. -min 2413 depending on which bound is bigger in absolute value, 2414 as the division can change the sign. */ 2415 abs_extent_range (vr, vr0.min, vr0.max); 2416 return; 2417 } 2418 if (type == VR_VARYING) 2419 { 2420 set_value_range_to_varying (vr); 2421 return; 2422 } 2423 } 2424 2425 /* Multiplications and divisions are a bit tricky to handle: 2426 depending on the mix of signs we have in the two ranges, we 2427 need to operate on different values to get the minimum and 2428 maximum values for the new range. One approach is to figure 2429 out all the variations of range combinations and do the 2430 operations. 2431 2432 However, this involves several calls to compare_values and it 2433 is pretty convoluted. It's simpler to do the 4 operations 2434 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP 2435 MAX1) and then figure the smallest and largest values to form 2436 the new range. */ 2437 else 2438 { 2439 gcc_assert ((vr0.type == VR_RANGE 2440 || (code == MULT_EXPR && vr0.type == VR_ANTI_RANGE)) 2441 && vr0.type == vr1.type); 2442 2443 /* Compute the 4 cross operations.
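That is, MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1; combinations whose operands coincide with an earlier one are skipped, since they cannot produce a new extreme.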
*/ 2444 sop = false; 2445 val[0] = vrp_int_const_binop (code, vr0.min, vr1.min); 2446 if (val[0] == NULL_TREE) 2447 sop = true; 2448 2449 if (vr1.max == vr1.min) 2450 val[1] = NULL_TREE; 2451 else 2452 { 2453 val[1] = vrp_int_const_binop (code, vr0.min, vr1.max); 2454 if (val[1] == NULL_TREE) 2455 sop = true; 2456 } 2457 2458 if (vr0.max == vr0.min) 2459 val[2] = NULL_TREE; 2460 else 2461 { 2462 val[2] = vrp_int_const_binop (code, vr0.max, vr1.min); 2463 if (val[2] == NULL_TREE) 2464 sop = true; 2465 } 2466 2467 if (vr0.min == vr0.max || vr1.min == vr1.max) 2468 val[3] = NULL_TREE; 2469 else 2470 { 2471 val[3] = vrp_int_const_binop (code, vr0.max, vr1.max); 2472 if (val[3] == NULL_TREE) 2473 sop = true; 2474 } 2475 2476 if (sop) 2477 { 2478 set_value_range_to_varying (vr); 2479 return; 2480 } 2481 2482 /* Set MIN to the minimum of VAL[i] and MAX to the maximum 2483 of VAL[i]. */ 2484 min = val[0]; 2485 max = val[0]; 2486 for (i = 1; i < 4; i++) 2487 { 2488 if (!is_gimple_min_invariant (min) 2489 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min)) 2490 || !is_gimple_min_invariant (max) 2491 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max))) 2492 break; 2493 2494 if (val[i]) 2495 { 2496 if (!is_gimple_min_invariant (val[i]) 2497 || (TREE_OVERFLOW (val[i]) 2498 && !is_overflow_infinity (val[i]))) 2499 { 2500 /* If we found an overflowed value, set MIN and MAX 2501 to it so that we set the resulting range to 2502 VARYING. */ 2503 min = max = val[i]; 2504 break; 2505 } 2506 2507 if (compare_values (val[i], min) == -1) 2508 min = val[i]; 2509 2510 if (compare_values (val[i], max) == 1) 2511 max = val[i]; 2512 } 2513 } 2514 } 2515 } 2516 else if (code == TRUNC_MOD_EXPR) 2517 { 2518 bool sop = false; 2519 if (vr1.type != VR_RANGE 2520 || symbolic_range_p (&vr1) 2521 || range_includes_zero_p (&vr1) 2522 || vrp_val_is_min (vr1.min)) 2523 { 2524 set_value_range_to_varying (vr); 2525 return; 2526 } 2527 type = VR_RANGE; 2528 /* Compute MAX <|vr1.min|, |vr1.max|> - 1. */ 2529 max = fold_unary_to_constant (ABS_EXPR, TREE_TYPE (vr1.min), vr1.min); 2530 if (tree_int_cst_lt (max, vr1.max)) 2531 max = vr1.max; 2532 max = int_const_binop (MINUS_EXPR, max, integer_one_node, 0); 2533 /* If the dividend is non-negative the modulus will be 2534 non-negative as well. */ 2535 if (TYPE_UNSIGNED (TREE_TYPE (max)) 2536 || (vrp_expr_computes_nonnegative (op0, &sop) && !sop)) 2537 min = build_int_cst (TREE_TYPE (max), 0); 2538 else 2539 min = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (max), max); 2540 } 2541 else if (code == MINUS_EXPR) 2542 { 2543 /* If we have a MINUS_EXPR with two VR_ANTI_RANGEs, drop to 2544 VR_VARYING. It would take more effort to compute a precise 2545 range for such a case. For example, if we have op0 == 1 and 2546 op1 == 1 with their ranges both being ~[0,0], we would have 2547 op0 - op1 == 0, so we cannot claim that the difference is in 2548 ~[0,0]. Note that we are guaranteed to have 2549 vr0.type == vr1.type at this point. */ 2550 if (vr0.type == VR_ANTI_RANGE) 2551 { 2552 set_value_range_to_varying (vr); 2553 return; 2554 } 2555 2556 /* For MINUS_EXPR, apply the operation to the opposite ends of 2557 each range. 
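For instance, [3, 7] - [1, 2] yields [3 - 2, 7 - 1] = [1, 6].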
*/ 2558 min = vrp_int_const_binop (code, vr0.min, vr1.max); 2559 max = vrp_int_const_binop (code, vr0.max, vr1.min); 2560 } 2561 else if (code == BIT_AND_EXPR) 2562 { 2563 bool vr0_int_cst_singleton_p, vr1_int_cst_singleton_p; 2564 2565 vr0_int_cst_singleton_p = range_int_cst_singleton_p (&vr0); 2566 vr1_int_cst_singleton_p = range_int_cst_singleton_p (&vr1); 2567 2568 if (vr0_int_cst_singleton_p && vr1_int_cst_singleton_p) 2569 min = max = int_const_binop (code, vr0.max, vr1.max, 0); 2570 else if (vr0_int_cst_singleton_p 2571 && tree_int_cst_sgn (vr0.max) >= 0) 2572 { 2573 min = build_int_cst (expr_type, 0); 2574 max = vr0.max; 2575 } 2576 else if (vr1_int_cst_singleton_p 2577 && tree_int_cst_sgn (vr1.max) >= 0) 2578 { 2579 type = VR_RANGE; 2580 min = build_int_cst (expr_type, 0); 2581 max = vr1.max; 2582 } 2583 else 2584 { 2585 set_value_range_to_varying (vr); 2586 return; 2587 } 2588 } 2589 else if (code == BIT_IOR_EXPR) 2590 { 2591 if (range_int_cst_p (&vr0) 2592 && range_int_cst_p (&vr1) 2593 && tree_int_cst_sgn (vr0.min) >= 0 2594 && tree_int_cst_sgn (vr1.min) >= 0) 2595 { 2596 double_int vr0_max = tree_to_double_int (vr0.max); 2597 double_int vr1_max = tree_to_double_int (vr1.max); 2598 double_int ior_max; 2599 2600 /* Set all bits to the right of the most significant one to 1. 2601 For example, [0, 4] | [4, 4] = [4, 7]. */ 2602 ior_max.low = vr0_max.low | vr1_max.low; 2603 ior_max.high = vr0_max.high | vr1_max.high; 2604 if (ior_max.high != 0) 2605 { 2606 ior_max.low = ~(unsigned HOST_WIDE_INT)0u; 2607 ior_max.high |= ((HOST_WIDE_INT) 1 2608 << floor_log2 (ior_max.high)) - 1; 2609 } 2610 else if (ior_max.low != 0) 2611 ior_max.low |= ((unsigned HOST_WIDE_INT) 1u 2612 << floor_log2 (ior_max.low)) - 1; 2613 2614 /* Both of these endpoints are conservative. */ 2615 min = vrp_int_const_binop (MAX_EXPR, vr0.min, vr1.min); 2616 max = double_int_to_tree (expr_type, ior_max); 2617 } 2618 else 2619 { 2620 set_value_range_to_varying (vr); 2621 return; 2622 } 2623 } 2624 else 2625 gcc_unreachable (); 2626 2627 /* If either MIN or MAX overflowed, then set the resulting range to 2628 VARYING. But we do accept an overflow infinity 2629 representation. */ 2630 if (min == NULL_TREE 2631 || !is_gimple_min_invariant (min) 2632 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min)) 2633 || max == NULL_TREE 2634 || !is_gimple_min_invariant (max) 2635 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max))) 2636 { 2637 set_value_range_to_varying (vr); 2638 return; 2639 } 2640 2641 /* We punt if: 2642 1) [-INF, +INF] 2643 2) [-INF, +-INF(OVF)] 2644 3) [+-INF(OVF), +INF] 2645 4) [+-INF(OVF), +-INF(OVF)] 2646 We learn nothing when we have INF and INF(OVF) on both sides. 2647 Note that we do accept [-INF, -INF] and [+INF, +INF] without 2648 overflow. */ 2649 if ((vrp_val_is_min (min) || is_overflow_infinity (min)) 2650 && (vrp_val_is_max (max) || is_overflow_infinity (max))) 2651 { 2652 set_value_range_to_varying (vr); 2653 return; 2654 } 2655 2656 cmp = compare_values (min, max); 2657 if (cmp == -2 || cmp == 1) 2658 { 2659 /* If the new range has its limits swapped around (MIN > MAX), 2660 then the operation caused one of them to wrap around, mark 2661 the new range VARYING. */ 2662 set_value_range_to_varying (vr); 2663 } 2664 else 2665 set_value_range (vr, type, min, max, NULL); 2666 } 2667 2668 2669 /* Extract range information from a unary expression EXPR based on 2670 the range of its operand and the expression code. 
*/ 2671 2672 static void 2673 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code, 2674 tree type, tree op0) 2675 { 2676 tree min, max; 2677 int cmp; 2678 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; 2679 2680 /* Refuse to operate on certain unary expressions for which we 2681 cannot easily determine a resulting range. */ 2682 if (code == FIX_TRUNC_EXPR 2683 || code == FLOAT_EXPR 2684 || code == BIT_NOT_EXPR 2685 || code == CONJ_EXPR) 2686 { 2687 /* We can still do constant propagation here. */ 2688 if ((op0 = op_with_constant_singleton_value_range (op0)) != NULL_TREE) 2689 { 2690 tree tem = fold_unary (code, type, op0); 2691 if (tem 2692 && is_gimple_min_invariant (tem) 2693 && !is_overflow_infinity (tem)) 2694 { 2695 set_value_range (vr, VR_RANGE, tem, tem, NULL); 2696 return; 2697 } 2698 } 2699 set_value_range_to_varying (vr); 2700 return; 2701 } 2702 2703 /* Get value ranges for the operand. For constant operands, create 2704 a new value range with the operand to simplify processing. */ 2705 if (TREE_CODE (op0) == SSA_NAME) 2706 vr0 = *(get_value_range (op0)); 2707 else if (is_gimple_min_invariant (op0)) 2708 set_value_range_to_value (&vr0, op0, NULL); 2709 else 2710 set_value_range_to_varying (&vr0); 2711 2712 /* If VR0 is UNDEFINED, so is the result. */ 2713 if (vr0.type == VR_UNDEFINED) 2714 { 2715 set_value_range_to_undefined (vr); 2716 return; 2717 } 2718 2719 /* Refuse to operate on symbolic ranges, or if neither operand is 2720 a pointer or integral type. */ 2721 if ((!INTEGRAL_TYPE_P (TREE_TYPE (op0)) 2722 && !POINTER_TYPE_P (TREE_TYPE (op0))) 2723 || (vr0.type != VR_VARYING 2724 && symbolic_range_p (&vr0))) 2725 { 2726 set_value_range_to_varying (vr); 2727 return; 2728 } 2729 2730 /* If the expression involves pointers, we are only interested in 2731 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */ 2732 if (POINTER_TYPE_P (type) || POINTER_TYPE_P (TREE_TYPE (op0))) 2733 { 2734 bool sop; 2735 2736 sop = false; 2737 if (range_is_nonnull (&vr0) 2738 || (tree_unary_nonzero_warnv_p (code, type, op0, &sop) 2739 && !sop)) 2740 set_value_range_to_nonnull (vr, type); 2741 else if (range_is_null (&vr0)) 2742 set_value_range_to_null (vr, type); 2743 else 2744 set_value_range_to_varying (vr); 2745 2746 return; 2747 } 2748 2749 /* Handle unary expressions on integer ranges. */ 2750 if (CONVERT_EXPR_CODE_P (code) 2751 && INTEGRAL_TYPE_P (type) 2752 && INTEGRAL_TYPE_P (TREE_TYPE (op0))) 2753 { 2754 tree inner_type = TREE_TYPE (op0); 2755 tree outer_type = type; 2756 2757 /* If VR0 is varying and we increase the type precision, assume 2758 a full range for the following transformation. */ 2759 if (vr0.type == VR_VARYING 2760 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type)) 2761 { 2762 vr0.type = VR_RANGE; 2763 vr0.min = TYPE_MIN_VALUE (inner_type); 2764 vr0.max = TYPE_MAX_VALUE (inner_type); 2765 } 2766 2767 /* If VR0 is a constant range or anti-range and the conversion is 2768 not truncating we can convert the min and max values and 2769 canonicalize the resulting range. Otherwise we can do the 2770 conversion if the size of the range is less than what the 2771 precision of the target type can represent and the range is 2772 not an anti-range. 
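For instance, a signed char range [-10, 10] converted to int keeps the bounds [-10, 10], whereas an int range [0, 70000] converted to a 16-bit type spans more than 65536 values and is dropped to VR_VARYING.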
*/ 2773 if ((vr0.type == VR_RANGE 2774 || vr0.type == VR_ANTI_RANGE) 2775 && TREE_CODE (vr0.min) == INTEGER_CST 2776 && TREE_CODE (vr0.max) == INTEGER_CST 2777 && (!is_overflow_infinity (vr0.min) 2778 || (vr0.type == VR_RANGE 2779 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type) 2780 && needs_overflow_infinity (outer_type) 2781 && supports_overflow_infinity (outer_type))) 2782 && (!is_overflow_infinity (vr0.max) 2783 || (vr0.type == VR_RANGE 2784 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type) 2785 && needs_overflow_infinity (outer_type) 2786 && supports_overflow_infinity (outer_type))) 2787 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type) 2788 || (vr0.type == VR_RANGE 2789 && integer_zerop (int_const_binop (RSHIFT_EXPR, 2790 int_const_binop (MINUS_EXPR, vr0.max, vr0.min, 0), 2791 size_int (TYPE_PRECISION (outer_type)), 0))))) 2792 { 2793 tree new_min, new_max; 2794 new_min = force_fit_type_double (outer_type, 2795 TREE_INT_CST_LOW (vr0.min), 2796 TREE_INT_CST_HIGH (vr0.min), 0, 0); 2797 new_max = force_fit_type_double (outer_type, 2798 TREE_INT_CST_LOW (vr0.max), 2799 TREE_INT_CST_HIGH (vr0.max), 0, 0); 2800 if (is_overflow_infinity (vr0.min)) 2801 new_min = negative_overflow_infinity (outer_type); 2802 if (is_overflow_infinity (vr0.max)) 2803 new_max = positive_overflow_infinity (outer_type); 2804 set_and_canonicalize_value_range (vr, vr0.type, 2805 new_min, new_max, NULL); 2806 return; 2807 } 2808 2809 set_value_range_to_varying (vr); 2810 return; 2811 } 2812 2813 /* Conversion of a VR_VARYING value to a wider type can result 2814 in a usable range. So wait until after we've handled conversions 2815 before dropping the result to VR_VARYING if we had a source 2816 operand that is VR_VARYING. */ 2817 if (vr0.type == VR_VARYING) 2818 { 2819 set_value_range_to_varying (vr); 2820 return; 2821 } 2822 2823 /* Apply the operation to each end of the range and see what we end 2824 up with. */ 2825 if (code == NEGATE_EXPR 2826 && !TYPE_UNSIGNED (type)) 2827 { 2828 /* NEGATE_EXPR flips the range around. We need to treat 2829 TYPE_MIN_VALUE specially. 
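For instance, negating [3, 7] yields [-7, -3], while negating a range whose minimum is TYPE_MIN_VALUE uses an overflow infinity for the new maximum (when the type supports one), since -TYPE_MIN_VALUE is not representable.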
*/ 2830 if (is_positive_overflow_infinity (vr0.max)) 2831 min = negative_overflow_infinity (type); 2832 else if (is_negative_overflow_infinity (vr0.max)) 2833 min = positive_overflow_infinity (type); 2834 else if (!vrp_val_is_min (vr0.max)) 2835 min = fold_unary_to_constant (code, type, vr0.max); 2836 else if (needs_overflow_infinity (type)) 2837 { 2838 if (supports_overflow_infinity (type) 2839 && !is_overflow_infinity (vr0.min) 2840 && !vrp_val_is_min (vr0.min)) 2841 min = positive_overflow_infinity (type); 2842 else 2843 { 2844 set_value_range_to_varying (vr); 2845 return; 2846 } 2847 } 2848 else 2849 min = TYPE_MIN_VALUE (type); 2850 2851 if (is_positive_overflow_infinity (vr0.min)) 2852 max = negative_overflow_infinity (type); 2853 else if (is_negative_overflow_infinity (vr0.min)) 2854 max = positive_overflow_infinity (type); 2855 else if (!vrp_val_is_min (vr0.min)) 2856 max = fold_unary_to_constant (code, type, vr0.min); 2857 else if (needs_overflow_infinity (type)) 2858 { 2859 if (supports_overflow_infinity (type)) 2860 max = positive_overflow_infinity (type); 2861 else 2862 { 2863 set_value_range_to_varying (vr); 2864 return; 2865 } 2866 } 2867 else 2868 max = TYPE_MIN_VALUE (type); 2869 } 2870 else if (code == NEGATE_EXPR 2871 && TYPE_UNSIGNED (type)) 2872 { 2873 if (!range_includes_zero_p (&vr0)) 2874 { 2875 max = fold_unary_to_constant (code, type, vr0.min); 2876 min = fold_unary_to_constant (code, type, vr0.max); 2877 } 2878 else 2879 { 2880 if (range_is_null (&vr0)) 2881 set_value_range_to_null (vr, type); 2882 else 2883 set_value_range_to_varying (vr); 2884 return; 2885 } 2886 } 2887 else if (code == ABS_EXPR 2888 && !TYPE_UNSIGNED (type)) 2889 { 2890 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a 2891 useful range. */ 2892 if (!TYPE_OVERFLOW_UNDEFINED (type) 2893 && ((vr0.type == VR_RANGE 2894 && vrp_val_is_min (vr0.min)) 2895 || (vr0.type == VR_ANTI_RANGE 2896 && !vrp_val_is_min (vr0.min) 2897 && !range_includes_zero_p (&vr0)))) 2898 { 2899 set_value_range_to_varying (vr); 2900 return; 2901 } 2902 2903 /* ABS_EXPR may flip the range around, if the original range 2904 included negative values. */ 2905 if (is_overflow_infinity (vr0.min)) 2906 min = positive_overflow_infinity (type); 2907 else if (!vrp_val_is_min (vr0.min)) 2908 min = fold_unary_to_constant (code, type, vr0.min); 2909 else if (!needs_overflow_infinity (type)) 2910 min = TYPE_MAX_VALUE (type); 2911 else if (supports_overflow_infinity (type)) 2912 min = positive_overflow_infinity (type); 2913 else 2914 { 2915 set_value_range_to_varying (vr); 2916 return; 2917 } 2918 2919 if (is_overflow_infinity (vr0.max)) 2920 max = positive_overflow_infinity (type); 2921 else if (!vrp_val_is_min (vr0.max)) 2922 max = fold_unary_to_constant (code, type, vr0.max); 2923 else if (!needs_overflow_infinity (type)) 2924 max = TYPE_MAX_VALUE (type); 2925 else if (supports_overflow_infinity (type) 2926 /* We shouldn't generate [+INF, +INF] as set_value_range 2927 doesn't like this and ICEs. */ 2928 && !is_positive_overflow_infinity (min)) 2929 max = positive_overflow_infinity (type); 2930 else 2931 { 2932 set_value_range_to_varying (vr); 2933 return; 2934 } 2935 2936 cmp = compare_values (min, max); 2937 2938 /* If a VR_ANTI_RANGEs contains zero, then we have 2939 ~[-INF, min(MIN, MAX)]. */ 2940 if (vr0.type == VR_ANTI_RANGE) 2941 { 2942 if (range_includes_zero_p (&vr0)) 2943 { 2944 /* Take the lower of the two values. 
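In effect MAX becomes the smaller of abs(MIN) and abs(MAX); e.g. the absolute value of ~[-3, 2] ends up as ~[-INF, 2], i.e. a value whose magnitude is at least 3.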
*/ 2945 if (cmp != 1) 2946 max = min; 2947 2948 /* Create ~[-INF, min (abs(MIN), abs(MAX))] 2949 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when 2950 flag_wrapv is set and the original anti-range doesn't include 2951 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */ 2952 if (TYPE_OVERFLOW_WRAPS (type)) 2953 { 2954 tree type_min_value = TYPE_MIN_VALUE (type); 2955 2956 min = (vr0.min != type_min_value 2957 ? int_const_binop (PLUS_EXPR, type_min_value, 2958 integer_one_node, 0) 2959 : type_min_value); 2960 } 2961 else 2962 { 2963 if (overflow_infinity_range_p (&vr0)) 2964 min = negative_overflow_infinity (type); 2965 else 2966 min = TYPE_MIN_VALUE (type); 2967 } 2968 } 2969 else 2970 { 2971 /* All else has failed, so create the range [0, INF], even for 2972 flag_wrapv since TYPE_MIN_VALUE is in the original 2973 anti-range. */ 2974 vr0.type = VR_RANGE; 2975 min = build_int_cst (type, 0); 2976 if (needs_overflow_infinity (type)) 2977 { 2978 if (supports_overflow_infinity (type)) 2979 max = positive_overflow_infinity (type); 2980 else 2981 { 2982 set_value_range_to_varying (vr); 2983 return; 2984 } 2985 } 2986 else 2987 max = TYPE_MAX_VALUE (type); 2988 } 2989 } 2990 2991 /* If the range contains zero then we know that the minimum value in the 2992 range will be zero. */ 2993 else if (range_includes_zero_p (&vr0)) 2994 { 2995 if (cmp == 1) 2996 max = min; 2997 min = build_int_cst (type, 0); 2998 } 2999 else 3000 { 3001 /* If the range was reversed, swap MIN and MAX. */ 3002 if (cmp == 1) 3003 { 3004 tree t = min; 3005 min = max; 3006 max = t; 3007 } 3008 } 3009 } 3010 else 3011 { 3012 /* Otherwise, operate on each end of the range. */ 3013 min = fold_unary_to_constant (code, type, vr0.min); 3014 max = fold_unary_to_constant (code, type, vr0.max); 3015 3016 if (needs_overflow_infinity (type)) 3017 { 3018 gcc_assert (code != NEGATE_EXPR && code != ABS_EXPR); 3019 3020 /* If both sides have overflowed, we don't know 3021 anything. */ 3022 if ((is_overflow_infinity (vr0.min) 3023 || TREE_OVERFLOW (min)) 3024 && (is_overflow_infinity (vr0.max) 3025 || TREE_OVERFLOW (max))) 3026 { 3027 set_value_range_to_varying (vr); 3028 return; 3029 } 3030 3031 if (is_overflow_infinity (vr0.min)) 3032 min = vr0.min; 3033 else if (TREE_OVERFLOW (min)) 3034 { 3035 if (supports_overflow_infinity (type)) 3036 min = (tree_int_cst_sgn (min) >= 0 3037 ? positive_overflow_infinity (TREE_TYPE (min)) 3038 : negative_overflow_infinity (TREE_TYPE (min))); 3039 else 3040 { 3041 set_value_range_to_varying (vr); 3042 return; 3043 } 3044 } 3045 3046 if (is_overflow_infinity (vr0.max)) 3047 max = vr0.max; 3048 else if (TREE_OVERFLOW (max)) 3049 { 3050 if (supports_overflow_infinity (type)) 3051 max = (tree_int_cst_sgn (max) >= 0 3052 ? positive_overflow_infinity (TREE_TYPE (max)) 3053 : negative_overflow_infinity (TREE_TYPE (max))); 3054 else 3055 { 3056 set_value_range_to_varying (vr); 3057 return; 3058 } 3059 } 3060 } 3061 } 3062 3063 cmp = compare_values (min, max); 3064 if (cmp == -2 || cmp == 1) 3065 { 3066 /* If the new range has its limits swapped around (MIN > MAX), 3067 then the operation caused one of them to wrap around, mark 3068 the new range VARYING. */ 3069 set_value_range_to_varying (vr); 3070 } 3071 else 3072 set_value_range (vr, vr0.type, min, max, NULL); 3073 } 3074 3075 3076 /* Extract range information from a conditional expression EXPR based on 3077 the ranges of each of its operands and the expression code. 
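For instance, if the two arms have ranges [0, 10] and [20, 30], the result is their union [0, 30] as computed by vrp_meet.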
*/ 3078 3079 static void 3080 extract_range_from_cond_expr (value_range_t *vr, tree expr) 3081 { 3082 tree op0, op1; 3083 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; 3084 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; 3085 3086 /* Get value ranges for each operand. For constant operands, create 3087 a new value range with the operand to simplify processing. */ 3088 op0 = COND_EXPR_THEN (expr); 3089 if (TREE_CODE (op0) == SSA_NAME) 3090 vr0 = *(get_value_range (op0)); 3091 else if (is_gimple_min_invariant (op0)) 3092 set_value_range_to_value (&vr0, op0, NULL); 3093 else 3094 set_value_range_to_varying (&vr0); 3095 3096 op1 = COND_EXPR_ELSE (expr); 3097 if (TREE_CODE (op1) == SSA_NAME) 3098 vr1 = *(get_value_range (op1)); 3099 else if (is_gimple_min_invariant (op1)) 3100 set_value_range_to_value (&vr1, op1, NULL); 3101 else 3102 set_value_range_to_varying (&vr1); 3103 3104 /* The resulting value range is the union of the operand ranges */ 3105 vrp_meet (&vr0, &vr1); 3106 copy_value_range (vr, &vr0); 3107 } 3108 3109 3110 /* Extract range information from a comparison expression EXPR based 3111 on the range of its operand and the expression code. */ 3112 3113 static void 3114 extract_range_from_comparison (value_range_t *vr, enum tree_code code, 3115 tree type, tree op0, tree op1) 3116 { 3117 bool sop = false; 3118 tree val; 3119 3120 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop, 3121 NULL); 3122 3123 /* A disadvantage of using a special infinity as an overflow 3124 representation is that we lose the ability to record overflow 3125 when we don't have an infinity. So we have to ignore a result 3126 which relies on overflow. */ 3127 3128 if (val && !is_overflow_infinity (val) && !sop) 3129 { 3130 /* Since this expression was found on the RHS of an assignment, 3131 its type may be different from _Bool. Convert VAL to EXPR's 3132 type. */ 3133 val = fold_convert (type, val); 3134 if (is_gimple_min_invariant (val)) 3135 set_value_range_to_value (vr, val, vr->equiv); 3136 else 3137 set_value_range (vr, VR_RANGE, val, val, vr->equiv); 3138 } 3139 else 3140 /* The result of a comparison is always true or false. */ 3141 set_value_range_to_truthvalue (vr, type); 3142 } 3143 3144 /* Try to derive a nonnegative or nonzero range out of STMT relying 3145 primarily on generic routines in fold in conjunction with range data. 3146 Store the result in *VR */ 3147 3148 static void 3149 extract_range_basic (value_range_t *vr, gimple stmt) 3150 { 3151 bool sop = false; 3152 tree type = gimple_expr_type (stmt); 3153 3154 if (INTEGRAL_TYPE_P (type) 3155 && gimple_stmt_nonnegative_warnv_p (stmt, &sop)) 3156 set_value_range_to_nonnegative (vr, type, 3157 sop || stmt_overflow_infinity (stmt)); 3158 else if (vrp_stmt_computes_nonzero (stmt, &sop) 3159 && !sop) 3160 set_value_range_to_nonnull (vr, type); 3161 else 3162 set_value_range_to_varying (vr); 3163 } 3164 3165 3166 /* Try to compute a useful range out of assignment STMT and store it 3167 in *VR. 
*/ 3168 3169 static void 3170 extract_range_from_assignment (value_range_t *vr, gimple stmt) 3171 { 3172 enum tree_code code = gimple_assign_rhs_code (stmt); 3173 3174 if (code == ASSERT_EXPR) 3175 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt)); 3176 else if (code == SSA_NAME) 3177 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt)); 3178 else if (TREE_CODE_CLASS (code) == tcc_binary 3179 || code == TRUTH_AND_EXPR 3180 || code == TRUTH_OR_EXPR 3181 || code == TRUTH_XOR_EXPR) 3182 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt), 3183 gimple_expr_type (stmt), 3184 gimple_assign_rhs1 (stmt), 3185 gimple_assign_rhs2 (stmt)); 3186 else if (TREE_CODE_CLASS (code) == tcc_unary) 3187 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt), 3188 gimple_expr_type (stmt), 3189 gimple_assign_rhs1 (stmt)); 3190 else if (code == COND_EXPR) 3191 extract_range_from_cond_expr (vr, gimple_assign_rhs1 (stmt)); 3192 else if (TREE_CODE_CLASS (code) == tcc_comparison) 3193 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt), 3194 gimple_expr_type (stmt), 3195 gimple_assign_rhs1 (stmt), 3196 gimple_assign_rhs2 (stmt)); 3197 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS 3198 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt))) 3199 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL); 3200 else 3201 set_value_range_to_varying (vr); 3202 3203 if (vr->type == VR_VARYING) 3204 extract_range_basic (vr, stmt); 3205 } 3206 3207 /* Given a range VR, a LOOP and a variable VAR, determine whether it 3208 would be profitable to adjust VR using scalar evolution information 3209 for VAR. If so, update VR with the new limits. */ 3210 3211 static void 3212 adjust_range_with_scev (value_range_t *vr, struct loop *loop, 3213 gimple stmt, tree var) 3214 { 3215 tree init, step, chrec, tmin, tmax, min, max, type, tem; 3216 enum ev_direction dir; 3217 3218 /* TODO. Don't adjust anti-ranges. An anti-range may provide 3219 better opportunities than a regular range, but I'm not sure. */ 3220 if (vr->type == VR_ANTI_RANGE) 3221 return; 3222 3223 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var)); 3224 3225 /* Like in PR19590, scev can return a constant function. */ 3226 if (is_gimple_min_invariant (chrec)) 3227 { 3228 set_value_range_to_value (vr, chrec, vr->equiv); 3229 return; 3230 } 3231 3232 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC) 3233 return; 3234 3235 init = initial_condition_in_loop_num (chrec, loop->num); 3236 tem = op_with_constant_singleton_value_range (init); 3237 if (tem) 3238 init = tem; 3239 step = evolution_part_in_loop_num (chrec, loop->num); 3240 tem = op_with_constant_singleton_value_range (step); 3241 if (tem) 3242 step = tem; 3243 3244 /* If STEP is symbolic, we can't know whether INIT will be the 3245 minimum or maximum value in the range. Also, unless INIT is 3246 a simple expression, compare_values and possibly other functions 3247 in tree-vrp won't be able to handle it. */ 3248 if (step == NULL_TREE 3249 || !is_gimple_min_invariant (step) 3250 || !valid_value_p (init)) 3251 return; 3252 3253 dir = scev_direction (chrec); 3254 if (/* Do not adjust ranges if we do not know whether the iv increases 3255 or decreases, ... */ 3256 dir == EV_DIR_UNKNOWN 3257 /* ... or if it may wrap. 
*/ 3258 || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec), 3259 true)) 3260 return; 3261 3262 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of 3263 negative_overflow_infinity and positive_overflow_infinity, 3264 because we have concluded that the loop probably does not 3265 wrap. */ 3266 3267 type = TREE_TYPE (var); 3268 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type)) 3269 tmin = lower_bound_in_type (type, type); 3270 else 3271 tmin = TYPE_MIN_VALUE (type); 3272 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type)) 3273 tmax = upper_bound_in_type (type, type); 3274 else 3275 tmax = TYPE_MAX_VALUE (type); 3276 3277 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED) 3278 { 3279 min = tmin; 3280 max = tmax; 3281 3282 /* For VARYING or UNDEFINED ranges, just about anything we get 3283 from scalar evolutions should be better. */ 3284 3285 if (dir == EV_DIR_DECREASES) 3286 max = init; 3287 else 3288 min = init; 3289 3290 /* If we would create an invalid range, then just assume we 3291 know absolutely nothing. This may be over-conservative, 3292 but it's clearly safe, and should happen only in unreachable 3293 parts of code, or for invalid programs. */ 3294 if (compare_values (min, max) == 1) 3295 return; 3296 3297 set_value_range (vr, VR_RANGE, min, max, vr->equiv); 3298 } 3299 else if (vr->type == VR_RANGE) 3300 { 3301 min = vr->min; 3302 max = vr->max; 3303 3304 if (dir == EV_DIR_DECREASES) 3305 { 3306 /* INIT is the maximum value. If INIT is lower than VR->MAX 3307 but no smaller than VR->MIN, set VR->MAX to INIT. */ 3308 if (compare_values (init, max) == -1) 3309 { 3310 max = init; 3311 3312 /* If we just created an invalid range with the minimum 3313 greater than the maximum, we fail conservatively. 3314 This should happen only in unreachable 3315 parts of code, or for invalid programs. */ 3316 if (compare_values (min, max) == 1) 3317 return; 3318 } 3319 3320 /* According to the loop information, the variable does not 3321 overflow. If we think it does, probably because of an 3322 overflow due to arithmetic on a different INF value, 3323 reset now. */ 3324 if (is_negative_overflow_infinity (min)) 3325 min = tmin; 3326 } 3327 else 3328 { 3329 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */ 3330 if (compare_values (init, min) == 1) 3331 { 3332 min = init; 3333 3334 /* Again, avoid creating invalid range by failing. */ 3335 if (compare_values (min, max) == 1) 3336 return; 3337 } 3338 3339 if (is_positive_overflow_infinity (max)) 3340 max = tmax; 3341 } 3342 3343 set_value_range (vr, VR_RANGE, min, max, vr->equiv); 3344 } 3345 } 3346 3347 /* Return true if VAR may overflow at STMT. This checks any available 3348 loop information to see if we can determine that VAR does not 3349 overflow. 
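Returning true is the conservative answer; false is returned only when scalar evolution shows that the induction variable cannot wrap in its loop.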
*/ 3350 3351 static bool 3352 vrp_var_may_overflow (tree var, gimple stmt) 3353 { 3354 struct loop *l; 3355 tree chrec, init, step; 3356 3357 if (current_loops == NULL) 3358 return true; 3359 3360 l = loop_containing_stmt (stmt); 3361 if (l == NULL 3362 || !loop_outer (l)) 3363 return true; 3364 3365 chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var)); 3366 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC) 3367 return true; 3368 3369 init = initial_condition_in_loop_num (chrec, l->num); 3370 step = evolution_part_in_loop_num (chrec, l->num); 3371 3372 if (step == NULL_TREE 3373 || !is_gimple_min_invariant (step) 3374 || !valid_value_p (init)) 3375 return true; 3376 3377 /* If we get here, we know something useful about VAR based on the 3378 loop information. If it wraps, it may overflow. */ 3379 3380 if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec), 3381 true)) 3382 return true; 3383 3384 if (dump_file && (dump_flags & TDF_DETAILS) != 0) 3385 { 3386 print_generic_expr (dump_file, var, 0); 3387 fprintf (dump_file, ": loop information indicates does not overflow\n"); 3388 } 3389 3390 return false; 3391 } 3392 3393 3394 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP: 3395 3396 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for 3397 all the values in the ranges. 3398 3399 - Return BOOLEAN_FALSE_NODE if the comparison always returns false. 3400 3401 - Return NULL_TREE if it is not always possible to determine the 3402 value of the comparison. 3403 3404 Also set *STRICT_OVERFLOW_P to indicate whether a range with an 3405 overflow infinity was used in the test. */ 3406 3407 3408 static tree 3409 compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1, 3410 bool *strict_overflow_p) 3411 { 3412 /* VARYING or UNDEFINED ranges cannot be compared. */ 3413 if (vr0->type == VR_VARYING 3414 || vr0->type == VR_UNDEFINED 3415 || vr1->type == VR_VARYING 3416 || vr1->type == VR_UNDEFINED) 3417 return NULL_TREE; 3418 3419 /* Anti-ranges need to be handled separately. */ 3420 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE) 3421 { 3422 /* If both are anti-ranges, then we cannot compute any 3423 comparison. */ 3424 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE) 3425 return NULL_TREE; 3426 3427 /* These comparisons are never statically computable. */ 3428 if (comp == GT_EXPR 3429 || comp == GE_EXPR 3430 || comp == LT_EXPR 3431 || comp == LE_EXPR) 3432 return NULL_TREE; 3433 3434 /* Equality can be computed only between a range and an 3435 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */ 3436 if (vr0->type == VR_RANGE) 3437 { 3438 /* To simplify processing, make VR0 the anti-range. */ 3439 value_range_t *tmp = vr0; 3440 vr0 = vr1; 3441 vr1 = tmp; 3442 } 3443 3444 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR); 3445 3446 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0 3447 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0) 3448 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node; 3449 3450 return NULL_TREE; 3451 } 3452 3453 if (!usable_range_p (vr0, strict_overflow_p) 3454 || !usable_range_p (vr1, strict_overflow_p)) 3455 return NULL_TREE; 3456 3457 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the 3458 operands around and change the comparison code. */ 3459 if (comp == GT_EXPR || comp == GE_EXPR) 3460 { 3461 value_range_t *tmp; 3462 comp = (comp == GT_EXPR) ? 
LT_EXPR : LE_EXPR; 3463 tmp = vr0; 3464 vr0 = vr1; 3465 vr1 = tmp; 3466 } 3467 3468 if (comp == EQ_EXPR) 3469 { 3470 /* Equality may only be computed if both ranges represent 3471 exactly one value. */ 3472 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0 3473 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0) 3474 { 3475 int cmp_min = compare_values_warnv (vr0->min, vr1->min, 3476 strict_overflow_p); 3477 int cmp_max = compare_values_warnv (vr0->max, vr1->max, 3478 strict_overflow_p); 3479 if (cmp_min == 0 && cmp_max == 0) 3480 return boolean_true_node; 3481 else if (cmp_min != -2 && cmp_max != -2) 3482 return boolean_false_node; 3483 } 3484 /* If [V0_MIN, V0_MAX] is disjoint from [V1_MIN, V1_MAX], then V0 != V1. */ 3485 else if (compare_values_warnv (vr0->min, vr1->max, 3486 strict_overflow_p) == 1 3487 || compare_values_warnv (vr1->min, vr0->max, 3488 strict_overflow_p) == 1) 3489 return boolean_false_node; 3490 3491 return NULL_TREE; 3492 } 3493 else if (comp == NE_EXPR) 3494 { 3495 int cmp1, cmp2; 3496 3497 /* If VR0 is completely to the left or completely to the right 3498 of VR1, they are always different. Notice that we need to 3499 make sure that both comparisons yield similar results to 3500 avoid comparing values that cannot be compared at 3501 compile-time. */ 3502 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p); 3503 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p); 3504 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1)) 3505 return boolean_true_node; 3506 3507 /* If VR0 and VR1 represent a single value and are identical, 3508 return false. */ 3509 else if (compare_values_warnv (vr0->min, vr0->max, 3510 strict_overflow_p) == 0 3511 && compare_values_warnv (vr1->min, vr1->max, 3512 strict_overflow_p) == 0 3513 && compare_values_warnv (vr0->min, vr1->min, 3514 strict_overflow_p) == 0 3515 && compare_values_warnv (vr0->max, vr1->max, 3516 strict_overflow_p) == 0) 3517 return boolean_false_node; 3518 3519 /* Otherwise, they may or may not be different. */ 3520 else 3521 return NULL_TREE; 3522 } 3523 else if (comp == LT_EXPR || comp == LE_EXPR) 3524 { 3525 int tst; 3526 3527 /* If VR0 is to the left of VR1, return true. */ 3528 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p); 3529 if ((comp == LT_EXPR && tst == -1) 3530 || (comp == LE_EXPR && (tst == -1 || tst == 0))) 3531 { 3532 if (overflow_infinity_range_p (vr0) 3533 || overflow_infinity_range_p (vr1)) 3534 *strict_overflow_p = true; 3535 return boolean_true_node; 3536 } 3537 3538 /* If VR0 is to the right of VR1, return false. */ 3539 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p); 3540 if ((comp == LT_EXPR && (tst == 0 || tst == 1)) 3541 || (comp == LE_EXPR && tst == 1)) 3542 { 3543 if (overflow_infinity_range_p (vr0) 3544 || overflow_infinity_range_p (vr1)) 3545 *strict_overflow_p = true; 3546 return boolean_false_node; 3547 } 3548 3549 /* Otherwise, we don't know. */ 3550 return NULL_TREE; 3551 } 3552 3553 gcc_unreachable (); 3554 } 3555 3556 3557 /* Given a value range VR, a value VAL and a comparison code COMP, return 3558 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the 3559 values in VR. Return BOOLEAN_FALSE_NODE if the comparison 3560 always returns false. Return NULL_TREE if it is not always 3561 possible to determine the value of the comparison. Also set 3562 *STRICT_OVERFLOW_P to indicate whether a range with an overflow 3563 infinity was used in the test.
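For instance, if VR is [10, 20], then 'VR > 5' is always true and 'VR < 5' is always false, while 'VR > 15' cannot be decided and NULL_TREE is returned.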
*/ 3564 3565 static tree 3566 compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val, 3567 bool *strict_overflow_p) 3568 { 3569 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED) 3570 return NULL_TREE; 3571 3572 /* Anti-ranges need to be handled separately. */ 3573 if (vr->type == VR_ANTI_RANGE) 3574 { 3575 /* For anti-ranges, the only predicates that we can compute at 3576 compile time are equality and inequality. */ 3577 if (comp == GT_EXPR 3578 || comp == GE_EXPR 3579 || comp == LT_EXPR 3580 || comp == LE_EXPR) 3581 return NULL_TREE; 3582 3583 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */ 3584 if (value_inside_range (val, vr) == 1) 3585 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node; 3586 3587 return NULL_TREE; 3588 } 3589 3590 if (!usable_range_p (vr, strict_overflow_p)) 3591 return NULL_TREE; 3592 3593 if (comp == EQ_EXPR) 3594 { 3595 /* EQ_EXPR may only be computed if VR represents exactly 3596 one value. */ 3597 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0) 3598 { 3599 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p); 3600 if (cmp == 0) 3601 return boolean_true_node; 3602 else if (cmp == -1 || cmp == 1 || cmp == 2) 3603 return boolean_false_node; 3604 } 3605 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1 3606 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1) 3607 return boolean_false_node; 3608 3609 return NULL_TREE; 3610 } 3611 else if (comp == NE_EXPR) 3612 { 3613 /* If VAL is not inside VR, then they are always different. */ 3614 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1 3615 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1) 3616 return boolean_true_node; 3617 3618 /* If VR represents exactly one value equal to VAL, then return 3619 false. */ 3620 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0 3621 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0) 3622 return boolean_false_node; 3623 3624 /* Otherwise, they may or may not be different. */ 3625 return NULL_TREE; 3626 } 3627 else if (comp == LT_EXPR || comp == LE_EXPR) 3628 { 3629 int tst; 3630 3631 /* If VR is to the left of VAL, return true. */ 3632 tst = compare_values_warnv (vr->max, val, strict_overflow_p); 3633 if ((comp == LT_EXPR && tst == -1) 3634 || (comp == LE_EXPR && (tst == -1 || tst == 0))) 3635 { 3636 if (overflow_infinity_range_p (vr)) 3637 *strict_overflow_p = true; 3638 return boolean_true_node; 3639 } 3640 3641 /* If VR is to the right of VAL, return false. */ 3642 tst = compare_values_warnv (vr->min, val, strict_overflow_p); 3643 if ((comp == LT_EXPR && (tst == 0 || tst == 1)) 3644 || (comp == LE_EXPR && tst == 1)) 3645 { 3646 if (overflow_infinity_range_p (vr)) 3647 *strict_overflow_p = true; 3648 return boolean_false_node; 3649 } 3650 3651 /* Otherwise, we don't know. */ 3652 return NULL_TREE; 3653 } 3654 else if (comp == GT_EXPR || comp == GE_EXPR) 3655 { 3656 int tst; 3657 3658 /* If VR is to the right of VAL, return true. */ 3659 tst = compare_values_warnv (vr->min, val, strict_overflow_p); 3660 if ((comp == GT_EXPR && tst == 1) 3661 || (comp == GE_EXPR && (tst == 0 || tst == 1))) 3662 { 3663 if (overflow_infinity_range_p (vr)) 3664 *strict_overflow_p = true; 3665 return boolean_true_node; 3666 } 3667 3668 /* If VR is to the left of VAL, return false. 
*/ 3669 tst = compare_values_warnv (vr->max, val, strict_overflow_p); 3670 if ((comp == GT_EXPR && (tst == -1 || tst == 0)) 3671 || (comp == GE_EXPR && tst == -1)) 3672 { 3673 if (overflow_infinity_range_p (vr)) 3674 *strict_overflow_p = true; 3675 return boolean_false_node; 3676 } 3677 3678 /* Otherwise, we don't know. */ 3679 return NULL_TREE; 3680 } 3681 3682 gcc_unreachable (); 3683 } 3684 3685 3686 /* Debugging dumps. */ 3687 3688 void dump_value_range (FILE *, value_range_t *); 3689 void debug_value_range (value_range_t *); 3690 void dump_all_value_ranges (FILE *); 3691 void debug_all_value_ranges (void); 3692 void dump_vr_equiv (FILE *, bitmap); 3693 void debug_vr_equiv (bitmap); 3694 3695 3696 /* Dump value range VR to FILE. */ 3697 3698 void 3699 dump_value_range (FILE *file, value_range_t *vr) 3700 { 3701 if (vr == NULL) 3702 fprintf (file, "[]"); 3703 else if (vr->type == VR_UNDEFINED) 3704 fprintf (file, "UNDEFINED"); 3705 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE) 3706 { 3707 tree type = TREE_TYPE (vr->min); 3708 3709 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : ""); 3710 3711 if (is_negative_overflow_infinity (vr->min)) 3712 fprintf (file, "-INF(OVF)"); 3713 else if (INTEGRAL_TYPE_P (type) 3714 && !TYPE_UNSIGNED (type) 3715 && vrp_val_is_min (vr->min)) 3716 fprintf (file, "-INF"); 3717 else 3718 print_generic_expr (file, vr->min, 0); 3719 3720 fprintf (file, ", "); 3721 3722 if (is_positive_overflow_infinity (vr->max)) 3723 fprintf (file, "+INF(OVF)"); 3724 else if (INTEGRAL_TYPE_P (type) 3725 && vrp_val_is_max (vr->max)) 3726 fprintf (file, "+INF"); 3727 else 3728 print_generic_expr (file, vr->max, 0); 3729 3730 fprintf (file, "]"); 3731 3732 if (vr->equiv) 3733 { 3734 bitmap_iterator bi; 3735 unsigned i, c = 0; 3736 3737 fprintf (file, " EQUIVALENCES: { "); 3738 3739 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi) 3740 { 3741 print_generic_expr (file, ssa_name (i), 0); 3742 fprintf (file, " "); 3743 c++; 3744 } 3745 3746 fprintf (file, "} (%u elements)", c); 3747 } 3748 } 3749 else if (vr->type == VR_VARYING) 3750 fprintf (file, "VARYING"); 3751 else 3752 fprintf (file, "INVALID RANGE"); 3753 } 3754 3755 3756 /* Dump value range VR to stderr. */ 3757 3758 void 3759 debug_value_range (value_range_t *vr) 3760 { 3761 dump_value_range (stderr, vr); 3762 fprintf (stderr, "\n"); 3763 } 3764 3765 3766 /* Dump value ranges of all SSA_NAMEs to FILE. */ 3767 3768 void 3769 dump_all_value_ranges (FILE *file) 3770 { 3771 size_t i; 3772 3773 for (i = 0; i < num_ssa_names; i++) 3774 { 3775 if (vr_value[i]) 3776 { 3777 print_generic_expr (file, ssa_name (i), 0); 3778 fprintf (file, ": "); 3779 dump_value_range (file, vr_value[i]); 3780 fprintf (file, "\n"); 3781 } 3782 } 3783 3784 fprintf (file, "\n"); 3785 } 3786 3787 3788 /* Dump all value ranges to stderr. */ 3789 3790 void 3791 debug_all_value_ranges (void) 3792 { 3793 dump_all_value_ranges (stderr); 3794 } 3795 3796 3797 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V, 3798 create a new SSA name N and return the assertion assignment 3799 'V = ASSERT_EXPR <V, V OP W>'. 
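For instance (illustrative, with made-up SSA names): for V = x_3 and COND = 'x_3 > 5' this builds the assignment x_7 = ASSERT_EXPR <x_3, x_3 > 5>, where the left-hand side x_7 is the new name N duplicated from x_3; N and V are registered in the replacement table so that the dominated uses of x_3 can later be rewritten to x_7.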
*/ 3800 3801 static gimple 3802 build_assert_expr_for (tree cond, tree v) 3803 { 3804 tree n; 3805 gimple assertion; 3806 3807 gcc_assert (TREE_CODE (v) == SSA_NAME); 3808 n = duplicate_ssa_name (v, NULL); 3809 3810 if (COMPARISON_CLASS_P (cond)) 3811 { 3812 tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond); 3813 assertion = gimple_build_assign (n, a); 3814 } 3815 else if (TREE_CODE (cond) == TRUTH_NOT_EXPR) 3816 { 3817 /* Given !V, build the assignment N = false. */ 3818 tree op0 = TREE_OPERAND (cond, 0); 3819 gcc_assert (op0 == v); 3820 assertion = gimple_build_assign (n, boolean_false_node); 3821 } 3822 else if (TREE_CODE (cond) == SSA_NAME) 3823 { 3824 /* Given V, build the assignment N = true. */ 3825 gcc_assert (v == cond); 3826 assertion = gimple_build_assign (n, boolean_true_node); 3827 } 3828 else 3829 gcc_unreachable (); 3830 3831 SSA_NAME_DEF_STMT (n) = assertion; 3832 3833 /* The new ASSERT_EXPR creates a new SSA name that replaces the 3834 operand of the ASSERT_EXPR. Register the new name and the old one 3835 in the replacement table so that we can fix the SSA web after 3836 adding all the ASSERT_EXPRs. */ 3837 register_new_name_mapping (n, v); 3838 3839 return assertion; 3840 } 3841 3842 3843 /* Return true if STMT is a GIMPLE_COND whose predicate involves floating 3844 point values. */ 3845 3846 static inline bool 3847 fp_predicate (gimple stmt) 3848 { 3849 GIMPLE_CHECK (stmt, GIMPLE_COND); 3850 3851 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt))); 3852 } 3853 3854 3855 /* If the range of values taken by OP can be inferred after STMT executes, 3856 return the comparison code (COMP_CODE_P) and value (VAL_P) that 3857 describe the inferred range. Return true if a range could be 3858 inferred. */ 3859 3860 static bool 3861 infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p) 3862 { 3863 *val_p = NULL_TREE; 3864 *comp_code_p = ERROR_MARK; 3865 3866 /* Do not attempt to infer anything in names that flow through 3867 abnormal edges. */ 3868 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op)) 3869 return false; 3870 3871 /* Similarly, don't infer anything from statements that may throw 3872 exceptions. */ 3873 if (stmt_could_throw_p (stmt)) 3874 return false; 3875 3876 /* If STMT is the last statement of a basic block with no 3877 successors, there is no point inferring anything about any of its 3878 operands. We would not be able to find a proper insertion point 3879 for the assertion, anyway. */ 3880 if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0) 3881 return false; 3882 3883 /* We can only assume that a pointer dereference will yield 3884 non-NULL if -fdelete-null-pointer-checks is enabled. */ 3885 if (flag_delete_null_pointer_checks 3886 && POINTER_TYPE_P (TREE_TYPE (op)) 3887 && gimple_code (stmt) != GIMPLE_ASM) 3888 { 3889 unsigned num_uses, num_loads, num_stores; 3890 3891 count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores); 3892 if (num_loads + num_stores > 0) 3893 { 3894 *val_p = build_int_cst (TREE_TYPE (op), 0); 3895 *comp_code_p = NE_EXPR; 3896 return true; 3897 } 3898 } 3899 3900 return false; 3901 } 3902 3903 3904 void dump_asserts_for (FILE *, tree); 3905 void debug_asserts_for (tree); 3906 void dump_all_asserts (FILE *); 3907 void debug_all_asserts (void); 3908 3909 /* Dump all the registered assertions for NAME to FILE.
*/ 3910 3911 void 3912 dump_asserts_for (FILE *file, tree name) 3913 { 3914 assert_locus_t loc; 3915 3916 fprintf (file, "Assertions to be inserted for "); 3917 print_generic_expr (file, name, 0); 3918 fprintf (file, "\n"); 3919 3920 loc = asserts_for[SSA_NAME_VERSION (name)]; 3921 while (loc) 3922 { 3923 fprintf (file, "\t"); 3924 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0); 3925 fprintf (file, "\n\tBB #%d", loc->bb->index); 3926 if (loc->e) 3927 { 3928 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index, 3929 loc->e->dest->index); 3930 dump_edge_info (file, loc->e, 0); 3931 } 3932 fprintf (file, "\n\tPREDICATE: "); 3933 print_generic_expr (file, name, 0); 3934 fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]); 3935 print_generic_expr (file, loc->val, 0); 3936 fprintf (file, "\n\n"); 3937 loc = loc->next; 3938 } 3939 3940 fprintf (file, "\n"); 3941 } 3942 3943 3944 /* Dump all the registered assertions for NAME to stderr. */ 3945 3946 void 3947 debug_asserts_for (tree name) 3948 { 3949 dump_asserts_for (stderr, name); 3950 } 3951 3952 3953 /* Dump all the registered assertions for all the names to FILE. */ 3954 3955 void 3956 dump_all_asserts (FILE *file) 3957 { 3958 unsigned i; 3959 bitmap_iterator bi; 3960 3961 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n"); 3962 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi) 3963 dump_asserts_for (file, ssa_name (i)); 3964 fprintf (file, "\n"); 3965 } 3966 3967 3968 /* Dump all the registered assertions for all the names to stderr. */ 3969 3970 void 3971 debug_all_asserts (void) 3972 { 3973 dump_all_asserts (stderr); 3974 } 3975 3976 3977 /* If NAME doesn't have an ASSERT_EXPR registered for asserting 3978 'EXPR COMP_CODE VAL' at a location that dominates block BB or 3979 E->DEST, then register this location as a possible insertion point 3980 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>. 3981 3982 BB, E and SI provide the exact insertion point for the new 3983 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted 3984 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on 3985 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E 3986 must not be NULL. */ 3987 3988 static void 3989 register_new_assert_for (tree name, tree expr, 3990 enum tree_code comp_code, 3991 tree val, 3992 basic_block bb, 3993 edge e, 3994 gimple_stmt_iterator si) 3995 { 3996 assert_locus_t n, loc, last_loc; 3997 basic_block dest_bb; 3998 3999 #if defined ENABLE_CHECKING 4000 gcc_assert (bb == NULL || e == NULL); 4001 4002 if (e == NULL) 4003 gcc_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND 4004 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH); 4005 #endif 4006 4007 /* Never build an assert comparing against an integer constant with 4008 TREE_OVERFLOW set. This confuses our undefined overflow warning 4009 machinery. */ 4010 if (TREE_CODE (val) == INTEGER_CST 4011 && TREE_OVERFLOW (val)) 4012 val = build_int_cst_wide (TREE_TYPE (val), 4013 TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val)); 4014 4015 /* The new assertion A will be inserted at BB or E. We need to 4016 determine if the new location is dominated by a previously 4017 registered location for A. If we are doing an edge insertion, 4018 assume that A will be inserted at E->DEST. Note that this is not 4019 necessarily true. 4020 4021 If E is a critical edge, it will be split. But even if E is 4022 split, the new block will dominate the same set of blocks that 4023 E->DEST dominates. 
4024 4025 The reverse, however, is not true, blocks dominated by E->DEST 4026 will not be dominated by the new block created to split E. So, 4027 if the insertion location is on a critical edge, we will not use 4028 the new location to move another assertion previously registered 4029 at a block dominated by E->DEST. */ 4030 dest_bb = (bb) ? bb : e->dest; 4031 4032 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and 4033 VAL at a block dominating DEST_BB, then we don't need to insert a new 4034 one. Similarly, if the same assertion already exists at a block 4035 dominated by DEST_BB and the new location is not on a critical 4036 edge, then update the existing location for the assertion (i.e., 4037 move the assertion up in the dominance tree). 4038 4039 Note, this is implemented as a simple linked list because there 4040 should not be more than a handful of assertions registered per 4041 name. If this becomes a performance problem, a table hashed by 4042 COMP_CODE and VAL could be implemented. */ 4043 loc = asserts_for[SSA_NAME_VERSION (name)]; 4044 last_loc = loc; 4045 while (loc) 4046 { 4047 if (loc->comp_code == comp_code 4048 && (loc->val == val 4049 || operand_equal_p (loc->val, val, 0)) 4050 && (loc->expr == expr 4051 || operand_equal_p (loc->expr, expr, 0))) 4052 { 4053 /* If the assertion NAME COMP_CODE VAL has already been 4054 registered at a basic block that dominates DEST_BB, then 4055 we don't need to insert the same assertion again. Note 4056 that we don't check strict dominance here to avoid 4057 replicating the same assertion inside the same basic 4058 block more than once (e.g., when a pointer is 4059 dereferenced several times inside a block). 4060 4061 An exception to this rule are edge insertions. If the 4062 new assertion is to be inserted on edge E, then it will 4063 dominate all the other insertions that we may want to 4064 insert in DEST_BB. So, if we are doing an edge 4065 insertion, don't do this dominance check. */ 4066 if (e == NULL 4067 && dominated_by_p (CDI_DOMINATORS, dest_bb, loc->bb)) 4068 return; 4069 4070 /* Otherwise, if E is not a critical edge and DEST_BB 4071 dominates the existing location for the assertion, move 4072 the assertion up in the dominance tree by updating its 4073 location information. */ 4074 if ((e == NULL || !EDGE_CRITICAL_P (e)) 4075 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb)) 4076 { 4077 loc->bb = dest_bb; 4078 loc->e = e; 4079 loc->si = si; 4080 return; 4081 } 4082 } 4083 4084 /* Update the last node of the list and move to the next one. */ 4085 last_loc = loc; 4086 loc = loc->next; 4087 } 4088 4089 /* If we didn't find an assertion already registered for 4090 NAME COMP_CODE VAL, add a new one at the end of the list of 4091 assertions associated with NAME. */ 4092 n = XNEW (struct assert_locus_d); 4093 n->bb = dest_bb; 4094 n->e = e; 4095 n->si = si; 4096 n->comp_code = comp_code; 4097 n->val = val; 4098 n->expr = expr; 4099 n->next = NULL; 4100 4101 if (last_loc) 4102 last_loc->next = n; 4103 else 4104 asserts_for[SSA_NAME_VERSION (name)] = n; 4105 4106 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name)); 4107 } 4108 4109 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME. 4110 Extract a suitable test code and value and store them into *CODE_P and 4111 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P. 4112 4113 If no extraction was possible, return FALSE, otherwise return TRUE. 4114 4115 If INVERT is true, then we invert the result stored into *CODE_P. 
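For instance (illustrative): for NAME = x_2 and the predicate '10 < x_2', the comparison is flipped so that NAME comes first: *CODE_P becomes GT_EXPR and *VAL_P becomes 10. If INVERT is true, the code is further inverted, giving LE_EXPR.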
*/ 4116 4117 static bool 4118 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code, 4119 tree cond_op0, tree cond_op1, 4120 bool invert, enum tree_code *code_p, 4121 tree *val_p) 4122 { 4123 enum tree_code comp_code; 4124 tree val; 4125 4126 /* Otherwise, we have a comparison of the form NAME COMP VAL 4127 or VAL COMP NAME. */ 4128 if (name == cond_op1) 4129 { 4130 /* If the predicate is of the form VAL COMP NAME, flip 4131 COMP around because we need to register NAME as the 4132 first operand in the predicate. */ 4133 comp_code = swap_tree_comparison (cond_code); 4134 val = cond_op0; 4135 } 4136 else 4137 { 4138 /* The comparison is of the form NAME COMP VAL, so the 4139 comparison code remains unchanged. */ 4140 comp_code = cond_code; 4141 val = cond_op1; 4142 } 4143 4144 /* Invert the comparison code as necessary. */ 4145 if (invert) 4146 comp_code = invert_tree_comparison (comp_code, 0); 4147 4148 /* VRP does not handle float types. */ 4149 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val))) 4150 return false; 4151 4152 /* Do not register always-false predicates. 4153 FIXME: this works around a limitation in fold() when dealing with 4154 enumerations. Given 'enum { N1, N2 } x;', fold will not 4155 fold 'if (x > N2)' to 'if (0)'. */ 4156 if ((comp_code == GT_EXPR || comp_code == LT_EXPR) 4157 && INTEGRAL_TYPE_P (TREE_TYPE (val))) 4158 { 4159 tree min = TYPE_MIN_VALUE (TREE_TYPE (val)); 4160 tree max = TYPE_MAX_VALUE (TREE_TYPE (val)); 4161 4162 if (comp_code == GT_EXPR 4163 && (!max 4164 || compare_values (val, max) == 0)) 4165 return false; 4166 4167 if (comp_code == LT_EXPR 4168 && (!min 4169 || compare_values (val, min) == 0)) 4170 return false; 4171 } 4172 *code_p = comp_code; 4173 *val_p = val; 4174 return true; 4175 } 4176 4177 /* Try to register an edge assertion for SSA name NAME on edge E for 4178 the condition COND contributing to the conditional jump pointed to by BSI. 4179 Invert the condition COND if INVERT is true. 4180 Return true if an assertion for NAME could be registered. */ 4181 4182 static bool 4183 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, 4184 enum tree_code cond_code, 4185 tree cond_op0, tree cond_op1, bool invert) 4186 { 4187 tree val; 4188 enum tree_code comp_code; 4189 bool retval = false; 4190 4191 if (!extract_code_and_val_from_cond_with_ops (name, cond_code, 4192 cond_op0, 4193 cond_op1, 4194 invert, &comp_code, &val)) 4195 return false; 4196 4197 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph 4198 reachable from E. */ 4199 if (live_on_edge (e, name) 4200 && !has_single_use (name)) 4201 { 4202 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi); 4203 retval = true; 4204 } 4205 4206 /* In the case of NAME <= CST and NAME being defined as 4207 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2 4208 and NAME2 <= CST - CST2. We can do the same for NAME > CST. 4209 This catches range and anti-range tests. */ 4210 if ((comp_code == LE_EXPR 4211 || comp_code == GT_EXPR) 4212 && TREE_CODE (val) == INTEGER_CST 4213 && TYPE_UNSIGNED (TREE_TYPE (val))) 4214 { 4215 gimple def_stmt = SSA_NAME_DEF_STMT (name); 4216 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE; 4217 4218 /* Extract CST2 from the (optional) addition. 
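For instance (illustrative): if NAME is defined as 'NAME = NAME2 + 10' and the test is 'NAME <= 20', then CST2 is 10 and NAME2 is the first operand of the addition, so the assertion registered below is for '(T) NAME2 + 10 <= 20'.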
*/ 4219 if (is_gimple_assign (def_stmt) 4220 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR) 4221 { 4222 name2 = gimple_assign_rhs1 (def_stmt); 4223 cst2 = gimple_assign_rhs2 (def_stmt); 4224 if (TREE_CODE (name2) == SSA_NAME 4225 && TREE_CODE (cst2) == INTEGER_CST) 4226 def_stmt = SSA_NAME_DEF_STMT (name2); 4227 } 4228 4229 /* Extract NAME2 from the (optional) sign-changing cast. */ 4230 if (gimple_assign_cast_p (def_stmt)) 4231 { 4232 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)) 4233 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt))) 4234 && (TYPE_PRECISION (gimple_expr_type (def_stmt)) 4235 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))) 4236 name3 = gimple_assign_rhs1 (def_stmt); 4237 } 4238 4239 /* If name3 is used later, create an ASSERT_EXPR for it. */ 4240 if (name3 != NULL_TREE 4241 && TREE_CODE (name3) == SSA_NAME 4242 && (cst2 == NULL_TREE 4243 || TREE_CODE (cst2) == INTEGER_CST) 4244 && INTEGRAL_TYPE_P (TREE_TYPE (name3)) 4245 && live_on_edge (e, name3) 4246 && !has_single_use (name3)) 4247 { 4248 tree tmp; 4249 4250 /* Build an expression for the range test. */ 4251 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3); 4252 if (cst2 != NULL_TREE) 4253 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2); 4254 4255 if (dump_file) 4256 { 4257 fprintf (dump_file, "Adding assert for "); 4258 print_generic_expr (dump_file, name3, 0); 4259 fprintf (dump_file, " from "); 4260 print_generic_expr (dump_file, tmp, 0); 4261 fprintf (dump_file, "\n"); 4262 } 4263 4264 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi); 4265 4266 retval = true; 4267 } 4268 4269 /* If name2 is used later, create an ASSERT_EXPR for it. */ 4270 if (name2 != NULL_TREE 4271 && TREE_CODE (name2) == SSA_NAME 4272 && TREE_CODE (cst2) == INTEGER_CST 4273 && INTEGRAL_TYPE_P (TREE_TYPE (name2)) 4274 && live_on_edge (e, name2) 4275 && !has_single_use (name2)) 4276 { 4277 tree tmp; 4278 4279 /* Build an expression for the range test. */ 4280 tmp = name2; 4281 if (TREE_TYPE (name) != TREE_TYPE (name2)) 4282 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp); 4283 if (cst2 != NULL_TREE) 4284 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2); 4285 4286 if (dump_file) 4287 { 4288 fprintf (dump_file, "Adding assert for "); 4289 print_generic_expr (dump_file, name2, 0); 4290 fprintf (dump_file, " from "); 4291 print_generic_expr (dump_file, tmp, 0); 4292 fprintf (dump_file, "\n"); 4293 } 4294 4295 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi); 4296 4297 retval = true; 4298 } 4299 } 4300 4301 return retval; 4302 } 4303 4304 /* OP is an operand of a truth value expression which is known to have 4305 a particular value. Register any asserts for OP and for any 4306 operands in OP's defining statement. 4307 4308 If CODE is EQ_EXPR, then we want to register OP is zero (false), 4309 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */ 4310 4311 static bool 4312 register_edge_assert_for_1 (tree op, enum tree_code code, 4313 edge e, gimple_stmt_iterator bsi) 4314 { 4315 bool retval = false; 4316 gimple op_def; 4317 tree val; 4318 enum tree_code rhs_code; 4319 4320 /* We only care about SSA_NAMEs. */ 4321 if (TREE_CODE (op) != SSA_NAME) 4322 return false; 4323 4324 /* We know that OP will have a zero or nonzero value. If OP is used 4325 more than once go ahead and register an assert for OP. 
4326 4327 The FOUND_IN_SUBGRAPH support is not helpful in this situation as 4328 it will always be set for OP (because OP is used in a COND_EXPR in 4329 the subgraph). */ 4330 if (!has_single_use (op)) 4331 { 4332 val = build_int_cst (TREE_TYPE (op), 0); 4333 register_new_assert_for (op, op, code, val, NULL, e, bsi); 4334 retval = true; 4335 } 4336 4337 /* Now look at how OP is set. If it's set from a comparison, 4338 a truth operation or some bit operations, then we may be able 4339 to register information about the operands of that assignment. */ 4340 op_def = SSA_NAME_DEF_STMT (op); 4341 if (gimple_code (op_def) != GIMPLE_ASSIGN) 4342 return retval; 4343 4344 rhs_code = gimple_assign_rhs_code (op_def); 4345 4346 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison) 4347 { 4348 bool invert = (code == EQ_EXPR ? true : false); 4349 tree op0 = gimple_assign_rhs1 (op_def); 4350 tree op1 = gimple_assign_rhs2 (op_def); 4351 4352 if (TREE_CODE (op0) == SSA_NAME) 4353 retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1, 4354 invert); 4355 if (TREE_CODE (op1) == SSA_NAME) 4356 retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1, 4357 invert); 4358 } 4359 else if ((code == NE_EXPR 4360 && (gimple_assign_rhs_code (op_def) == TRUTH_AND_EXPR 4361 || gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)) 4362 || (code == EQ_EXPR 4363 && (gimple_assign_rhs_code (op_def) == TRUTH_OR_EXPR 4364 || gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))) 4365 { 4366 /* Recurse on each operand. */ 4367 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), 4368 code, e, bsi); 4369 retval |= register_edge_assert_for_1 (gimple_assign_rhs2 (op_def), 4370 code, e, bsi); 4371 } 4372 else if (gimple_assign_rhs_code (op_def) == TRUTH_NOT_EXPR) 4373 { 4374 /* Recurse, flipping CODE. */ 4375 code = invert_tree_comparison (code, false); 4376 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), 4377 code, e, bsi); 4378 } 4379 else if (gimple_assign_rhs_code (op_def) == SSA_NAME) 4380 { 4381 /* Recurse through the copy. */ 4382 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), 4383 code, e, bsi); 4384 } 4385 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def))) 4386 { 4387 /* Recurse through the type conversion. */ 4388 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), 4389 code, e, bsi); 4390 } 4391 4392 return retval; 4393 } 4394 4395 /* Try to register an edge assertion for SSA name NAME on edge E for 4396 the condition COND contributing to the conditional jump pointed to by SI. 4397 Return true if an assertion for NAME could be registered. */ 4398 4399 static bool 4400 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si, 4401 enum tree_code cond_code, tree cond_op0, 4402 tree cond_op1) 4403 { 4404 tree val; 4405 enum tree_code comp_code; 4406 bool retval = false; 4407 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0; 4408 4409 /* Do not attempt to infer anything in names that flow through 4410 abnormal edges. */ 4411 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name)) 4412 return false; 4413 4414 if (!extract_code_and_val_from_cond_with_ops (name, cond_code, 4415 cond_op0, cond_op1, 4416 is_else_edge, 4417 &comp_code, &val)) 4418 return false; 4419 4420 /* Register ASSERT_EXPRs for name. 
*/ 4421 retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0, 4422 cond_op1, is_else_edge); 4423 4424 4425 /* If COND is effectively an equality test of an SSA_NAME against 4426 the value zero or one, then we may be able to assert values 4427 for SSA_NAMEs which flow into COND. */ 4428 4429 /* In the case of NAME == 1 or NAME != 0, for TRUTH_AND_EXPR defining 4430 statement of NAME we can assert both operands of the TRUTH_AND_EXPR 4431 have nonzero value. */ 4432 if (((comp_code == EQ_EXPR && integer_onep (val)) 4433 || (comp_code == NE_EXPR && integer_zerop (val)))) 4434 { 4435 gimple def_stmt = SSA_NAME_DEF_STMT (name); 4436 4437 if (is_gimple_assign (def_stmt) 4438 && (gimple_assign_rhs_code (def_stmt) == TRUTH_AND_EXPR 4439 || gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)) 4440 { 4441 tree op0 = gimple_assign_rhs1 (def_stmt); 4442 tree op1 = gimple_assign_rhs2 (def_stmt); 4443 retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si); 4444 retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si); 4445 } 4446 } 4447 4448 /* In the case of NAME == 0 or NAME != 1, for TRUTH_OR_EXPR defining 4449 statement of NAME we can assert both operands of the TRUTH_OR_EXPR 4450 have zero value. */ 4451 if (((comp_code == EQ_EXPR && integer_zerop (val)) 4452 || (comp_code == NE_EXPR && integer_onep (val)))) 4453 { 4454 gimple def_stmt = SSA_NAME_DEF_STMT (name); 4455 4456 if (is_gimple_assign (def_stmt) 4457 && (gimple_assign_rhs_code (def_stmt) == TRUTH_OR_EXPR 4458 /* For BIT_IOR_EXPR only if NAME == 0 both operands have 4459 necessarily zero value. */ 4460 || (comp_code == EQ_EXPR 4461 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)))) 4462 { 4463 tree op0 = gimple_assign_rhs1 (def_stmt); 4464 tree op1 = gimple_assign_rhs2 (def_stmt); 4465 retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si); 4466 retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si); 4467 } 4468 } 4469 4470 return retval; 4471 } 4472 4473 4474 /* Determine whether the outgoing edges of BB should receive an 4475 ASSERT_EXPR for each of the operands of BB's LAST statement. 4476 The last statement of BB must be a COND_EXPR. 4477 4478 If any of the sub-graphs rooted at BB have an interesting use of 4479 the predicate operands, an assert location node is added to the 4480 list of assertions for the corresponding operands. */ 4481 4482 static bool 4483 find_conditional_asserts (basic_block bb, gimple last) 4484 { 4485 bool need_assert; 4486 gimple_stmt_iterator bsi; 4487 tree op; 4488 edge_iterator ei; 4489 edge e; 4490 ssa_op_iter iter; 4491 4492 need_assert = false; 4493 bsi = gsi_for_stmt (last); 4494 4495 /* Look for uses of the operands in each of the sub-graphs 4496 rooted at BB. We need to check each of the outgoing edges 4497 separately, so that we know what kind of ASSERT_EXPR to 4498 insert. */ 4499 FOR_EACH_EDGE (e, ei, bb->succs) 4500 { 4501 if (e->dest == bb) 4502 continue; 4503 4504 /* Register the necessary assertions for each operand in the 4505 conditional predicate. */ 4506 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE) 4507 { 4508 need_assert |= register_edge_assert_for (op, e, bsi, 4509 gimple_cond_code (last), 4510 gimple_cond_lhs (last), 4511 gimple_cond_rhs (last)); 4512 } 4513 } 4514 4515 return need_assert; 4516 } 4517 4518 struct case_info 4519 { 4520 tree expr; 4521 basic_block bb; 4522 }; 4523 4524 /* Compare two case labels sorting first by the destination bb index 4525 and then by the case value. 
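For instance (illustrative block numbers): the labels { case 7: -> bb 4, default: -> bb 4, case 2: -> bb 6 } sort as { default, case 7 } followed by { case 2 }, because bb 4 precedes bb 6 and the default label, having no CASE_LOW, comes first within its group.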
*/ 4526 4527 static int 4528 compare_case_labels (const void *p1, const void *p2) 4529 { 4530 const struct case_info *ci1 = (const struct case_info *) p1; 4531 const struct case_info *ci2 = (const struct case_info *) p2; 4532 int idx1 = ci1->bb->index; 4533 int idx2 = ci2->bb->index; 4534 4535 if (idx1 < idx2) 4536 return -1; 4537 else if (idx1 == idx2) 4538 { 4539 /* Make sure the default label is first in a group. */ 4540 if (!CASE_LOW (ci1->expr)) 4541 return -1; 4542 else if (!CASE_LOW (ci2->expr)) 4543 return 1; 4544 else 4545 return tree_int_cst_compare (CASE_LOW (ci1->expr), 4546 CASE_LOW (ci2->expr)); 4547 } 4548 else 4549 return 1; 4550 } 4551 4552 /* Determine whether the outgoing edges of BB should receive an 4553 ASSERT_EXPR for each of the operands of BB's LAST statement. 4554 The last statement of BB must be a SWITCH_EXPR. 4555 4556 If any of the sub-graphs rooted at BB have an interesting use of 4557 the predicate operands, an assert location node is added to the 4558 list of assertions for the corresponding operands. */ 4559 4560 static bool 4561 find_switch_asserts (basic_block bb, gimple last) 4562 { 4563 bool need_assert; 4564 gimple_stmt_iterator bsi; 4565 tree op; 4566 edge e; 4567 struct case_info *ci; 4568 size_t n = gimple_switch_num_labels (last); 4569 #if GCC_VERSION >= 4000 4570 unsigned int idx; 4571 #else 4572 /* Work around GCC 3.4 bug (PR 37086). */ 4573 volatile unsigned int idx; 4574 #endif 4575 4576 need_assert = false; 4577 bsi = gsi_for_stmt (last); 4578 op = gimple_switch_index (last); 4579 if (TREE_CODE (op) != SSA_NAME) 4580 return false; 4581 4582 /* Build a vector of case labels sorted by destination label. */ 4583 ci = XNEWVEC (struct case_info, n); 4584 for (idx = 0; idx < n; ++idx) 4585 { 4586 ci[idx].expr = gimple_switch_label (last, idx); 4587 ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr)); 4588 } 4589 qsort (ci, n, sizeof (struct case_info), compare_case_labels); 4590 4591 for (idx = 0; idx < n; ++idx) 4592 { 4593 tree min, max; 4594 tree cl = ci[idx].expr; 4595 basic_block cbb = ci[idx].bb; 4596 4597 min = CASE_LOW (cl); 4598 max = CASE_HIGH (cl); 4599 4600 /* If there are multiple case labels with the same destination 4601 we need to combine them to a single value range for the edge. */ 4602 if (idx + 1 < n && cbb == ci[idx + 1].bb) 4603 { 4604 /* Skip labels until the last of the group. */ 4605 do { 4606 ++idx; 4607 } while (idx < n && cbb == ci[idx].bb); 4608 --idx; 4609 4610 /* Pick up the maximum of the case label range. */ 4611 if (CASE_HIGH (ci[idx].expr)) 4612 max = CASE_HIGH (ci[idx].expr); 4613 else 4614 max = CASE_LOW (ci[idx].expr); 4615 } 4616 4617 /* Nothing to do if the range includes the default label until we 4618 can register anti-ranges. */ 4619 if (min == NULL_TREE) 4620 continue; 4621 4622 /* Find the edge to register the assert expr on. */ 4623 e = find_edge (bb, cbb); 4624 4625 /* Register the necessary assertions for the operand in the 4626 SWITCH_EXPR. */ 4627 need_assert |= register_edge_assert_for (op, e, bsi, 4628 max ? GE_EXPR : EQ_EXPR, 4629 op, 4630 fold_convert (TREE_TYPE (op), 4631 min)); 4632 if (max) 4633 { 4634 need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR, 4635 op, 4636 fold_convert (TREE_TYPE (op), 4637 max)); 4638 } 4639 } 4640 4641 XDELETEVEC (ci); 4642 return need_assert; 4643 } 4644 4645 4646 /* Traverse all the statements in block BB looking for statements that 4647 may generate useful assertions for the SSA names in their operand. 
4648 If a statement produces a useful assertion A for name N_i, then the 4649 list of assertions already generated for N_i is scanned to 4650 determine if A is actually needed. 4651 4652 If N_i already had the assertion A at a location dominating the 4653 current location, then nothing needs to be done. Otherwise, the 4654 new location for A is recorded instead. 4655 4656 1- For every statement S in BB, all the variables used by S are 4657 added to bitmap FOUND_IN_SUBGRAPH. 4658 4659 2- If statement S uses an operand N in a way that exposes a known 4660 value range for N, then if N was not already generated by an 4661 ASSERT_EXPR, create a new assert location for N. For instance, 4662 if N is a pointer and the statement dereferences it, we can 4663 assume that N is not NULL. 4664 4665 3- COND_EXPRs are a special case of #2. We can derive range 4666 information from the predicate but need to insert different 4667 ASSERT_EXPRs for each of the sub-graphs rooted at the 4668 conditional block. If the last statement of BB is a conditional 4669 expression of the form 'X op Y', then 4670 4671 a) Remove X and Y from the set FOUND_IN_SUBGRAPH. 4672 4673 b) If the conditional is the only entry point to the sub-graph 4674 corresponding to the THEN_CLAUSE, recurse into it. On 4675 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then 4676 an ASSERT_EXPR is added for the corresponding variable. 4677 4678 c) Repeat step (b) on the ELSE_CLAUSE. 4679 4680 d) Mark X and Y in FOUND_IN_SUBGRAPH. 4681 4682 For instance, 4683 4684 if (a == 9) 4685 b = a; 4686 else 4687 b = c + 1; 4688 4689 In this case, an assertion on the THEN clause is useful to 4690 determine that 'a' is always 9 on that edge. However, an assertion 4691 on the ELSE clause would be unnecessary. 4692 4693 4- If BB does not end in a conditional expression, then we recurse 4694 into BB's dominator children. 4695 4696 At the end of the recursive traversal, every SSA name will have a 4697 list of locations where ASSERT_EXPRs should be added. When a new 4698 location for name N is found, it is registered by calling 4699 register_new_assert_for. That function keeps track of all the 4700 registered assertions to prevent adding unnecessary assertions. 4701 For instance, if a pointer P_4 is dereferenced more than once in a 4702 dominator tree, only the location dominating all the dereference of 4703 P_4 will receive an ASSERT_EXPR. 4704 4705 If this function returns true, then it means that there are names 4706 for which we need to generate ASSERT_EXPRs. Those assertions are 4707 inserted by process_assert_insertions. */ 4708 4709 static bool 4710 find_assert_locations_1 (basic_block bb, sbitmap live) 4711 { 4712 gimple_stmt_iterator si; 4713 gimple last; 4714 gimple phi; 4715 bool need_assert; 4716 4717 need_assert = false; 4718 last = last_stmt (bb); 4719 4720 /* If BB's last statement is a conditional statement involving integer 4721 operands, determine if we need to add ASSERT_EXPRs. */ 4722 if (last 4723 && gimple_code (last) == GIMPLE_COND 4724 && !fp_predicate (last) 4725 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE)) 4726 need_assert |= find_conditional_asserts (bb, last); 4727 4728 /* If BB's last statement is a switch statement involving integer 4729 operands, determine if we need to add ASSERT_EXPRs. 
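For instance (illustrative): given 'switch (i_2)' with a label 'case 3 ... 5:' leading to block B, find_switch_asserts registers the assertions i_2 >= 3 and i_2 <= 5 on the edge to B; a plain 'case 8:' label yields the single assertion i_2 == 8, and the default label is skipped.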
*/ 4730 if (last 4731 && gimple_code (last) == GIMPLE_SWITCH 4732 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE)) 4733 need_assert |= find_switch_asserts (bb, last); 4734 4735 /* Traverse all the statements in BB marking used names and looking 4736 for statements that may infer assertions for their used operands. */ 4737 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) 4738 { 4739 gimple stmt; 4740 tree op; 4741 ssa_op_iter i; 4742 4743 stmt = gsi_stmt (si); 4744 4745 if (is_gimple_debug (stmt)) 4746 continue; 4747 4748 /* See if we can derive an assertion for any of STMT's operands. */ 4749 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE) 4750 { 4751 tree value; 4752 enum tree_code comp_code; 4753 4754 /* Mark OP in our live bitmap. */ 4755 SET_BIT (live, SSA_NAME_VERSION (op)); 4756 4757 /* If OP is used in such a way that we can infer a value 4758 range for it, and we don't find a previous assertion for 4759 it, create a new assertion location node for OP. */ 4760 if (infer_value_range (stmt, op, &comp_code, &value)) 4761 { 4762 /* If we are able to infer a nonzero value range for OP, 4763 then walk backwards through the use-def chain to see if OP 4764 was set via a typecast. 4765 4766 If so, then we can also infer a nonzero value range 4767 for the operand of the NOP_EXPR. */ 4768 if (comp_code == NE_EXPR && integer_zerop (value)) 4769 { 4770 tree t = op; 4771 gimple def_stmt = SSA_NAME_DEF_STMT (t); 4772 4773 while (is_gimple_assign (def_stmt) 4774 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR 4775 && TREE_CODE 4776 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME 4777 && POINTER_TYPE_P 4778 (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))) 4779 { 4780 t = gimple_assign_rhs1 (def_stmt); 4781 def_stmt = SSA_NAME_DEF_STMT (t); 4782 4783 /* Note we want to register the assert for the 4784 operand of the NOP_EXPR after SI, not after the 4785 conversion. */ 4786 if (! has_single_use (t)) 4787 { 4788 register_new_assert_for (t, t, comp_code, value, 4789 bb, NULL, si); 4790 need_assert = true; 4791 } 4792 } 4793 } 4794 4795 /* If OP is used only once, namely in this STMT, don't 4796 bother creating an ASSERT_EXPR for it. Such an 4797 ASSERT_EXPR would do nothing but increase compile time. */ 4798 if (!has_single_use (op)) 4799 { 4800 register_new_assert_for (op, op, comp_code, value, 4801 bb, NULL, si); 4802 need_assert = true; 4803 } 4804 } 4805 } 4806 } 4807 4808 /* Traverse all PHI nodes in BB marking used operands. */ 4809 for (si = gsi_start_phis (bb); !gsi_end_p(si); gsi_next (&si)) 4810 { 4811 use_operand_p arg_p; 4812 ssa_op_iter i; 4813 phi = gsi_stmt (si); 4814 4815 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE) 4816 { 4817 tree arg = USE_FROM_PTR (arg_p); 4818 if (TREE_CODE (arg) == SSA_NAME) 4819 SET_BIT (live, SSA_NAME_VERSION (arg)); 4820 } 4821 } 4822 4823 return need_assert; 4824 } 4825 4826 /* Do an RPO walk over the function computing SSA name liveness 4827 on-the-fly and deciding on assert expressions to insert. 4828 Returns true if there are assert expressions to be inserted. 
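The liveness computation is a simple backward propagation: blocks are visited in the reverse of their RPO number, so a block is processed before its predecessors along forward edges; the names used in a block are added to its live bitmap, which is then OR-ed into each predecessor's bitmap (back edges are skipped). live_on_edge (E, NAME) therefore approximates whether NAME is used in the sub-graph reachable from E->DEST.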
*/ 4829 4830 static bool 4831 find_assert_locations (void) 4832 { 4833 int *rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS); 4834 int *bb_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS); 4835 int *last_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS); 4836 int rpo_cnt, i; 4837 bool need_asserts; 4838 4839 live = XCNEWVEC (sbitmap, last_basic_block + NUM_FIXED_BLOCKS); 4840 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false); 4841 for (i = 0; i < rpo_cnt; ++i) 4842 bb_rpo[rpo[i]] = i; 4843 4844 need_asserts = false; 4845 for (i = rpo_cnt-1; i >= 0; --i) 4846 { 4847 basic_block bb = BASIC_BLOCK (rpo[i]); 4848 edge e; 4849 edge_iterator ei; 4850 4851 if (!live[rpo[i]]) 4852 { 4853 live[rpo[i]] = sbitmap_alloc (num_ssa_names); 4854 sbitmap_zero (live[rpo[i]]); 4855 } 4856 4857 /* Process BB and update the live information with uses in 4858 this block. */ 4859 need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]); 4860 4861 /* Merge liveness into the predecessor blocks and free it. */ 4862 if (!sbitmap_empty_p (live[rpo[i]])) 4863 { 4864 int pred_rpo = i; 4865 FOR_EACH_EDGE (e, ei, bb->preds) 4866 { 4867 int pred = e->src->index; 4868 if (e->flags & EDGE_DFS_BACK) 4869 continue; 4870 4871 if (!live[pred]) 4872 { 4873 live[pred] = sbitmap_alloc (num_ssa_names); 4874 sbitmap_zero (live[pred]); 4875 } 4876 sbitmap_a_or_b (live[pred], live[pred], live[rpo[i]]); 4877 4878 if (bb_rpo[pred] < pred_rpo) 4879 pred_rpo = bb_rpo[pred]; 4880 } 4881 4882 /* Record the RPO number of the last visited block that needs 4883 live information from this block. */ 4884 last_rpo[rpo[i]] = pred_rpo; 4885 } 4886 else 4887 { 4888 sbitmap_free (live[rpo[i]]); 4889 live[rpo[i]] = NULL; 4890 } 4891 4892 /* We can free all successors live bitmaps if all their 4893 predecessors have been visited already. */ 4894 FOR_EACH_EDGE (e, ei, bb->succs) 4895 if (last_rpo[e->dest->index] == i 4896 && live[e->dest->index]) 4897 { 4898 sbitmap_free (live[e->dest->index]); 4899 live[e->dest->index] = NULL; 4900 } 4901 } 4902 4903 XDELETEVEC (rpo); 4904 XDELETEVEC (bb_rpo); 4905 XDELETEVEC (last_rpo); 4906 for (i = 0; i < last_basic_block + NUM_FIXED_BLOCKS; ++i) 4907 if (live[i]) 4908 sbitmap_free (live[i]); 4909 XDELETEVEC (live); 4910 4911 return need_asserts; 4912 } 4913 4914 /* Create an ASSERT_EXPR for NAME and insert it in the location 4915 indicated by LOC. Return true if we made any edge insertions. */ 4916 4917 static bool 4918 process_assert_insertions_for (tree name, assert_locus_t loc) 4919 { 4920 /* Build the comparison expression NAME_i COMP_CODE VAL. */ 4921 gimple stmt; 4922 tree cond; 4923 gimple assert_stmt; 4924 edge_iterator ei; 4925 edge e; 4926 4927 /* If we have X <=> X do not insert an assert expr for that. */ 4928 if (loc->expr == loc->val) 4929 return false; 4930 4931 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val); 4932 assert_stmt = build_assert_expr_for (cond, name); 4933 if (loc->e) 4934 { 4935 /* We have been asked to insert the assertion on an edge. This 4936 is used only by COND_EXPR and SWITCH_EXPR assertions. */ 4937 #if defined ENABLE_CHECKING 4938 gcc_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND 4939 || gimple_code (gsi_stmt (loc->si)) == GIMPLE_SWITCH); 4940 #endif 4941 4942 gsi_insert_on_edge (loc->e, assert_stmt); 4943 return true; 4944 } 4945 4946 /* Otherwise, we can insert right after LOC->SI iff the 4947 statement must not be the last statement in the block. 
*/ 4948 stmt = gsi_stmt (loc->si); 4949 if (!stmt_ends_bb_p (stmt)) 4950 { 4951 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT); 4952 return false; 4953 } 4954 4955 /* If STMT must be the last statement in BB, we can only insert new 4956 assertions on the non-abnormal edge out of BB. Note that since 4957 STMT is not control flow, there may only be one non-abnormal edge 4958 out of BB. */ 4959 FOR_EACH_EDGE (e, ei, loc->bb->succs) 4960 if (!(e->flags & EDGE_ABNORMAL)) 4961 { 4962 gsi_insert_on_edge (e, assert_stmt); 4963 return true; 4964 } 4965 4966 gcc_unreachable (); 4967 } 4968 4969 4970 /* Process all the insertions registered for every name N_i registered 4971 in NEED_ASSERT_FOR. The list of assertions to be inserted are 4972 found in ASSERTS_FOR[i]. */ 4973 4974 static void 4975 process_assert_insertions (void) 4976 { 4977 unsigned i; 4978 bitmap_iterator bi; 4979 bool update_edges_p = false; 4980 int num_asserts = 0; 4981 4982 if (dump_file && (dump_flags & TDF_DETAILS)) 4983 dump_all_asserts (dump_file); 4984 4985 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi) 4986 { 4987 assert_locus_t loc = asserts_for[i]; 4988 gcc_assert (loc); 4989 4990 while (loc) 4991 { 4992 assert_locus_t next = loc->next; 4993 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc); 4994 free (loc); 4995 loc = next; 4996 num_asserts++; 4997 } 4998 } 4999 5000 if (update_edges_p) 5001 gsi_commit_edge_inserts (); 5002 5003 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted", 5004 num_asserts); 5005 } 5006 5007 5008 /* Traverse the flowgraph looking for conditional jumps to insert range 5009 expressions. These range expressions are meant to provide information 5010 to optimizations that need to reason in terms of value ranges. They 5011 will not be expanded into RTL. For instance, given: 5012 5013 x = ... 5014 y = ... 5015 if (x < y) 5016 y = x - 2; 5017 else 5018 x = y + 3; 5019 5020 this pass will transform the code into: 5021 5022 x = ... 5023 y = ... 5024 if (x < y) 5025 { 5026 x = ASSERT_EXPR <x, x < y> 5027 y = x - 2 5028 } 5029 else 5030 { 5031 y = ASSERT_EXPR <y, x <= y> 5032 x = y + 3 5033 } 5034 5035 The idea is that once copy and constant propagation have run, other 5036 optimizations will be able to determine what ranges of values can 'x' 5037 take in different paths of the code, simply by checking the reaching 5038 definition of 'x'. */ 5039 5040 static void 5041 insert_range_assertions (void) 5042 { 5043 need_assert_for = BITMAP_ALLOC (NULL); 5044 asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names); 5045 5046 calculate_dominance_info (CDI_DOMINATORS); 5047 5048 if (find_assert_locations ()) 5049 { 5050 process_assert_insertions (); 5051 update_ssa (TODO_update_ssa_no_phi); 5052 } 5053 5054 if (dump_file && (dump_flags & TDF_DETAILS)) 5055 { 5056 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n"); 5057 dump_function_to_file (current_function_decl, dump_file, dump_flags); 5058 } 5059 5060 free (asserts_for); 5061 BITMAP_FREE (need_assert_for); 5062 } 5063 5064 /* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays 5065 and "struct" hacks. If VRP can determine that the 5066 array subscript is a constant, check if it is outside valid 5067 range. If the array subscript is a RANGE, warn if it is 5068 non-overlapping with valid range. 5069 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. 
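For instance (illustrative): for 'int a[10]' the valid subscripts are [0, 9]. A constant subscript of 10 is diagnosed with -Warray-bounds unless IGNORE_OFF_BY_ONE is true (as for '&a[10]'); a subscript whose range is [12, 20] is diagnosed as above the bounds because it cannot overlap [0, 9]; and a constant subscript of -1 is diagnosed as below the bounds.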
*/ 5070 5071 static void 5072 check_array_ref (location_t location, tree ref, bool ignore_off_by_one) 5073 { 5074 value_range_t* vr = NULL; 5075 tree low_sub, up_sub; 5076 tree low_bound, up_bound = array_ref_up_bound (ref); 5077 5078 low_sub = up_sub = TREE_OPERAND (ref, 1); 5079 5080 if (!up_bound || TREE_NO_WARNING (ref) 5081 || TREE_CODE (up_bound) != INTEGER_CST 5082 /* Can not check flexible arrays. */ 5083 || (TYPE_SIZE (TREE_TYPE (ref)) == NULL_TREE 5084 && TYPE_DOMAIN (TREE_TYPE (ref)) != NULL_TREE 5085 && TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (ref))) == NULL_TREE) 5086 /* Accesses after the end of arrays of size 0 (gcc 5087 extension) and 1 are likely intentional ("struct 5088 hack"). */ 5089 || compare_tree_int (up_bound, 1) <= 0) 5090 return; 5091 5092 low_bound = array_ref_low_bound (ref); 5093 5094 if (TREE_CODE (low_sub) == SSA_NAME) 5095 { 5096 vr = get_value_range (low_sub); 5097 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE) 5098 { 5099 low_sub = vr->type == VR_RANGE ? vr->max : vr->min; 5100 up_sub = vr->type == VR_RANGE ? vr->min : vr->max; 5101 } 5102 } 5103 5104 if (vr && vr->type == VR_ANTI_RANGE) 5105 { 5106 if (TREE_CODE (up_sub) == INTEGER_CST 5107 && tree_int_cst_lt (up_bound, up_sub) 5108 && TREE_CODE (low_sub) == INTEGER_CST 5109 && tree_int_cst_lt (low_sub, low_bound)) 5110 { 5111 warning_at (location, OPT_Warray_bounds, 5112 "array subscript is outside array bounds"); 5113 TREE_NO_WARNING (ref) = 1; 5114 } 5115 } 5116 else if (TREE_CODE (up_sub) == INTEGER_CST 5117 && tree_int_cst_lt (up_bound, up_sub) 5118 && !tree_int_cst_equal (up_bound, up_sub) 5119 && (!ignore_off_by_one 5120 || !tree_int_cst_equal (int_const_binop (PLUS_EXPR, 5121 up_bound, 5122 integer_one_node, 5123 0), 5124 up_sub))) 5125 { 5126 warning_at (location, OPT_Warray_bounds, 5127 "array subscript is above array bounds"); 5128 TREE_NO_WARNING (ref) = 1; 5129 } 5130 else if (TREE_CODE (low_sub) == INTEGER_CST 5131 && tree_int_cst_lt (low_sub, low_bound)) 5132 { 5133 warning_at (location, OPT_Warray_bounds, 5134 "array subscript is below array bounds"); 5135 TREE_NO_WARNING (ref) = 1; 5136 } 5137 } 5138 5139 /* Searches if the expr T, located at LOCATION computes 5140 address of an ARRAY_REF, and call check_array_ref on it. */ 5141 5142 static void 5143 search_for_addr_array (tree t, location_t location) 5144 { 5145 while (TREE_CODE (t) == SSA_NAME) 5146 { 5147 gimple g = SSA_NAME_DEF_STMT (t); 5148 5149 if (gimple_code (g) != GIMPLE_ASSIGN) 5150 return; 5151 5152 if (get_gimple_rhs_class (gimple_assign_rhs_code (g)) 5153 != GIMPLE_SINGLE_RHS) 5154 return; 5155 5156 t = gimple_assign_rhs1 (g); 5157 } 5158 5159 5160 /* We are only interested in addresses of ARRAY_REF's. */ 5161 if (TREE_CODE (t) != ADDR_EXPR) 5162 return; 5163 5164 /* Check each ARRAY_REFs in the reference chain. */ 5165 do 5166 { 5167 if (TREE_CODE (t) == ARRAY_REF) 5168 check_array_ref (location, t, true /*ignore_off_by_one*/); 5169 5170 t = TREE_OPERAND (t, 0); 5171 } 5172 while (handled_component_p (t)); 5173 } 5174 5175 /* walk_tree() callback that checks if *TP is 5176 an ARRAY_REF inside an ADDR_EXPR (in which an array 5177 subscript one outside the valid range is allowed). Call 5178 check_array_ref for each ARRAY_REF found. The location is 5179 passed in DATA. 
*/ 5180 5181 static tree 5182 check_array_bounds (tree *tp, int *walk_subtree, void *data) 5183 { 5184 tree t = *tp; 5185 struct walk_stmt_info *wi = (struct walk_stmt_info *) data; 5186 location_t location; 5187 5188 if (EXPR_HAS_LOCATION (t)) 5189 location = EXPR_LOCATION (t); 5190 else 5191 { 5192 location_t *locp = (location_t *) wi->info; 5193 location = *locp; 5194 } 5195 5196 *walk_subtree = TRUE; 5197 5198 if (TREE_CODE (t) == ARRAY_REF) 5199 check_array_ref (location, t, false /*ignore_off_by_one*/); 5200 5201 if (TREE_CODE (t) == INDIRECT_REF 5202 || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0))) 5203 search_for_addr_array (TREE_OPERAND (t, 0), location); 5204 5205 if (TREE_CODE (t) == ADDR_EXPR) 5206 *walk_subtree = FALSE; 5207 5208 return NULL_TREE; 5209 } 5210 5211 /* Walk over all statements of all reachable BBs and call check_array_bounds 5212 on them. */ 5213 5214 static void 5215 check_all_array_refs (void) 5216 { 5217 basic_block bb; 5218 gimple_stmt_iterator si; 5219 5220 FOR_EACH_BB (bb) 5221 { 5222 edge_iterator ei; 5223 edge e; 5224 bool executable = false; 5225 5226 /* Skip blocks that were found to be unreachable. */ 5227 FOR_EACH_EDGE (e, ei, bb->preds) 5228 executable |= !!(e->flags & EDGE_EXECUTABLE); 5229 if (!executable) 5230 continue; 5231 5232 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) 5233 { 5234 gimple stmt = gsi_stmt (si); 5235 struct walk_stmt_info wi; 5236 if (!gimple_has_location (stmt)) 5237 continue; 5238 5239 if (is_gimple_call (stmt)) 5240 { 5241 size_t i; 5242 size_t n = gimple_call_num_args (stmt); 5243 for (i = 0; i < n; i++) 5244 { 5245 tree arg = gimple_call_arg (stmt, i); 5246 search_for_addr_array (arg, gimple_location (stmt)); 5247 } 5248 } 5249 else 5250 { 5251 memset (&wi, 0, sizeof (wi)); 5252 wi.info = CONST_CAST (void *, (const void *) 5253 gimple_location_ptr (stmt)); 5254 5255 walk_gimple_op (gsi_stmt (si), 5256 check_array_bounds, 5257 &wi); 5258 } 5259 } 5260 } 5261 } 5262 5263 /* Convert range assertion expressions into the implied copies and 5264 copy propagate away the copies. Doing the trivial copy propagation 5265 here avoids the need to run the full copy propagation pass after 5266 VRP. 5267 5268 FIXME, this will eventually lead to copy propagation removing the 5269 names that had useful range information attached to them. For 5270 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>, 5271 then N_i will have the range [3, +INF]. 5272 5273 However, by converting the assertion into the implied copy 5274 operation N_i = N_j, we will then copy-propagate N_j into the uses 5275 of N_i and lose the range information. We may want to hold on to 5276 ASSERT_EXPRs a little while longer as the ranges could be used in 5277 things like jump threading. 5278 5279 The problem with keeping ASSERT_EXPRs around is that passes after 5280 VRP need to handle them appropriately. 5281 5282 Another approach would be to make the range information a first 5283 class property of the SSA_NAME so that it can be queried from 5284 any pass. This is made somewhat more complex by the need for 5285 multiple ranges to be associated with one SSA_NAME. */ 5286 5287 static void 5288 remove_range_assertions (void) 5289 { 5290 basic_block bb; 5291 gimple_stmt_iterator si; 5292 5293 /* Note that the BSI iterator bump happens at the bottom of the 5294 loop and no bump is necessary if we're removing the statement 5295 referenced by the current BSI. 
*/ 5296 FOR_EACH_BB (bb) 5297 for (si = gsi_start_bb (bb); !gsi_end_p (si);) 5298 { 5299 gimple stmt = gsi_stmt (si); 5300 gimple use_stmt; 5301 5302 if (is_gimple_assign (stmt) 5303 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR) 5304 { 5305 tree rhs = gimple_assign_rhs1 (stmt); 5306 tree var; 5307 tree cond = fold (ASSERT_EXPR_COND (rhs)); 5308 use_operand_p use_p; 5309 imm_use_iterator iter; 5310 5311 gcc_assert (cond != boolean_false_node); 5312 5313 /* Propagate the RHS into every use of the LHS. */ 5314 var = ASSERT_EXPR_VAR (rhs); 5315 FOR_EACH_IMM_USE_STMT (use_stmt, iter, 5316 gimple_assign_lhs (stmt)) 5317 FOR_EACH_IMM_USE_ON_STMT (use_p, iter) 5318 { 5319 SET_USE (use_p, var); 5320 gcc_assert (TREE_CODE (var) == SSA_NAME); 5321 } 5322 5323 /* And finally, remove the copy, it is not needed. */ 5324 gsi_remove (&si, true); 5325 release_defs (stmt); 5326 } 5327 else 5328 gsi_next (&si); 5329 } 5330 } 5331 5332 5333 /* Return true if STMT is interesting for VRP. */ 5334 5335 static bool 5336 stmt_interesting_for_vrp (gimple stmt) 5337 { 5338 if (gimple_code (stmt) == GIMPLE_PHI 5339 && is_gimple_reg (gimple_phi_result (stmt)) 5340 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_phi_result (stmt))) 5341 || POINTER_TYPE_P (TREE_TYPE (gimple_phi_result (stmt))))) 5342 return true; 5343 else if (is_gimple_assign (stmt) || is_gimple_call (stmt)) 5344 { 5345 tree lhs = gimple_get_lhs (stmt); 5346 5347 /* In general, assignments with virtual operands are not useful 5348 for deriving ranges, with the obvious exception of calls to 5349 builtin functions. */ 5350 if (lhs && TREE_CODE (lhs) == SSA_NAME 5351 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs)) 5352 || POINTER_TYPE_P (TREE_TYPE (lhs))) 5353 && ((is_gimple_call (stmt) 5354 && gimple_call_fndecl (stmt) != NULL_TREE 5355 && DECL_IS_BUILTIN (gimple_call_fndecl (stmt))) 5356 || !gimple_vuse (stmt))) 5357 return true; 5358 } 5359 else if (gimple_code (stmt) == GIMPLE_COND 5360 || gimple_code (stmt) == GIMPLE_SWITCH) 5361 return true; 5362 5363 return false; 5364 } 5365 5366 5367 /* Initialize local data structures for VRP. */ 5368 5369 static void 5370 vrp_initialize (void) 5371 { 5372 basic_block bb; 5373 5374 vr_value = XCNEWVEC (value_range_t *, num_ssa_names); 5375 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names); 5376 5377 FOR_EACH_BB (bb) 5378 { 5379 gimple_stmt_iterator si; 5380 5381 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) 5382 { 5383 gimple phi = gsi_stmt (si); 5384 if (!stmt_interesting_for_vrp (phi)) 5385 { 5386 tree lhs = PHI_RESULT (phi); 5387 set_value_range_to_varying (get_value_range (lhs)); 5388 prop_set_simulate_again (phi, false); 5389 } 5390 else 5391 prop_set_simulate_again (phi, true); 5392 } 5393 5394 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) 5395 { 5396 gimple stmt = gsi_stmt (si); 5397 5398 /* If the statement is a control insn, then we do not 5399 want to avoid simulating the statement once. Failure 5400 to do so means that those edges will never get added. */ 5401 if (stmt_ends_bb_p (stmt)) 5402 prop_set_simulate_again (stmt, true); 5403 else if (!stmt_interesting_for_vrp (stmt)) 5404 { 5405 ssa_op_iter i; 5406 tree def; 5407 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF) 5408 set_value_range_to_varying (get_value_range (def)); 5409 prop_set_simulate_again (stmt, false); 5410 } 5411 else 5412 prop_set_simulate_again (stmt, true); 5413 } 5414 } 5415 } 5416 5417 5418 /* Visit assignment STMT. If it produces an interesting range, record 5419 the SSA name in *OUTPUT_P. 
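For instance (illustrative, with made-up names and ranges): for the assignment 'j_5 = i_3 + 1' where i_3 has the range [0, 9], the new range computed for j_5 is [1, 10]; if that improves on the previously recorded range, j_5 is stored in *OUTPUT_P and SSA_PROP_INTERESTING is returned.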
*/ 5420 5421 static enum ssa_prop_result 5422 vrp_visit_assignment_or_call (gimple stmt, tree *output_p) 5423 { 5424 tree def, lhs; 5425 ssa_op_iter iter; 5426 enum gimple_code code = gimple_code (stmt); 5427 lhs = gimple_get_lhs (stmt); 5428 5429 /* We only keep track of ranges in integral and pointer types. */ 5430 if (TREE_CODE (lhs) == SSA_NAME 5431 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs)) 5432 /* It is valid to have NULL MIN/MAX values on a type. See 5433 build_range_type. */ 5434 && TYPE_MIN_VALUE (TREE_TYPE (lhs)) 5435 && TYPE_MAX_VALUE (TREE_TYPE (lhs))) 5436 || POINTER_TYPE_P (TREE_TYPE (lhs)))) 5437 { 5438 value_range_t new_vr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; 5439 5440 if (code == GIMPLE_CALL) 5441 extract_range_basic (&new_vr, stmt); 5442 else 5443 extract_range_from_assignment (&new_vr, stmt); 5444 5445 if (update_value_range (lhs, &new_vr)) 5446 { 5447 *output_p = lhs; 5448 5449 if (dump_file && (dump_flags & TDF_DETAILS)) 5450 { 5451 fprintf (dump_file, "Found new range for "); 5452 print_generic_expr (dump_file, lhs, 0); 5453 fprintf (dump_file, ": "); 5454 dump_value_range (dump_file, &new_vr); 5455 fprintf (dump_file, "\n\n"); 5456 } 5457 5458 if (new_vr.type == VR_VARYING) 5459 return SSA_PROP_VARYING; 5460 5461 return SSA_PROP_INTERESTING; 5462 } 5463 5464 return SSA_PROP_NOT_INTERESTING; 5465 } 5466 5467 /* Every other statement produces no useful ranges. */ 5468 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF) 5469 set_value_range_to_varying (get_value_range (def)); 5470 5471 return SSA_PROP_VARYING; 5472 } 5473 5474 /* Helper that gets the value range of the SSA_NAME with version I 5475 or a symbolic range containing the SSA_NAME only if the value range 5476 is varying or undefined. */ 5477 5478 static inline value_range_t 5479 get_vr_for_comparison (int i) 5480 { 5481 value_range_t vr = *(vr_value[i]); 5482 5483 /* If name N_i does not have a valid range, use N_i as its own 5484 range. This allows us to compare against names that may 5485 have N_i in their ranges. */ 5486 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED) 5487 { 5488 vr.type = VR_RANGE; 5489 vr.min = ssa_name (i); 5490 vr.max = ssa_name (i); 5491 } 5492 5493 return vr; 5494 } 5495 5496 /* Compare all the value ranges for names equivalent to VAR with VAL 5497 using comparison code COMP. Return the same value returned by 5498 compare_range_with_value, including the setting of 5499 *STRICT_OVERFLOW_P. */ 5500 5501 static tree 5502 compare_name_with_value (enum tree_code comp, tree var, tree val, 5503 bool *strict_overflow_p) 5504 { 5505 bitmap_iterator bi; 5506 unsigned i; 5507 bitmap e; 5508 tree retval, t; 5509 int used_strict_overflow; 5510 bool sop; 5511 value_range_t equiv_vr; 5512 5513 /* Get the set of equivalences for VAR. */ 5514 e = get_value_range (var)->equiv; 5515 5516 /* Start at -1. Set it to 0 if we do a comparison without relying 5517 on overflow, or 1 if all comparisons rely on overflow. */ 5518 used_strict_overflow = -1; 5519 5520 /* Compare vars' value range with val. */ 5521 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var)); 5522 sop = false; 5523 retval = compare_range_with_value (comp, &equiv_vr, val, &sop); 5524 if (retval) 5525 used_strict_overflow = sop ? 1 : 0; 5526 5527 /* If the equiv set is empty we have done all work we need to do. 
*/ 5528 if (e == NULL) 5529 { 5530 if (retval 5531 && used_strict_overflow > 0) 5532 *strict_overflow_p = true; 5533 return retval; 5534 } 5535 5536 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi) 5537 { 5538 equiv_vr = get_vr_for_comparison (i); 5539 sop = false; 5540 t = compare_range_with_value (comp, &equiv_vr, val, &sop); 5541 if (t) 5542 { 5543 /* If we get different answers from different members 5544 of the equivalence set this check must be in a dead 5545 code region. Folding it to a trap representation 5546 would be correct here. For now just return don't-know. */ 5547 if (retval != NULL 5548 && t != retval) 5549 { 5550 retval = NULL_TREE; 5551 break; 5552 } 5553 retval = t; 5554 5555 if (!sop) 5556 used_strict_overflow = 0; 5557 else if (used_strict_overflow < 0) 5558 used_strict_overflow = 1; 5559 } 5560 } 5561 5562 if (retval 5563 && used_strict_overflow > 0) 5564 *strict_overflow_p = true; 5565 5566 return retval; 5567 } 5568 5569 5570 /* Given a comparison code COMP and names N1 and N2, compare all the 5571 ranges equivalent to N1 against all the ranges equivalent to N2 5572 to determine the value of N1 COMP N2. Return the same value 5573 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate 5574 whether we relied on an overflow infinity in the comparison. */ 5575 5576 5577 static tree 5578 compare_names (enum tree_code comp, tree n1, tree n2, 5579 bool *strict_overflow_p) 5580 { 5581 tree t, retval; 5582 bitmap e1, e2; 5583 bitmap_iterator bi1, bi2; 5584 unsigned i1, i2; 5585 int used_strict_overflow; 5586 static bitmap_obstack *s_obstack = NULL; 5587 static bitmap s_e1 = NULL, s_e2 = NULL; 5588 5589 /* Compare the ranges of every name equivalent to N1 against the 5590 ranges of every name equivalent to N2. */ 5591 e1 = get_value_range (n1)->equiv; 5592 e2 = get_value_range (n2)->equiv; 5593 5594 /* Use the fake bitmaps if e1 or e2 are not available. */ 5595 if (s_obstack == NULL) 5596 { 5597 s_obstack = XNEW (bitmap_obstack); 5598 bitmap_obstack_initialize (s_obstack); 5599 s_e1 = BITMAP_ALLOC (s_obstack); 5600 s_e2 = BITMAP_ALLOC (s_obstack); 5601 } 5602 if (e1 == NULL) 5603 e1 = s_e1; 5604 if (e2 == NULL) 5605 e2 = s_e2; 5606 5607 /* Add N1 and N2 to their own set of equivalences to avoid 5608 duplicating the body of the loop just to check N1 and N2 5609 ranges. */ 5610 bitmap_set_bit (e1, SSA_NAME_VERSION (n1)); 5611 bitmap_set_bit (e2, SSA_NAME_VERSION (n2)); 5612 5613 /* If the equivalence sets have a common intersection, then the two 5614 names can be compared without checking their ranges. */ 5615 if (bitmap_intersect_p (e1, e2)) 5616 { 5617 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1)); 5618 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2)); 5619 5620 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR) 5621 ? boolean_true_node 5622 : boolean_false_node; 5623 } 5624 5625 /* Start at -1. Set it to 0 if we do a comparison without relying 5626 on overflow, or 1 if all comparisons rely on overflow. */ 5627 used_strict_overflow = -1; 5628 5629 /* Otherwise, compare all the equivalent ranges. First, add N1 and 5630 N2 to their own set of equivalences to avoid duplicating the body 5631 of the loop just to check N1 and N2 ranges. 
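(Note that N1 and N2 themselves were added to E1 and E2 above, so the nested loops below also compare N1's and N2's own ranges.)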
*/ 5632 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1) 5633 { 5634 value_range_t vr1 = get_vr_for_comparison (i1); 5635 5636 t = retval = NULL_TREE; 5637 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2) 5638 { 5639 bool sop = false; 5640 5641 value_range_t vr2 = get_vr_for_comparison (i2); 5642 5643 t = compare_ranges (comp, &vr1, &vr2, &sop); 5644 if (t) 5645 { 5646 /* If we get different answers from different members 5647 of the equivalence set this check must be in a dead 5648 code region. Folding it to a trap representation 5649 would be correct here. For now just return don't-know. */ 5650 if (retval != NULL 5651 && t != retval) 5652 { 5653 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1)); 5654 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2)); 5655 return NULL_TREE; 5656 } 5657 retval = t; 5658 5659 if (!sop) 5660 used_strict_overflow = 0; 5661 else if (used_strict_overflow < 0) 5662 used_strict_overflow = 1; 5663 } 5664 } 5665 5666 if (retval) 5667 { 5668 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1)); 5669 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2)); 5670 if (used_strict_overflow > 0) 5671 *strict_overflow_p = true; 5672 return retval; 5673 } 5674 } 5675 5676 /* None of the equivalent ranges are useful in computing this 5677 comparison. */ 5678 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1)); 5679 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2)); 5680 return NULL_TREE; 5681 } 5682 5683 /* Helper function for vrp_evaluate_conditional_warnv. */ 5684 5685 static tree 5686 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code, 5687 tree op0, tree op1, 5688 bool * strict_overflow_p) 5689 { 5690 value_range_t *vr0, *vr1; 5691 5692 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL; 5693 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL; 5694 5695 if (vr0 && vr1) 5696 return compare_ranges (code, vr0, vr1, strict_overflow_p); 5697 else if (vr0 && vr1 == NULL) 5698 return compare_range_with_value (code, vr0, op1, strict_overflow_p); 5699 else if (vr0 == NULL && vr1) 5700 return (compare_range_with_value 5701 (swap_tree_comparison (code), vr1, op0, strict_overflow_p)); 5702 return NULL; 5703 } 5704 5705 /* Helper function for vrp_evaluate_conditional_warnv. */ 5706 5707 static tree 5708 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0, 5709 tree op1, bool use_equiv_p, 5710 bool *strict_overflow_p, bool *only_ranges) 5711 { 5712 tree ret; 5713 if (only_ranges) 5714 *only_ranges = true; 5715 5716 /* We only deal with integral and pointer types. */ 5717 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0)) 5718 && !POINTER_TYPE_P (TREE_TYPE (op0))) 5719 return NULL_TREE; 5720 5721 if (use_equiv_p) 5722 { 5723 if (only_ranges 5724 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges 5725 (code, op0, op1, strict_overflow_p))) 5726 return ret; 5727 *only_ranges = false; 5728 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME) 5729 return compare_names (code, op0, op1, strict_overflow_p); 5730 else if (TREE_CODE (op0) == SSA_NAME) 5731 return compare_name_with_value (code, op0, op1, strict_overflow_p); 5732 else if (TREE_CODE (op1) == SSA_NAME) 5733 return (compare_name_with_value 5734 (swap_tree_comparison (code), op1, op0, strict_overflow_p)); 5735 } 5736 else 5737 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1, 5738 strict_overflow_p); 5739 return NULL_TREE; 5740 } 5741 5742 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range 5743 information. 
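For instance (hypothetical values), if OP0 is known to lie in [10, 20] and OP1 is the constant 5, then OP0 > OP1 evaluates to true.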
Return NULL if the conditional can not be evaluated. 5744 The ranges of all the names equivalent with the operands in COND 5745 will be used when trying to compute the value. If the result is 5746 based on undefined signed overflow, issue a warning if 5747 appropriate. */ 5748 5749 static tree 5750 vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt) 5751 { 5752 bool sop; 5753 tree ret; 5754 bool only_ranges; 5755 5756 /* Some passes and foldings leak constants with overflow flag set 5757 into the IL. Avoid doing wrong things with these and bail out. */ 5758 if ((TREE_CODE (op0) == INTEGER_CST 5759 && TREE_OVERFLOW (op0)) 5760 || (TREE_CODE (op1) == INTEGER_CST 5761 && TREE_OVERFLOW (op1))) 5762 return NULL_TREE; 5763 5764 sop = false; 5765 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop, 5766 &only_ranges); 5767 5768 if (ret && sop) 5769 { 5770 enum warn_strict_overflow_code wc; 5771 const char* warnmsg; 5772 5773 if (is_gimple_min_invariant (ret)) 5774 { 5775 wc = WARN_STRICT_OVERFLOW_CONDITIONAL; 5776 warnmsg = G_("assuming signed overflow does not occur when " 5777 "simplifying conditional to constant"); 5778 } 5779 else 5780 { 5781 wc = WARN_STRICT_OVERFLOW_COMPARISON; 5782 warnmsg = G_("assuming signed overflow does not occur when " 5783 "simplifying conditional"); 5784 } 5785 5786 if (issue_strict_overflow_warning (wc)) 5787 { 5788 location_t location; 5789 5790 if (!gimple_has_location (stmt)) 5791 location = input_location; 5792 else 5793 location = gimple_location (stmt); 5794 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg); 5795 } 5796 } 5797 5798 if (warn_type_limits 5799 && ret && only_ranges 5800 && TREE_CODE_CLASS (code) == tcc_comparison 5801 && TREE_CODE (op0) == SSA_NAME) 5802 { 5803 /* If the comparison is being folded and the operand on the LHS 5804 is being compared against a constant value that is outside of 5805 the natural range of OP0's type, then the predicate will 5806 always fold regardless of the value of OP0. If -Wtype-limits 5807 was specified, emit a warning. */ 5808 tree type = TREE_TYPE (op0); 5809 value_range_t *vr0 = get_value_range (op0); 5810 5811 if (vr0->type != VR_VARYING 5812 && INTEGRAL_TYPE_P (type) 5813 && vrp_val_is_min (vr0->min) 5814 && vrp_val_is_max (vr0->max) 5815 && is_gimple_min_invariant (op1)) 5816 { 5817 location_t location; 5818 5819 if (!gimple_has_location (stmt)) 5820 location = input_location; 5821 else 5822 location = gimple_location (stmt); 5823 5824 warning_at (location, OPT_Wtype_limits, 5825 integer_zerop (ret) 5826 ? G_("comparison always false " 5827 "due to limited range of data type") 5828 : G_("comparison always true " 5829 "due to limited range of data type")); 5830 } 5831 } 5832 5833 return ret; 5834 } 5835 5836 5837 /* Visit conditional statement STMT. If we can determine which edge 5838 will be taken out of STMT's basic block, record it in 5839 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return 5840 SSA_PROP_VARYING. 
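For instance (hypothetical), given the predicate x_1 > 10 with x_1 known to lie in [20, 30], the edge corresponding to the true arm is recorded in *TAKEN_EDGE_P.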
*/ 5841 5842 static enum ssa_prop_result 5843 vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p) 5844 { 5845 tree val; 5846 bool sop; 5847 5848 *taken_edge_p = NULL; 5849 5850 if (dump_file && (dump_flags & TDF_DETAILS)) 5851 { 5852 tree use; 5853 ssa_op_iter i; 5854 5855 fprintf (dump_file, "\nVisiting conditional with predicate: "); 5856 print_gimple_stmt (dump_file, stmt, 0, 0); 5857 fprintf (dump_file, "\nWith known ranges\n"); 5858 5859 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE) 5860 { 5861 fprintf (dump_file, "\t"); 5862 print_generic_expr (dump_file, use, 0); 5863 fprintf (dump_file, ": "); 5864 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]); 5865 } 5866 5867 fprintf (dump_file, "\n"); 5868 } 5869 5870 /* Compute the value of the predicate COND by checking the known 5871 ranges of each of its operands. 5872 5873 Note that we cannot evaluate all the equivalent ranges here 5874 because those ranges may not yet be final and with the current 5875 propagation strategy, we cannot determine when the value ranges 5876 of the names in the equivalence set have changed. 5877 5878 For instance, given the following code fragment 5879 5880 i_5 = PHI <8, i_13> 5881 ... 5882 i_14 = ASSERT_EXPR <i_5, i_5 != 0> 5883 if (i_14 == 1) 5884 ... 5885 5886 Assume that on the first visit to i_14, i_5 has the temporary 5887 range [8, 8] because the second argument to the PHI function is 5888 not yet executable. We derive the range ~[0, 0] for i_14 and the 5889 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for 5890 the first time, since i_14 is equivalent to the range [8, 8], we 5891 determine that the predicate is always false. 5892 5893 On the next round of propagation, i_13 is determined to be 5894 VARYING, which causes i_5 to drop down to VARYING. So, another 5895 visit to i_14 is scheduled. In this second visit, we compute the 5896 exact same range and equivalence set for i_14, namely ~[0, 0] and 5897 { i_5 }. But we did not have the previous range for i_5 5898 registered, so vrp_visit_assignment thinks that the range for 5899 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)' 5900 is not visited again, which stops propagation from visiting 5901 statements in the THEN clause of that if(). 5902 5903 To properly fix this we would need to keep the previous range 5904 value for the names in the equivalence set. This way we would've 5905 discovered that from one visit to the other i_5 changed from 5906 range [8, 8] to VR_VARYING. 5907 5908 However, fixing this apparent limitation may not be worth the 5909 additional checking. Testing on several code bases (GCC, DLV, 5910 MICO, TRAMP3D and SPEC2000) showed that doing this results in 5911 4 more predicates folded in SPEC. */ 5912 sop = false; 5913 5914 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt), 5915 gimple_cond_lhs (stmt), 5916 gimple_cond_rhs (stmt), 5917 false, &sop, NULL); 5918 if (val) 5919 { 5920 if (!sop) 5921 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val); 5922 else 5923 { 5924 if (dump_file && (dump_flags & TDF_DETAILS)) 5925 fprintf (dump_file, 5926 "\nIgnoring predicate evaluation because " 5927 "it assumes that signed overflow is undefined"); 5928 val = NULL_TREE; 5929 } 5930 } 5931 5932 if (dump_file && (dump_flags & TDF_DETAILS)) 5933 { 5934 fprintf (dump_file, "\nPredicate evaluates to: "); 5935 if (val == NULL_TREE) 5936 fprintf (dump_file, "DON'T KNOW\n"); 5937 else 5938 print_generic_stmt (dump_file, val, 0); 5939 } 5940 5941 return (*taken_edge_p) ? 
SSA_PROP_INTERESTING : SSA_PROP_VARYING;
5942 }
5943
5944 /* Searches the case labels of switch statement STMT for the index *IDX of
5945 the CASE_LABEL that includes the value VAL. The search is restricted to
5946 the range [START_IDX, n - 1], where n is the number of case labels.
5947
5948 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
5949 returned.
5950
5951 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
5952 it is placed in IDX and false is returned.
5953
5954 If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
5955 returned. */
5956
5957 static bool
5958 find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx)
5959 {
5960 size_t n = gimple_switch_num_labels (stmt);
5961 size_t low, high;
5962
5963 /* Find case label for minimum of the value range or the next one.
5964 At each iteration we are searching in [low, high - 1]. */
5965
5966 for (low = start_idx, high = n; high != low; )
5967 {
5968 tree t;
5969 int cmp;
5970 /* Note that i != high, so we never ask for n. */
5971 size_t i = (high + low) / 2;
5972 t = gimple_switch_label (stmt, i);
5973
5974 /* Cache the result of comparing CASE_LOW and val. */
5975 cmp = tree_int_cst_compare (CASE_LOW (t), val);
5976
5977 if (cmp == 0)
5978 {
5979 /* Ranges cannot be empty. */
5980 *idx = i;
5981 return true;
5982 }
5983 else if (cmp > 0)
5984 high = i;
5985 else
5986 {
5987 low = i + 1;
5988 if (CASE_HIGH (t) != NULL
5989 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
5990 {
5991 *idx = i;
5992 return true;
5993 }
5994 }
5995 }
5996
5997 *idx = high;
5998 return false;
5999 }
6000
6001 /* Searches the case labels of switch statement STMT for the range of
6002 CASE_LABELs that is used for values between MIN and MAX. The first index
6003 is placed in MIN_IDX. The last index is placed in MAX_IDX. If the range
6004 of CASE_LABELs is empty then MAX_IDX < MIN_IDX.
6005 Returns true if the default label is not needed. */
6006
6007 static bool
6008 find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
6009 size_t *max_idx)
6010 {
6011 size_t i, j;
6012 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
6013 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
6014
6015 if (i == j
6016 && min_take_default
6017 && max_take_default)
6018 {
6019 /* Only the default case label is reached.
6020 Return an empty range. */
6021 *min_idx = 1;
6022 *max_idx = 0;
6023 return false;
6024 }
6025 else
6026 {
6027 bool take_default = min_take_default || max_take_default;
6028 tree low, high;
6029 size_t k;
6030
6031 if (max_take_default)
6032 j--;
6033
6034 /* If the case label range is contiguous, we do not need
6035 the default case label. Verify that. */
6036 high = CASE_LOW (gimple_switch_label (stmt, i));
6037 if (CASE_HIGH (gimple_switch_label (stmt, i)))
6038 high = CASE_HIGH (gimple_switch_label (stmt, i));
6039 for (k = i + 1; k <= j; ++k)
6040 {
6041 low = CASE_LOW (gimple_switch_label (stmt, k));
6042 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high, 0)))
6043 {
6044 take_default = true;
6045 break;
6046 }
6047 high = low;
6048 if (CASE_HIGH (gimple_switch_label (stmt, k)))
6049 high = CASE_HIGH (gimple_switch_label (stmt, k));
6050 }
6051
6052 *min_idx = i;
6053 *max_idx = j;
6054 return !take_default;
6055 }
6056 }
6057
6058 /* Visit switch statement STMT. If we can determine which edge
6059 will be taken out of STMT's basic block, record it in
6060 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6061 SSA_PROP_VARYING.
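For instance (hypothetical), if the switch index is known to lie in [3, 4] and the case labels for 3 and 4 branch to the same destination, the edge to that destination is recorded in *TAKEN_EDGE_P.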
*/ 6062 6063 static enum ssa_prop_result 6064 vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p) 6065 { 6066 tree op, val; 6067 value_range_t *vr; 6068 size_t i = 0, j = 0; 6069 bool take_default; 6070 6071 *taken_edge_p = NULL; 6072 op = gimple_switch_index (stmt); 6073 if (TREE_CODE (op) != SSA_NAME) 6074 return SSA_PROP_VARYING; 6075 6076 vr = get_value_range (op); 6077 if (dump_file && (dump_flags & TDF_DETAILS)) 6078 { 6079 fprintf (dump_file, "\nVisiting switch expression with operand "); 6080 print_generic_expr (dump_file, op, 0); 6081 fprintf (dump_file, " with known range "); 6082 dump_value_range (dump_file, vr); 6083 fprintf (dump_file, "\n"); 6084 } 6085 6086 if (vr->type != VR_RANGE 6087 || symbolic_range_p (vr)) 6088 return SSA_PROP_VARYING; 6089 6090 /* Find the single edge that is taken from the switch expression. */ 6091 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j); 6092 6093 /* Check if the range spans no CASE_LABEL. If so, we only reach the default 6094 label */ 6095 if (j < i) 6096 { 6097 gcc_assert (take_default); 6098 val = gimple_switch_default_label (stmt); 6099 } 6100 else 6101 { 6102 /* Check if labels with index i to j and maybe the default label 6103 are all reaching the same label. */ 6104 6105 val = gimple_switch_label (stmt, i); 6106 if (take_default 6107 && CASE_LABEL (gimple_switch_default_label (stmt)) 6108 != CASE_LABEL (val)) 6109 { 6110 if (dump_file && (dump_flags & TDF_DETAILS)) 6111 fprintf (dump_file, " not a single destination for this " 6112 "range\n"); 6113 return SSA_PROP_VARYING; 6114 } 6115 for (++i; i <= j; ++i) 6116 { 6117 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val)) 6118 { 6119 if (dump_file && (dump_flags & TDF_DETAILS)) 6120 fprintf (dump_file, " not a single destination for this " 6121 "range\n"); 6122 return SSA_PROP_VARYING; 6123 } 6124 } 6125 } 6126 6127 *taken_edge_p = find_edge (gimple_bb (stmt), 6128 label_to_block (CASE_LABEL (val))); 6129 6130 if (dump_file && (dump_flags & TDF_DETAILS)) 6131 { 6132 fprintf (dump_file, " will take edge to "); 6133 print_generic_stmt (dump_file, CASE_LABEL (val), 0); 6134 } 6135 6136 return SSA_PROP_INTERESTING; 6137 } 6138 6139 6140 /* Evaluate statement STMT. If the statement produces a useful range, 6141 return SSA_PROP_INTERESTING and record the SSA name with the 6142 interesting range into *OUTPUT_P. 6143 6144 If STMT is a conditional branch and we can determine its truth 6145 value, the taken edge is recorded in *TAKEN_EDGE_P. 6146 6147 If STMT produces a varying value, return SSA_PROP_VARYING. */ 6148 6149 static enum ssa_prop_result 6150 vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p) 6151 { 6152 tree def; 6153 ssa_op_iter iter; 6154 6155 if (dump_file && (dump_flags & TDF_DETAILS)) 6156 { 6157 fprintf (dump_file, "\nVisiting statement:\n"); 6158 print_gimple_stmt (dump_file, stmt, 0, dump_flags); 6159 fprintf (dump_file, "\n"); 6160 } 6161 6162 if (!stmt_interesting_for_vrp (stmt)) 6163 gcc_assert (stmt_ends_bb_p (stmt)); 6164 else if (is_gimple_assign (stmt) || is_gimple_call (stmt)) 6165 { 6166 /* In general, assignments with virtual operands are not useful 6167 for deriving ranges, with the obvious exception of calls to 6168 builtin functions. 
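(A builtin call may still yield a useful range -- for example, one whose result is known to be non-negative -- even though the call carries virtual operands.)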
*/ 6169 6170 if ((is_gimple_call (stmt) 6171 && gimple_call_fndecl (stmt) != NULL_TREE 6172 && DECL_IS_BUILTIN (gimple_call_fndecl (stmt))) 6173 || !gimple_vuse (stmt)) 6174 return vrp_visit_assignment_or_call (stmt, output_p); 6175 } 6176 else if (gimple_code (stmt) == GIMPLE_COND) 6177 return vrp_visit_cond_stmt (stmt, taken_edge_p); 6178 else if (gimple_code (stmt) == GIMPLE_SWITCH) 6179 return vrp_visit_switch_stmt (stmt, taken_edge_p); 6180 6181 /* All other statements produce nothing of interest for VRP, so mark 6182 their outputs varying and prevent further simulation. */ 6183 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF) 6184 set_value_range_to_varying (get_value_range (def)); 6185 6186 return SSA_PROP_VARYING; 6187 } 6188 6189 6190 /* Meet operation for value ranges. Given two value ranges VR0 and 6191 VR1, store in VR0 a range that contains both VR0 and VR1. This 6192 may not be the smallest possible such range. */ 6193 6194 static void 6195 vrp_meet (value_range_t *vr0, value_range_t *vr1) 6196 { 6197 if (vr0->type == VR_UNDEFINED) 6198 { 6199 copy_value_range (vr0, vr1); 6200 return; 6201 } 6202 6203 if (vr1->type == VR_UNDEFINED) 6204 { 6205 /* Nothing to do. VR0 already has the resulting range. */ 6206 return; 6207 } 6208 6209 if (vr0->type == VR_VARYING) 6210 { 6211 /* Nothing to do. VR0 already has the resulting range. */ 6212 return; 6213 } 6214 6215 if (vr1->type == VR_VARYING) 6216 { 6217 set_value_range_to_varying (vr0); 6218 return; 6219 } 6220 6221 if (vr0->type == VR_RANGE && vr1->type == VR_RANGE) 6222 { 6223 int cmp; 6224 tree min, max; 6225 6226 /* Compute the convex hull of the ranges. The lower limit of 6227 the new range is the minimum of the two ranges. If they 6228 cannot be compared, then give up. */ 6229 cmp = compare_values (vr0->min, vr1->min); 6230 if (cmp == 0 || cmp == 1) 6231 min = vr1->min; 6232 else if (cmp == -1) 6233 min = vr0->min; 6234 else 6235 goto give_up; 6236 6237 /* Similarly, the upper limit of the new range is the maximum 6238 of the two ranges. If they cannot be compared, then 6239 give up. */ 6240 cmp = compare_values (vr0->max, vr1->max); 6241 if (cmp == 0 || cmp == -1) 6242 max = vr1->max; 6243 else if (cmp == 1) 6244 max = vr0->max; 6245 else 6246 goto give_up; 6247 6248 /* Check for useless ranges. */ 6249 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) 6250 && ((vrp_val_is_min (min) || is_overflow_infinity (min)) 6251 && (vrp_val_is_max (max) || is_overflow_infinity (max)))) 6252 goto give_up; 6253 6254 /* The resulting set of equivalences is the intersection of 6255 the two sets. */ 6256 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv) 6257 bitmap_and_into (vr0->equiv, vr1->equiv); 6258 else if (vr0->equiv && !vr1->equiv) 6259 bitmap_clear (vr0->equiv); 6260 6261 set_value_range (vr0, vr0->type, min, max, vr0->equiv); 6262 } 6263 else if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE) 6264 { 6265 /* Two anti-ranges meet only if their complements intersect. 6266 Only handle the case of identical ranges. */ 6267 if (compare_values (vr0->min, vr1->min) == 0 6268 && compare_values (vr0->max, vr1->max) == 0 6269 && compare_values (vr0->min, vr0->max) == 0) 6270 { 6271 /* The resulting set of equivalences is the intersection of 6272 the two sets. 
*/ 6273 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv) 6274 bitmap_and_into (vr0->equiv, vr1->equiv); 6275 else if (vr0->equiv && !vr1->equiv) 6276 bitmap_clear (vr0->equiv); 6277 } 6278 else 6279 goto give_up; 6280 } 6281 else if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE) 6282 { 6283 /* For a numeric range [VAL1, VAL2] and an anti-range ~[VAL3, VAL4], 6284 only handle the case where the ranges have an empty intersection. 6285 The result of the meet operation is the anti-range. */ 6286 if (!symbolic_range_p (vr0) 6287 && !symbolic_range_p (vr1) 6288 && !value_ranges_intersect_p (vr0, vr1)) 6289 { 6290 /* Copy most of VR1 into VR0. Don't copy VR1's equivalence 6291 set. We need to compute the intersection of the two 6292 equivalence sets. */ 6293 if (vr1->type == VR_ANTI_RANGE) 6294 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr0->equiv); 6295 6296 /* The resulting set of equivalences is the intersection of 6297 the two sets. */ 6298 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv) 6299 bitmap_and_into (vr0->equiv, vr1->equiv); 6300 else if (vr0->equiv && !vr1->equiv) 6301 bitmap_clear (vr0->equiv); 6302 } 6303 else 6304 goto give_up; 6305 } 6306 else 6307 gcc_unreachable (); 6308 6309 return; 6310 6311 give_up: 6312 /* Failed to find an efficient meet. Before giving up and setting 6313 the result to VARYING, see if we can at least derive a useful 6314 anti-range. FIXME, all this nonsense about distinguishing 6315 anti-ranges from ranges is necessary because of the odd 6316 semantics of range_includes_zero_p and friends. */ 6317 if (!symbolic_range_p (vr0) 6318 && ((vr0->type == VR_RANGE && !range_includes_zero_p (vr0)) 6319 || (vr0->type == VR_ANTI_RANGE && range_includes_zero_p (vr0))) 6320 && !symbolic_range_p (vr1) 6321 && ((vr1->type == VR_RANGE && !range_includes_zero_p (vr1)) 6322 || (vr1->type == VR_ANTI_RANGE && range_includes_zero_p (vr1)))) 6323 { 6324 set_value_range_to_nonnull (vr0, TREE_TYPE (vr0->min)); 6325 6326 /* Since this meet operation did not result from the meeting of 6327 two equivalent names, VR0 cannot have any equivalences. */ 6328 if (vr0->equiv) 6329 bitmap_clear (vr0->equiv); 6330 } 6331 else 6332 set_value_range_to_varying (vr0); 6333 } 6334 6335 6336 /* Visit all arguments for PHI node PHI that flow through executable 6337 edges. If a valid value range can be derived from all the incoming 6338 value ranges, set a new range for the LHS of PHI. */ 6339 6340 static enum ssa_prop_result 6341 vrp_visit_phi_node (gimple phi) 6342 { 6343 size_t i; 6344 tree lhs = PHI_RESULT (phi); 6345 value_range_t *lhs_vr = get_value_range (lhs); 6346 value_range_t vr_result = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }; 6347 int edges, old_edges; 6348 struct loop *l; 6349 6350 copy_value_range (&vr_result, lhs_vr); 6351 6352 if (dump_file && (dump_flags & TDF_DETAILS)) 6353 { 6354 fprintf (dump_file, "\nVisiting PHI node: "); 6355 print_gimple_stmt (dump_file, phi, 0, dump_flags); 6356 } 6357 6358 edges = 0; 6359 for (i = 0; i < gimple_phi_num_args (phi); i++) 6360 { 6361 edge e = gimple_phi_arg_edge (phi, i); 6362 6363 if (dump_file && (dump_flags & TDF_DETAILS)) 6364 { 6365 fprintf (dump_file, 6366 "\n Argument #%d (%d -> %d %sexecutable)\n", 6367 (int) i, e->src->index, e->dest->index, 6368 (e->flags & EDGE_EXECUTABLE) ? 
"" : "not "); 6369 } 6370 6371 if (e->flags & EDGE_EXECUTABLE) 6372 { 6373 tree arg = PHI_ARG_DEF (phi, i); 6374 value_range_t vr_arg; 6375 6376 ++edges; 6377 6378 if (TREE_CODE (arg) == SSA_NAME) 6379 { 6380 vr_arg = *(get_value_range (arg)); 6381 } 6382 else 6383 { 6384 if (is_overflow_infinity (arg)) 6385 { 6386 arg = copy_node (arg); 6387 TREE_OVERFLOW (arg) = 0; 6388 } 6389 6390 vr_arg.type = VR_RANGE; 6391 vr_arg.min = arg; 6392 vr_arg.max = arg; 6393 vr_arg.equiv = NULL; 6394 } 6395 6396 if (dump_file && (dump_flags & TDF_DETAILS)) 6397 { 6398 fprintf (dump_file, "\t"); 6399 print_generic_expr (dump_file, arg, dump_flags); 6400 fprintf (dump_file, "\n\tValue: "); 6401 dump_value_range (dump_file, &vr_arg); 6402 fprintf (dump_file, "\n"); 6403 } 6404 6405 vrp_meet (&vr_result, &vr_arg); 6406 6407 if (vr_result.type == VR_VARYING) 6408 break; 6409 } 6410 } 6411 6412 /* If this is a loop PHI node SCEV may known more about its 6413 value-range. */ 6414 if (current_loops 6415 && (l = loop_containing_stmt (phi)) 6416 && l->header == gimple_bb (phi)) 6417 adjust_range_with_scev (&vr_result, l, phi, lhs); 6418 6419 if (vr_result.type == VR_VARYING) 6420 goto varying; 6421 6422 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)]; 6423 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges; 6424 6425 /* To prevent infinite iterations in the algorithm, derive ranges 6426 when the new value is slightly bigger or smaller than the 6427 previous one. We don't do this if we have seen a new executable 6428 edge; this helps us avoid an overflow infinity for conditionals 6429 which are not in a loop. */ 6430 if (lhs_vr->type == VR_RANGE && vr_result.type == VR_RANGE 6431 && edges <= old_edges) 6432 { 6433 if (!POINTER_TYPE_P (TREE_TYPE (lhs))) 6434 { 6435 int cmp_min = compare_values (lhs_vr->min, vr_result.min); 6436 int cmp_max = compare_values (lhs_vr->max, vr_result.max); 6437 6438 /* If the new minimum is smaller or larger than the previous 6439 one, go all the way to -INF. In the first case, to avoid 6440 iterating millions of times to reach -INF, and in the 6441 other case to avoid infinite bouncing between different 6442 minimums. */ 6443 if (cmp_min > 0 || cmp_min < 0) 6444 { 6445 /* If we will end up with a (-INF, +INF) range, set it to 6446 VARYING. Same if the previous max value was invalid for 6447 the type and we'd end up with vr_result.min > vr_result.max. */ 6448 if (vrp_val_is_max (vr_result.max) 6449 || compare_values (TYPE_MIN_VALUE (TREE_TYPE (vr_result.min)), 6450 vr_result.max) > 0) 6451 goto varying; 6452 6453 if (!needs_overflow_infinity (TREE_TYPE (vr_result.min)) 6454 || !vrp_var_may_overflow (lhs, phi)) 6455 vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min)); 6456 else if (supports_overflow_infinity (TREE_TYPE (vr_result.min))) 6457 vr_result.min = 6458 negative_overflow_infinity (TREE_TYPE (vr_result.min)); 6459 else 6460 goto varying; 6461 } 6462 6463 /* Similarly, if the new maximum is smaller or larger than 6464 the previous one, go all the way to +INF. */ 6465 if (cmp_max < 0 || cmp_max > 0) 6466 { 6467 /* If we will end up with a (-INF, +INF) range, set it to 6468 VARYING. Same if the previous min value was invalid for 6469 the type and we'd end up with vr_result.max < vr_result.min. 
*/ 6470 if (vrp_val_is_min (vr_result.min) 6471 || compare_values (TYPE_MAX_VALUE (TREE_TYPE (vr_result.max)), 6472 vr_result.min) < 0) 6473 goto varying; 6474 6475 if (!needs_overflow_infinity (TREE_TYPE (vr_result.max)) 6476 || !vrp_var_may_overflow (lhs, phi)) 6477 vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max)); 6478 else if (supports_overflow_infinity (TREE_TYPE (vr_result.max))) 6479 vr_result.max = 6480 positive_overflow_infinity (TREE_TYPE (vr_result.max)); 6481 else 6482 goto varying; 6483 } 6484 } 6485 } 6486 6487 /* If the new range is different than the previous value, keep 6488 iterating. */ 6489 if (update_value_range (lhs, &vr_result)) 6490 { 6491 if (dump_file && (dump_flags & TDF_DETAILS)) 6492 { 6493 fprintf (dump_file, "Found new range for "); 6494 print_generic_expr (dump_file, lhs, 0); 6495 fprintf (dump_file, ": "); 6496 dump_value_range (dump_file, &vr_result); 6497 fprintf (dump_file, "\n\n"); 6498 } 6499 6500 return SSA_PROP_INTERESTING; 6501 } 6502 6503 /* Nothing changed, don't add outgoing edges. */ 6504 return SSA_PROP_NOT_INTERESTING; 6505 6506 /* No match found. Set the LHS to VARYING. */ 6507 varying: 6508 set_value_range_to_varying (lhs_vr); 6509 return SSA_PROP_VARYING; 6510 } 6511 6512 /* Simplify boolean operations if the source is known 6513 to be already a boolean. */ 6514 static bool 6515 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) 6516 { 6517 enum tree_code rhs_code = gimple_assign_rhs_code (stmt); 6518 tree val = NULL; 6519 tree op0, op1; 6520 value_range_t *vr; 6521 bool sop = false; 6522 bool need_conversion; 6523 6524 op0 = gimple_assign_rhs1 (stmt); 6525 if (TYPE_PRECISION (TREE_TYPE (op0)) != 1) 6526 { 6527 if (TREE_CODE (op0) != SSA_NAME) 6528 return false; 6529 vr = get_value_range (op0); 6530 6531 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop); 6532 if (!val || !integer_onep (val)) 6533 return false; 6534 6535 val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop); 6536 if (!val || !integer_onep (val)) 6537 return false; 6538 } 6539 6540 if (rhs_code == TRUTH_NOT_EXPR) 6541 { 6542 rhs_code = NE_EXPR; 6543 op1 = build_int_cst (TREE_TYPE (op0), 1); 6544 } 6545 else 6546 { 6547 op1 = gimple_assign_rhs2 (stmt); 6548 6549 /* Reduce number of cases to handle. */ 6550 if (is_gimple_min_invariant (op1)) 6551 { 6552 /* Exclude anything that should have been already folded. */ 6553 if (rhs_code != EQ_EXPR 6554 && rhs_code != NE_EXPR 6555 && rhs_code != TRUTH_XOR_EXPR) 6556 return false; 6557 6558 if (!integer_zerop (op1) 6559 && !integer_onep (op1) 6560 && !integer_all_onesp (op1)) 6561 return false; 6562 6563 /* Limit the number of cases we have to consider. */ 6564 if (rhs_code == EQ_EXPR) 6565 { 6566 rhs_code = NE_EXPR; 6567 op1 = fold_unary (TRUTH_NOT_EXPR, TREE_TYPE (op1), op1); 6568 } 6569 } 6570 else 6571 { 6572 /* Punt on A == B as there is no BIT_XNOR_EXPR. 
*/ 6573 if (rhs_code == EQ_EXPR) 6574 return false; 6575 6576 if (TYPE_PRECISION (TREE_TYPE (op1)) != 1) 6577 { 6578 vr = get_value_range (op1); 6579 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop); 6580 if (!val || !integer_onep (val)) 6581 return false; 6582 6583 val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop); 6584 if (!val || !integer_onep (val)) 6585 return false; 6586 } 6587 } 6588 } 6589 6590 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC)) 6591 { 6592 location_t location; 6593 6594 if (!gimple_has_location (stmt)) 6595 location = input_location; 6596 else 6597 location = gimple_location (stmt); 6598 6599 if (rhs_code == TRUTH_AND_EXPR || rhs_code == TRUTH_OR_EXPR) 6600 warning_at (location, OPT_Wstrict_overflow, 6601 _("assuming signed overflow does not occur when " 6602 "simplifying && or || to & or |")); 6603 else 6604 warning_at (location, OPT_Wstrict_overflow, 6605 _("assuming signed overflow does not occur when " 6606 "simplifying ==, != or ! to identity or ^")); 6607 } 6608 6609 need_conversion = 6610 !useless_type_conversion_p (TREE_TYPE (gimple_assign_lhs (stmt)), 6611 TREE_TYPE (op0)); 6612 6613 /* Make sure to not sign-extend -1 as a boolean value. */ 6614 if (need_conversion 6615 && !TYPE_UNSIGNED (TREE_TYPE (op0)) 6616 && TYPE_PRECISION (TREE_TYPE (op0)) == 1) 6617 return false; 6618 6619 switch (rhs_code) 6620 { 6621 case TRUTH_AND_EXPR: 6622 rhs_code = BIT_AND_EXPR; 6623 break; 6624 case TRUTH_OR_EXPR: 6625 rhs_code = BIT_IOR_EXPR; 6626 break; 6627 case TRUTH_XOR_EXPR: 6628 case NE_EXPR: 6629 if (integer_zerop (op1)) 6630 { 6631 gimple_assign_set_rhs_with_ops (gsi, 6632 need_conversion ? NOP_EXPR : SSA_NAME, 6633 op0, NULL); 6634 update_stmt (gsi_stmt (*gsi)); 6635 return true; 6636 } 6637 6638 rhs_code = BIT_XOR_EXPR; 6639 break; 6640 default: 6641 gcc_unreachable (); 6642 } 6643 6644 if (need_conversion) 6645 return false; 6646 6647 gimple_assign_set_rhs_with_ops (gsi, rhs_code, op0, op1); 6648 update_stmt (gsi_stmt (*gsi)); 6649 return true; 6650 } 6651 6652 /* Simplify a division or modulo operator to a right shift or 6653 bitwise and if the first operand is unsigned or is greater 6654 than zero and the second operand is an exact power of two. 
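For instance, assuming X is known to be non-negative:

     X / 16  ->  X >> 4
     X % 16  ->  X & 15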
*/ 6655 6656 static bool 6657 simplify_div_or_mod_using_ranges (gimple stmt) 6658 { 6659 enum tree_code rhs_code = gimple_assign_rhs_code (stmt); 6660 tree val = NULL; 6661 tree op0 = gimple_assign_rhs1 (stmt); 6662 tree op1 = gimple_assign_rhs2 (stmt); 6663 value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt)); 6664 6665 if (TYPE_UNSIGNED (TREE_TYPE (op0))) 6666 { 6667 val = integer_one_node; 6668 } 6669 else 6670 { 6671 bool sop = false; 6672 6673 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop); 6674 6675 if (val 6676 && sop 6677 && integer_onep (val) 6678 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC)) 6679 { 6680 location_t location; 6681 6682 if (!gimple_has_location (stmt)) 6683 location = input_location; 6684 else 6685 location = gimple_location (stmt); 6686 warning_at (location, OPT_Wstrict_overflow, 6687 "assuming signed overflow does not occur when " 6688 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>"); 6689 } 6690 } 6691 6692 if (val && integer_onep (val)) 6693 { 6694 tree t; 6695 6696 if (rhs_code == TRUNC_DIV_EXPR) 6697 { 6698 t = build_int_cst (NULL_TREE, tree_log2 (op1)); 6699 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR); 6700 gimple_assign_set_rhs1 (stmt, op0); 6701 gimple_assign_set_rhs2 (stmt, t); 6702 } 6703 else 6704 { 6705 t = build_int_cst (TREE_TYPE (op1), 1); 6706 t = int_const_binop (MINUS_EXPR, op1, t, 0); 6707 t = fold_convert (TREE_TYPE (op0), t); 6708 6709 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR); 6710 gimple_assign_set_rhs1 (stmt, op0); 6711 gimple_assign_set_rhs2 (stmt, t); 6712 } 6713 6714 update_stmt (stmt); 6715 return true; 6716 } 6717 6718 return false; 6719 } 6720 6721 /* If the operand to an ABS_EXPR is >= 0, then eliminate the 6722 ABS_EXPR. If the operand is <= 0, then simplify the 6723 ABS_EXPR into a NEGATE_EXPR. */ 6724 6725 static bool 6726 simplify_abs_using_ranges (gimple stmt) 6727 { 6728 tree val = NULL; 6729 tree op = gimple_assign_rhs1 (stmt); 6730 tree type = TREE_TYPE (op); 6731 value_range_t *vr = get_value_range (op); 6732 6733 if (TYPE_UNSIGNED (type)) 6734 { 6735 val = integer_zero_node; 6736 } 6737 else if (vr) 6738 { 6739 bool sop = false; 6740 6741 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop); 6742 if (!val) 6743 { 6744 sop = false; 6745 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, 6746 &sop); 6747 6748 if (val) 6749 { 6750 if (integer_zerop (val)) 6751 val = integer_one_node; 6752 else if (integer_onep (val)) 6753 val = integer_zero_node; 6754 } 6755 } 6756 6757 if (val 6758 && (integer_onep (val) || integer_zerop (val))) 6759 { 6760 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC)) 6761 { 6762 location_t location; 6763 6764 if (!gimple_has_location (stmt)) 6765 location = input_location; 6766 else 6767 location = gimple_location (stmt); 6768 warning_at (location, OPT_Wstrict_overflow, 6769 "assuming signed overflow does not occur when " 6770 "simplifying %<abs (X)%> to %<X%> or %<-X%>"); 6771 } 6772 6773 gimple_assign_set_rhs1 (stmt, op); 6774 if (integer_onep (val)) 6775 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR); 6776 else 6777 gimple_assign_set_rhs_code (stmt, SSA_NAME); 6778 update_stmt (stmt); 6779 return true; 6780 } 6781 } 6782 6783 return false; 6784 } 6785 6786 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has 6787 a known value range VR. 6788 6789 If there is one and only one value which will satisfy the 6790 conditional, then return that value. Else return NULL. 
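For instance (hypothetical), if OP0 has the range [0, 3] and the conditional is OP0 > 2, the only value which satisfies it is 3.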
*/
6792 static tree
6793 test_for_singularity (enum tree_code cond_code, tree op0,
6794 tree op1, value_range_t *vr)
6795 {
6796 tree min = NULL;
6797 tree max = NULL;
6798
6799 /* Extract minimum/maximum values which satisfy the
6800 conditional as it was written. */
6801 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
6802 {
6803 /* This should not be negative infinity; there is no overflow
6804 here. */
6805 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
6806
6807 max = op1;
6808 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
6809 {
6810 tree one = build_int_cst (TREE_TYPE (op0), 1);
6811 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
6812 if (EXPR_P (max))
6813 TREE_NO_WARNING (max) = 1;
6814 }
6815 }
6816 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
6817 {
6818 /* This should not be positive infinity; there is no overflow
6819 here. */
6820 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
6821
6822 min = op1;
6823 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
6824 {
6825 tree one = build_int_cst (TREE_TYPE (op0), 1);
6826 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
6827 if (EXPR_P (min))
6828 TREE_NO_WARNING (min) = 1;
6829 }
6830 }
6831
6832 /* Now refine the minimum and maximum values using any
6833 value range information we have for OP0. */
6834 if (min && max)
6835 {
6836 if (compare_values (vr->min, min) == 1)
6837 min = vr->min;
6838 if (compare_values (vr->max, max) == -1)
6839 max = vr->max;
6840
6841 /* If the new min/max values have converged to a single value,
6842 then there is only one value which can satisfy the condition;
6843 return that value. */
6844 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
6845 return min;
6846 }
6847 return NULL;
6848 }
6849
6850 /* Simplify a conditional using a relational operator to an equality
6851 test if the range information indicates only one value can satisfy
6852 the original conditional. */
6853
6854 static bool
6855 simplify_cond_using_ranges (gimple stmt)
6856 {
6857 tree op0 = gimple_cond_lhs (stmt);
6858 tree op1 = gimple_cond_rhs (stmt);
6859 enum tree_code cond_code = gimple_cond_code (stmt);
6860
6861 if (cond_code != NE_EXPR
6862 && cond_code != EQ_EXPR
6863 && TREE_CODE (op0) == SSA_NAME
6864 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
6865 && is_gimple_min_invariant (op1))
6866 {
6867 value_range_t *vr = get_value_range (op0);
6868
6869 /* If we have range information for OP0, then we might be
6870 able to simplify this conditional. */
6871 if (vr->type == VR_RANGE)
6872 {
6873 tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
6874
6875 if (new_tree)
6876 {
6877 if (dump_file)
6878 {
6879 fprintf (dump_file, "Simplified relational ");
6880 print_gimple_stmt (dump_file, stmt, 0, 0);
6881 fprintf (dump_file, " into ");
6882 }
6883
6884 gimple_cond_set_code (stmt, EQ_EXPR);
6885 gimple_cond_set_lhs (stmt, op0);
6886 gimple_cond_set_rhs (stmt, new_tree);
6887
6888 update_stmt (stmt);
6889
6890 if (dump_file)
6891 {
6892 print_gimple_stmt (dump_file, stmt, 0, 0);
6893 fprintf (dump_file, "\n");
6894 }
6895
6896 return true;
6897 }
6898
6899 /* Try again after inverting the condition. We only deal
6900 with integral types here, so no need to worry about
6901 issues with inverting FP comparisons.
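(If the inverted comparison is satisfied by exactly one value, the original comparison is equivalent to OP0 != that value, which is what we emit below.)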
*/ 6902 cond_code = invert_tree_comparison (cond_code, false); 6903 new_tree = test_for_singularity (cond_code, op0, op1, vr); 6904 6905 if (new_tree) 6906 { 6907 if (dump_file) 6908 { 6909 fprintf (dump_file, "Simplified relational "); 6910 print_gimple_stmt (dump_file, stmt, 0, 0); 6911 fprintf (dump_file, " into "); 6912 } 6913 6914 gimple_cond_set_code (stmt, NE_EXPR); 6915 gimple_cond_set_lhs (stmt, op0); 6916 gimple_cond_set_rhs (stmt, new_tree); 6917 6918 update_stmt (stmt); 6919 6920 if (dump_file) 6921 { 6922 print_gimple_stmt (dump_file, stmt, 0, 0); 6923 fprintf (dump_file, "\n"); 6924 } 6925 6926 return true; 6927 } 6928 } 6929 } 6930 6931 return false; 6932 } 6933 6934 /* Simplify a switch statement using the value range of the switch 6935 argument. */ 6936 6937 static bool 6938 simplify_switch_using_ranges (gimple stmt) 6939 { 6940 tree op = gimple_switch_index (stmt); 6941 value_range_t *vr; 6942 bool take_default; 6943 edge e; 6944 edge_iterator ei; 6945 size_t i = 0, j = 0, n, n2; 6946 tree vec2; 6947 switch_update su; 6948 6949 if (TREE_CODE (op) == SSA_NAME) 6950 { 6951 vr = get_value_range (op); 6952 6953 /* We can only handle integer ranges. */ 6954 if (vr->type != VR_RANGE 6955 || symbolic_range_p (vr)) 6956 return false; 6957 6958 /* Find case label for min/max of the value range. */ 6959 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j); 6960 } 6961 else if (TREE_CODE (op) == INTEGER_CST) 6962 { 6963 take_default = !find_case_label_index (stmt, 1, op, &i); 6964 if (take_default) 6965 { 6966 i = 1; 6967 j = 0; 6968 } 6969 else 6970 { 6971 j = i; 6972 } 6973 } 6974 else 6975 return false; 6976 6977 n = gimple_switch_num_labels (stmt); 6978 6979 /* Bail out if this is just all edges taken. */ 6980 if (i == 1 6981 && j == n - 1 6982 && take_default) 6983 return false; 6984 6985 /* Build a new vector of taken case labels. */ 6986 vec2 = make_tree_vec (j - i + 1 + (int)take_default); 6987 n2 = 0; 6988 6989 /* Add the default edge, if necessary. */ 6990 if (take_default) 6991 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt); 6992 6993 for (; i <= j; ++i, ++n2) 6994 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i); 6995 6996 /* Mark needed edges. */ 6997 for (i = 0; i < n2; ++i) 6998 { 6999 e = find_edge (gimple_bb (stmt), 7000 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i)))); 7001 e->aux = (void *)-1; 7002 } 7003 7004 /* Queue not needed edges for later removal. */ 7005 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs) 7006 { 7007 if (e->aux == (void *)-1) 7008 { 7009 e->aux = NULL; 7010 continue; 7011 } 7012 7013 if (dump_file && (dump_flags & TDF_DETAILS)) 7014 { 7015 fprintf (dump_file, "removing unreachable case label\n"); 7016 } 7017 VEC_safe_push (edge, heap, to_remove_edges, e); 7018 e->flags &= ~EDGE_EXECUTABLE; 7019 } 7020 7021 /* And queue an update for the stmt. */ 7022 su.stmt = stmt; 7023 su.vec = vec2; 7024 VEC_safe_push (switch_update, heap, to_update_switch_stmts, &su); 7025 return false; 7026 } 7027 7028 /* Simplify STMT using ranges if possible. 
*/ 7029 7030 static bool 7031 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi) 7032 { 7033 gimple stmt = gsi_stmt (*gsi); 7034 if (is_gimple_assign (stmt)) 7035 { 7036 enum tree_code rhs_code = gimple_assign_rhs_code (stmt); 7037 7038 switch (rhs_code) 7039 { 7040 case EQ_EXPR: 7041 case NE_EXPR: 7042 case TRUTH_NOT_EXPR: 7043 case TRUTH_AND_EXPR: 7044 case TRUTH_OR_EXPR: 7045 case TRUTH_XOR_EXPR: 7046 /* Transform EQ_EXPR, NE_EXPR, TRUTH_NOT_EXPR into BIT_XOR_EXPR 7047 or identity if the RHS is zero or one, and the LHS are known 7048 to be boolean values. Transform all TRUTH_*_EXPR into 7049 BIT_*_EXPR if both arguments are known to be boolean values. */ 7050 if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt)))) 7051 return simplify_truth_ops_using_ranges (gsi, stmt); 7052 break; 7053 7054 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR 7055 and BIT_AND_EXPR respectively if the first operand is greater 7056 than zero and the second operand is an exact power of two. */ 7057 case TRUNC_DIV_EXPR: 7058 case TRUNC_MOD_EXPR: 7059 if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))) 7060 && integer_pow2p (gimple_assign_rhs2 (stmt))) 7061 return simplify_div_or_mod_using_ranges (stmt); 7062 break; 7063 7064 /* Transform ABS (X) into X or -X as appropriate. */ 7065 case ABS_EXPR: 7066 if (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME 7067 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt)))) 7068 return simplify_abs_using_ranges (stmt); 7069 break; 7070 7071 default: 7072 break; 7073 } 7074 } 7075 else if (gimple_code (stmt) == GIMPLE_COND) 7076 return simplify_cond_using_ranges (stmt); 7077 else if (gimple_code (stmt) == GIMPLE_SWITCH) 7078 return simplify_switch_using_ranges (stmt); 7079 7080 return false; 7081 } 7082 7083 /* If the statement pointed by SI has a predicate whose value can be 7084 computed using the value range information computed by VRP, compute 7085 its value and return true. Otherwise, return false. */ 7086 7087 static bool 7088 fold_predicate_in (gimple_stmt_iterator *si) 7089 { 7090 bool assignment_p = false; 7091 tree val; 7092 gimple stmt = gsi_stmt (*si); 7093 7094 if (is_gimple_assign (stmt) 7095 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison) 7096 { 7097 assignment_p = true; 7098 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt), 7099 gimple_assign_rhs1 (stmt), 7100 gimple_assign_rhs2 (stmt), 7101 stmt); 7102 } 7103 else if (gimple_code (stmt) == GIMPLE_COND) 7104 val = vrp_evaluate_conditional (gimple_cond_code (stmt), 7105 gimple_cond_lhs (stmt), 7106 gimple_cond_rhs (stmt), 7107 stmt); 7108 else 7109 return false; 7110 7111 if (val) 7112 { 7113 if (assignment_p) 7114 val = fold_convert (gimple_expr_type (stmt), val); 7115 7116 if (dump_file) 7117 { 7118 fprintf (dump_file, "Folding predicate "); 7119 print_gimple_expr (dump_file, stmt, 0, 0); 7120 fprintf (dump_file, " to "); 7121 print_generic_expr (dump_file, val, 0); 7122 fprintf (dump_file, "\n"); 7123 } 7124 7125 if (is_gimple_assign (stmt)) 7126 gimple_assign_set_rhs_from_tree (si, val); 7127 else 7128 { 7129 gcc_assert (gimple_code (stmt) == GIMPLE_COND); 7130 if (integer_zerop (val)) 7131 gimple_cond_make_false (stmt); 7132 else if (integer_onep (val)) 7133 gimple_cond_make_true (stmt); 7134 else 7135 gcc_unreachable (); 7136 } 7137 7138 return true; 7139 } 7140 7141 return false; 7142 } 7143 7144 /* Callback for substitute_and_fold folding the stmt at *SI. 
*/ 7145 7146 static bool 7147 vrp_fold_stmt (gimple_stmt_iterator *si) 7148 { 7149 if (fold_predicate_in (si)) 7150 return true; 7151 7152 return simplify_stmt_using_ranges (si); 7153 } 7154 7155 /* Stack of dest,src equivalency pairs that need to be restored after 7156 each attempt to thread a block's incoming edge to an outgoing edge. 7157 7158 A NULL entry is used to mark the end of pairs which need to be 7159 restored. */ 7160 static VEC(tree,heap) *stack; 7161 7162 /* A trivial wrapper so that we can present the generic jump threading 7163 code with a simple API for simplifying statements. STMT is the 7164 statement we want to simplify, WITHIN_STMT provides the location 7165 for any overflow warnings. */ 7166 7167 static tree 7168 simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt) 7169 { 7170 /* We only use VRP information to simplify conditionals. This is 7171 overly conservative, but it's unclear if doing more would be 7172 worth the compile time cost. */ 7173 if (gimple_code (stmt) != GIMPLE_COND) 7174 return NULL; 7175 7176 return vrp_evaluate_conditional (gimple_cond_code (stmt), 7177 gimple_cond_lhs (stmt), 7178 gimple_cond_rhs (stmt), within_stmt); 7179 } 7180 7181 /* Blocks which have more than one predecessor and more than 7182 one successor present jump threading opportunities, i.e., 7183 when the block is reached from a specific predecessor, we 7184 may be able to determine which of the outgoing edges will 7185 be traversed. When this optimization applies, we are able 7186 to avoid conditionals at runtime and we may expose secondary 7187 optimization opportunities. 7188 7189 This routine is effectively a driver for the generic jump 7190 threading code. It basically just presents the generic code 7191 with edges that may be suitable for jump threading. 7192 7193 Unlike DOM, we do not iterate VRP if jump threading was successful. 7194 While iterating may expose new opportunities for VRP, it is expected 7195 those opportunities would be very limited and the compile time cost 7196 to expose those opportunities would be significant. 7197 7198 As jump threading opportunities are discovered, they are registered 7199 for later realization. */ 7200 7201 static void 7202 identify_jump_threads (void) 7203 { 7204 basic_block bb; 7205 gimple dummy; 7206 int i; 7207 edge e; 7208 7209 /* Ugh. When substituting values earlier in this pass we can 7210 wipe the dominance information. So rebuild the dominator 7211 information as we need it within the jump threading code. */ 7212 calculate_dominance_info (CDI_DOMINATORS); 7213 7214 /* We do not allow VRP information to be used for jump threading 7215 across a back edge in the CFG. Otherwise it becomes too 7216 difficult to avoid eliminating loop exit tests. Of course 7217 EDGE_DFS_BACK is not accurate at this time so we have to 7218 recompute it. */ 7219 mark_dfs_back_edges (); 7220 7221 /* Do not thread across edges we are about to remove. Just marking 7222 them as EDGE_DFS_BACK will do. */ 7223 for (i = 0; VEC_iterate (edge, to_remove_edges, i, e); ++i) 7224 e->flags |= EDGE_DFS_BACK; 7225 7226 /* Allocate our unwinder stack to unwind any temporary equivalences 7227 that might be recorded. */ 7228 stack = VEC_alloc (tree, heap, 20); 7229 7230 /* To avoid lots of silly node creation, we create a single 7231 conditional and just modify it in-place when attempting to 7232 thread jumps. 
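(thread_across_edge, called below, fills in this dummy's comparison code and operands for each condition it asks simplify_stmt_for_jump_threading to evaluate.)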
*/ 7233 dummy = gimple_build_cond (EQ_EXPR, 7234 integer_zero_node, integer_zero_node, 7235 NULL, NULL); 7236 7237 /* Walk through all the blocks finding those which present a 7238 potential jump threading opportunity. We could set this up 7239 as a dominator walker and record data during the walk, but 7240 I doubt it's worth the effort for the classes of jump 7241 threading opportunities we are trying to identify at this 7242 point in compilation. */ 7243 FOR_EACH_BB (bb) 7244 { 7245 gimple last; 7246 7247 /* If the generic jump threading code does not find this block 7248 interesting, then there is nothing to do. */ 7249 if (! potentially_threadable_block (bb)) 7250 continue; 7251 7252 /* We only care about blocks ending in a COND_EXPR. While there 7253 may be some value in handling SWITCH_EXPR here, I doubt it's 7254 terribly important. */ 7255 last = gsi_stmt (gsi_last_bb (bb)); 7256 if (gimple_code (last) != GIMPLE_COND) 7257 continue; 7258 7259 /* We're basically looking for any kind of conditional with 7260 integral type arguments. */ 7261 if (TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME 7262 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))) 7263 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME 7264 || is_gimple_min_invariant (gimple_cond_rhs (last))) 7265 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_rhs (last)))) 7266 { 7267 edge_iterator ei; 7268 7269 /* We've got a block with multiple predecessors and multiple 7270 successors which also ends in a suitable conditional. For 7271 each predecessor, see if we can thread it to a specific 7272 successor. */ 7273 FOR_EACH_EDGE (e, ei, bb->preds) 7274 { 7275 /* Do not thread across back edges or abnormal edges 7276 in the CFG. */ 7277 if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX)) 7278 continue; 7279 7280 thread_across_edge (dummy, e, true, &stack, 7281 simplify_stmt_for_jump_threading); 7282 } 7283 } 7284 } 7285 7286 /* We do not actually update the CFG or SSA graphs at this point as 7287 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet 7288 handle ASSERT_EXPRs gracefully. */ 7289 } 7290 7291 /* We identified all the jump threading opportunities earlier, but could 7292 not transform the CFG at that time. This routine transforms the 7293 CFG and arranges for the dominator tree to be rebuilt if necessary. 7294 7295 Note the SSA graph update will occur during the normal TODO 7296 processing by the pass manager. */ 7297 static void 7298 finalize_jump_threads (void) 7299 { 7300 thread_through_all_blocks (false); 7301 VEC_free (tree, heap, stack); 7302 } 7303 7304 7305 /* Traverse all the blocks folding conditionals with known ranges. */ 7306 7307 static void 7308 vrp_finalize (void) 7309 { 7310 size_t i; 7311 prop_value_t *single_val_range; 7312 bool do_value_subst_p; 7313 unsigned num = num_ssa_names; 7314 7315 if (dump_file) 7316 { 7317 fprintf (dump_file, "\nValue ranges after VRP:\n\n"); 7318 dump_all_value_ranges (dump_file); 7319 fprintf (dump_file, "\n"); 7320 } 7321 7322 /* We may have ended with ranges that have exactly one value. Those 7323 values can be substituted as any other const propagated 7324 value using substitute_and_fold. 
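For instance (hypothetical), if x_3 ended up with the range [7, 7], every use of x_3 can be replaced by the constant 7.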
*/
7325 single_val_range = XCNEWVEC (prop_value_t, num);
7326
7327 do_value_subst_p = false;
7328 for (i = 0; i < num; i++)
7329 if (vr_value[i]
7330 && vr_value[i]->type == VR_RANGE
7331 && vr_value[i]->min == vr_value[i]->max
7332 && is_gimple_min_invariant (vr_value[i]->min))
7333 {
7334 single_val_range[i].value = vr_value[i]->min;
7335 do_value_subst_p = true;
7336 }
7337
7338 if (!do_value_subst_p)
7339 {
7340 /* We found no single-valued ranges; don't waste time trying to
7341 do single-value substitution in substitute_and_fold. */
7342 free (single_val_range);
7343 single_val_range = NULL;
7344 }
7345
7346 substitute_and_fold (single_val_range, vrp_fold_stmt, false);
7347
7348 if (warn_array_bounds)
7349 check_all_array_refs ();
7350
7351 /* We must identify jump threading opportunities before we release
7352 the data structures built by VRP. */
7353 identify_jump_threads ();
7354
7355 /* Free allocated memory. */
7356 for (i = 0; i < num; i++)
7357 if (vr_value[i])
7358 {
7359 BITMAP_FREE (vr_value[i]->equiv);
7360 free (vr_value[i]);
7361 }
7362
7363 free (single_val_range);
7364 free (vr_value);
7365 free (vr_phi_edge_counts);
7366
7367 /* So that we can distinguish between VRP data being available
7368 and not available. */
7369 vr_value = NULL;
7370 vr_phi_edge_counts = NULL;
7371 }
7372
7373
7374 /* Main entry point to VRP (Value Range Propagation). This pass is
7375 loosely based on J. R. C. Patterson, ``Accurate Static Branch
7376 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
7377 Programming Language Design and Implementation, pp. 67-78, 1995.
7378 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
7379
7380 This is essentially an SSA-CCP pass modified to deal with ranges
7381 instead of constants.
7382
7383 While propagating ranges, we may find that two or more SSA names
7384 have equivalent, though distinct, ranges. For instance,
7385
7386 1 x_9 = p_3->a;
7387 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
7388 3 if (p_4 == q_2)
7389 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
7390 5 endif
7391 6 if (q_2)
7392
7393 In the code above, pointer p_5 has range [q_2, q_2], but from the
7394 code we can also determine that p_5 cannot be NULL and, if q_2 had
7395 a non-varying range, p_5's range should also be compatible with it.
7396
7397 These equivalences are created by two expressions: ASSERT_EXPR and
7398 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
7399 result of another assertion, we can use the fact that p_5 and
7400 p_4 are equivalent when evaluating p_5's range.
7401
7402 Together with value ranges, we also propagate these equivalences
7403 between names so that we can take advantage of information from
7404 multiple ranges when doing final replacement. Note that this
7405 equivalency relation is transitive but not symmetric.
7406
7407 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
7408 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
7409 in contexts where that assertion does not hold (e.g., in line 6).
7410
7411 TODO: the main difference between this pass and Patterson's is that
7412 we do not propagate edge probabilities. We only compute whether
7413 edges can be taken or not. That is, instead of having a spectrum
7414 of jump probabilities between 0 and 1, we only deal with 0, 1 and
7415 DON'T KNOW. In the future, it may be worthwhile to propagate
7416 probabilities to aid branch prediction.
*/ 7417 7418 static unsigned int 7419 execute_vrp (void) 7420 { 7421 int i; 7422 edge e; 7423 switch_update *su; 7424 7425 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS); 7426 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa); 7427 scev_initialize (); 7428 7429 insert_range_assertions (); 7430 7431 to_remove_edges = VEC_alloc (edge, heap, 10); 7432 to_update_switch_stmts = VEC_alloc (switch_update, heap, 5); 7433 threadedge_initialize_values (); 7434 7435 vrp_initialize (); 7436 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node); 7437 vrp_finalize (); 7438 7439 /* ASSERT_EXPRs must be removed before finalizing jump threads 7440 as finalizing jump threads calls the CFG cleanup code which 7441 does not properly handle ASSERT_EXPRs. */ 7442 remove_range_assertions (); 7443 7444 /* If we exposed any new variables, go ahead and put them into 7445 SSA form now, before we handle jump threading. This simplifies 7446 interactions between rewriting of _DECL nodes into SSA form 7447 and rewriting SSA_NAME nodes into SSA form after block 7448 duplication and CFG manipulation. */ 7449 update_ssa (TODO_update_ssa); 7450 7451 finalize_jump_threads (); 7452 7453 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the 7454 CFG in a broken state and requires a cfg_cleanup run. */ 7455 for (i = 0; VEC_iterate (edge, to_remove_edges, i, e); ++i) 7456 remove_edge (e); 7457 /* Update SWITCH_EXPR case label vector. */ 7458 for (i = 0; VEC_iterate (switch_update, to_update_switch_stmts, i, su); ++i) 7459 { 7460 size_t j; 7461 size_t n = TREE_VEC_LENGTH (su->vec); 7462 tree label; 7463 gimple_switch_set_num_labels (su->stmt, n); 7464 for (j = 0; j < n; j++) 7465 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j)); 7466 /* As we may have replaced the default label with a regular one 7467 make sure to make it a real default label again. This ensures 7468 optimal expansion. */ 7469 label = gimple_switch_default_label (su->stmt); 7470 CASE_LOW (label) = NULL_TREE; 7471 CASE_HIGH (label) = NULL_TREE; 7472 } 7473 7474 if (VEC_length (edge, to_remove_edges) > 0) 7475 free_dominance_info (CDI_DOMINATORS); 7476 7477 VEC_free (edge, heap, to_remove_edges); 7478 VEC_free (switch_update, heap, to_update_switch_stmts); 7479 threadedge_finalize_values (); 7480 7481 scev_finalize (); 7482 loop_optimizer_finalize (); 7483 return 0; 7484 } 7485 7486 static bool 7487 gate_vrp (void) 7488 { 7489 return flag_tree_vrp != 0; 7490 } 7491 7492 struct gimple_opt_pass pass_vrp = 7493 { 7494 { 7495 GIMPLE_PASS, 7496 "vrp", /* name */ 7497 gate_vrp, /* gate */ 7498 execute_vrp, /* execute */ 7499 NULL, /* sub */ 7500 NULL, /* next */ 7501 0, /* static_pass_number */ 7502 TV_TREE_VRP, /* tv_id */ 7503 PROP_ssa, /* properties_required */ 7504 0, /* properties_provided */ 7505 0, /* properties_destroyed */ 7506 0, /* todo_flags_start */ 7507 TODO_cleanup_cfg 7508 | TODO_ggc_collect 7509 | TODO_verify_ssa 7510 | TODO_dump_func 7511 | TODO_update_ssa /* todo_flags_finish */ 7512 } 7513 }; 7514