/* Alias analysis for GNU C
   Copyright (C) 1997-2018 Free Software Foundation, Inc.
   Contributed by John Carr (jfc@mit.edu).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "gimple-ssa.h"
#include "emit-rtl.h"
#include "alias.h"
#include "fold-const.h"
#include "varasm.h"
#include "cselib.h"
#include "langhooks.h"
#include "cfganal.h"
#include "rtl-iter.h"
#include "cgraph.h"

/* The aliasing API provided here solves related but different problems:

   Say there exists (in C)

   struct X {
     struct Y y1;
     struct Z z2;
   } x1, *px1, *px2;

   struct Y y2, *py;
   struct Z z2, *pz;


   py = &x1.y1;
   px2 = &x1;

   Consider the four questions:

   Can a store to x1 interfere with px2->y1?
   Can a store to x1 interfere with px2->z2?
   Can a store to x1 change the value pointed to by py?
   Can a store to x1 change the value pointed to by pz?

   The answer to these questions can be yes, yes, yes, and maybe.

   The first two questions can be answered with a simple examination
   of the type system.  If structure X contains a field of type Y then
   a store through a pointer to an X can overwrite any field that is
   contained (recursively) in an X (unless we know that px1 != px2).

   The last two questions can be solved in the same way as the first
   two questions but this is too conservative.  The observation is
   that in some cases we can know which (if any) fields are addressed
   and if those addresses are used in bad ways.  This analysis may be
   language specific.  In C, arbitrary operations may be applied to
   pointers.  However, there is some indication that this may be too
   conservative for some C++ types.

   The pass ipa-type-escape does this analysis for the types whose
   instances do not escape across the compilation boundary.

   Historically in GCC, these two problems were combined into a single
   data structure that was used to represent the solution to these
   problems.  We now have two similar but different data structures.
   The data structure to solve the last two questions is similar to
   the first, but does not contain the fields whose addresses are never
   taken.  For types that do escape the compilation unit, the data
   structures will have identical information.  */

/* The alias sets assigned to MEMs assist the back-end in determining
   which MEMs can alias which other MEMs.  In general, two MEMs in
   different alias sets cannot alias each other, with one important
   exception.  Consider something like:

   struct S { int i; double d; };

   a store to an `S' can alias something of either type `int' or type
   `double'.  (However, a store to an `int' cannot alias a `double'
   and vice versa.)  We indicate this via a tree structure that looks
   like:
	   struct S
	    /   \
	   /     \
	 |/_     _\|
	 int    double

   (The arrows are directed and point downwards.)
   In this situation we say the alias set for `struct S' is the
   `superset' and that those for `int' and `double' are `subsets'.

   To see whether two alias sets can point to the same memory, we must
   see if either alias set is a subset of the other.  We need not trace
   past immediate descendants, however, since we propagate all
   grandchildren up one level.

   Alias set zero is implicitly a superset of all other alias sets.
   However, there is no actual entry for alias set zero.  It is an
   error to attempt to explicitly construct a subset of zero.  */

struct alias_set_hash : int_hash <int, INT_MIN, INT_MIN + 1> {};

struct GTY(()) alias_set_entry {
  /* The alias set number, as stored in MEM_ALIAS_SET.  */
  alias_set_type alias_set;

  /* Nonzero if the set would have a child of zero: this effectively makes
     this alias set the same as alias set zero.  */
  bool has_zero_child;
  /* Nonzero if the alias set corresponds to a pointer type itself (i.e. not
     to an aggregate containing a pointer).
     This is used for a special case where we need a universal pointer type
     compatible with all other pointer types.  */
  bool is_pointer;
  /* Nonzero if is_pointer or if one of the children has has_pointer set.  */
  bool has_pointer;

  /* The children of the alias set.  These are not just the immediate
     children, but, in fact, all descendants.  So, if we have:

       struct T { struct S s; float f; }

     continuing our example above, the children here will be all of
     `int', `double', `float', and `struct S'.  */
  hash_map<alias_set_hash, int> *children;
};

static int rtx_equal_for_memref_p (const_rtx, const_rtx);
static void record_set (rtx, const_rtx, void *);
static int base_alias_check (rtx, rtx, rtx, rtx, machine_mode,
			     machine_mode);
static rtx find_base_value (rtx);
static int mems_in_disjoint_alias_sets_p (const_rtx, const_rtx);
static alias_set_entry *get_alias_set_entry (alias_set_type);
static tree decl_for_component_ref (tree);
static int write_dependence_p (const_rtx,
			       const_rtx, machine_mode, rtx,
			       bool, bool, bool);
static int compare_base_symbol_refs (const_rtx, const_rtx);

static void memory_modified_1 (rtx, const_rtx, void *);

/* Query statistics for the different low-level disambiguators.
   A high-level query may trigger multiple of them.  */

static struct {
  unsigned long long num_alias_zero;
  unsigned long long num_same_alias_set;
  unsigned long long num_same_objects;
  unsigned long long num_volatile;
  unsigned long long num_dag;
  unsigned long long num_universal;
  unsigned long long num_disambiguated;
} alias_stats;


/* Set up all info needed to perform alias analysis on memory references.  */

/* Returns the size in bytes of the mode of X.  */
#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))

/* Cap the number of passes we make over the insns propagating alias
   information through set chains.
   ??? 10 is a completely arbitrary choice.
This should be based on the 187 maximum loop depth in the CFG, but we do not have this information 188 available (even if current_loops _is_ available). */ 189 #define MAX_ALIAS_LOOP_PASSES 10 190 191 /* reg_base_value[N] gives an address to which register N is related. 192 If all sets after the first add or subtract to the current value 193 or otherwise modify it so it does not point to a different top level 194 object, reg_base_value[N] is equal to the address part of the source 195 of the first set. 196 197 A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF. ADDRESS 198 expressions represent three types of base: 199 200 1. incoming arguments. There is just one ADDRESS to represent all 201 arguments, since we do not know at this level whether accesses 202 based on different arguments can alias. The ADDRESS has id 0. 203 204 2. stack_pointer_rtx, frame_pointer_rtx, hard_frame_pointer_rtx 205 (if distinct from frame_pointer_rtx) and arg_pointer_rtx. 206 Each of these rtxes has a separate ADDRESS associated with it, 207 each with a negative id. 208 209 GCC is (and is required to be) precise in which register it 210 chooses to access a particular region of stack. We can therefore 211 assume that accesses based on one of these rtxes do not alias 212 accesses based on another of these rtxes. 213 214 3. bases that are derived from malloc()ed memory (REG_NOALIAS). 215 Each such piece of memory has a separate ADDRESS associated 216 with it, each with an id greater than 0. 217 218 Accesses based on one ADDRESS do not alias accesses based on other 219 ADDRESSes. Accesses based on ADDRESSes in groups (2) and (3) do not 220 alias globals either; the ADDRESSes have Pmode to indicate this. 221 The ADDRESS in group (1) _may_ alias globals; it has VOIDmode to 222 indicate this. */ 223 224 static GTY(()) vec<rtx, va_gc> *reg_base_value; 225 static rtx *new_reg_base_value; 226 227 /* The single VOIDmode ADDRESS that represents all argument bases. 228 It has id 0. */ 229 static GTY(()) rtx arg_base_value; 230 231 /* Used to allocate unique ids to each REG_NOALIAS ADDRESS. */ 232 static int unique_id; 233 234 /* We preserve the copy of old array around to avoid amount of garbage 235 produced. About 8% of garbage produced were attributed to this 236 array. */ 237 static GTY((deletable)) vec<rtx, va_gc> *old_reg_base_value; 238 239 /* Values of XINT (address, 0) of Pmode ADDRESS rtxes for special 240 registers. */ 241 #define UNIQUE_BASE_VALUE_SP -1 242 #define UNIQUE_BASE_VALUE_ARGP -2 243 #define UNIQUE_BASE_VALUE_FP -3 244 #define UNIQUE_BASE_VALUE_HFP -4 245 246 #define static_reg_base_value \ 247 (this_target_rtl->x_static_reg_base_value) 248 249 #define REG_BASE_VALUE(X) \ 250 (REGNO (X) < vec_safe_length (reg_base_value) \ 251 ? (*reg_base_value)[REGNO (X)] : 0) 252 253 /* Vector indexed by N giving the initial (unchanging) value known for 254 pseudo-register N. This vector is initialized in init_alias_analysis, 255 and does not change until end_alias_analysis is called. */ 256 static GTY(()) vec<rtx, va_gc> *reg_known_value; 257 258 /* Vector recording for each reg_known_value whether it is due to a 259 REG_EQUIV note. Future passes (viz., reload) may replace the 260 pseudo with the equivalent expression and so we account for the 261 dependences that would be introduced if that happens. 262 263 The REG_EQUIV notes created in assign_parms may mention the arg 264 pointer, and there are explicit insns in the RTL that modify the 265 arg pointer. 
Thus we must ensure that such insns don't get 266 scheduled across each other because that would invalidate the 267 REG_EQUIV notes. One could argue that the REG_EQUIV notes are 268 wrong, but solving the problem in the scheduler will likely give 269 better code, so we do it here. */ 270 static sbitmap reg_known_equiv_p; 271 272 /* True when scanning insns from the start of the rtl to the 273 NOTE_INSN_FUNCTION_BEG note. */ 274 static bool copying_arguments; 275 276 277 /* The splay-tree used to store the various alias set entries. */ 278 static GTY (()) vec<alias_set_entry *, va_gc> *alias_sets; 279 280 /* Build a decomposed reference object for querying the alias-oracle 281 from the MEM rtx and store it in *REF. 282 Returns false if MEM is not suitable for the alias-oracle. */ 283 284 static bool 285 ao_ref_from_mem (ao_ref *ref, const_rtx mem) 286 { 287 tree expr = MEM_EXPR (mem); 288 tree base; 289 290 if (!expr) 291 return false; 292 293 ao_ref_init (ref, expr); 294 295 /* Get the base of the reference and see if we have to reject or 296 adjust it. */ 297 base = ao_ref_base (ref); 298 if (base == NULL_TREE) 299 return false; 300 301 /* The tree oracle doesn't like bases that are neither decls 302 nor indirect references of SSA names. */ 303 if (!(DECL_P (base) 304 || (TREE_CODE (base) == MEM_REF 305 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME) 306 || (TREE_CODE (base) == TARGET_MEM_REF 307 && TREE_CODE (TMR_BASE (base)) == SSA_NAME))) 308 return false; 309 310 /* If this is a reference based on a partitioned decl replace the 311 base with a MEM_REF of the pointer representative we 312 created during stack slot partitioning. */ 313 if (VAR_P (base) 314 && ! is_global_var (base) 315 && cfun->gimple_df->decls_to_pointers != NULL) 316 { 317 tree *namep = cfun->gimple_df->decls_to_pointers->get (base); 318 if (namep) 319 ref->base = build_simple_mem_ref (*namep); 320 } 321 322 ref->ref_alias_set = MEM_ALIAS_SET (mem); 323 324 /* If MEM_OFFSET or MEM_SIZE are unknown what we got from MEM_EXPR 325 is conservative, so trust it. */ 326 if (!MEM_OFFSET_KNOWN_P (mem) 327 || !MEM_SIZE_KNOWN_P (mem)) 328 return true; 329 330 /* If MEM_OFFSET/MEM_SIZE get us outside of ref->offset/ref->max_size 331 drop ref->ref. */ 332 if (maybe_lt (MEM_OFFSET (mem), 0) 333 || (ref->max_size_known_p () 334 && maybe_gt ((MEM_OFFSET (mem) + MEM_SIZE (mem)) * BITS_PER_UNIT, 335 ref->max_size))) 336 ref->ref = NULL_TREE; 337 338 /* Refine size and offset we got from analyzing MEM_EXPR by using 339 MEM_SIZE and MEM_OFFSET. */ 340 341 ref->offset += MEM_OFFSET (mem) * BITS_PER_UNIT; 342 ref->size = MEM_SIZE (mem) * BITS_PER_UNIT; 343 344 /* The MEM may extend into adjacent fields, so adjust max_size if 345 necessary. */ 346 if (ref->max_size_known_p ()) 347 ref->max_size = upper_bound (ref->max_size, ref->size); 348 349 /* If MEM_OFFSET and MEM_SIZE might get us outside of the base object of 350 the MEM_EXPR punt. This happens for STRICT_ALIGNMENT targets a lot. */ 351 if (MEM_EXPR (mem) != get_spill_slot_decl (false) 352 && (maybe_lt (ref->offset, 0) 353 || (DECL_P (ref->base) 354 && (DECL_SIZE (ref->base) == NULL_TREE 355 || !poly_int_tree_p (DECL_SIZE (ref->base)) 356 || maybe_lt (wi::to_poly_offset (DECL_SIZE (ref->base)), 357 ref->offset + ref->size))))) 358 return false; 359 360 return true; 361 } 362 363 /* Query the alias-oracle on whether the two memory rtx X and MEM may 364 alias. If TBAA_P is set also apply TBAA. Returns true if the 365 two rtxen may alias, false otherwise. 
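
   For illustration only (a sketch of how the dependence tests later in
   this file can consult this wrapper, not additional API): given two
   canonicalized MEMs X and MEM, a disambiguation routine may simply end
   with

     return rtx_refs_may_alias_p (x, mem, true);

   which forwards the MEM_EXPR / MEM_ALIAS_SET information recorded at
   expansion time to the gimple-level oracle refs_may_alias_p_1.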
*/ 366 367 static bool 368 rtx_refs_may_alias_p (const_rtx x, const_rtx mem, bool tbaa_p) 369 { 370 ao_ref ref1, ref2; 371 372 if (!ao_ref_from_mem (&ref1, x) 373 || !ao_ref_from_mem (&ref2, mem)) 374 return true; 375 376 return refs_may_alias_p_1 (&ref1, &ref2, 377 tbaa_p 378 && MEM_ALIAS_SET (x) != 0 379 && MEM_ALIAS_SET (mem) != 0); 380 } 381 382 /* Returns a pointer to the alias set entry for ALIAS_SET, if there is 383 such an entry, or NULL otherwise. */ 384 385 static inline alias_set_entry * 386 get_alias_set_entry (alias_set_type alias_set) 387 { 388 return (*alias_sets)[alias_set]; 389 } 390 391 /* Returns nonzero if the alias sets for MEM1 and MEM2 are such that 392 the two MEMs cannot alias each other. */ 393 394 static inline int 395 mems_in_disjoint_alias_sets_p (const_rtx mem1, const_rtx mem2) 396 { 397 return (flag_strict_aliasing 398 && ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1), 399 MEM_ALIAS_SET (mem2))); 400 } 401 402 /* Return true if the first alias set is a subset of the second. */ 403 404 bool 405 alias_set_subset_of (alias_set_type set1, alias_set_type set2) 406 { 407 alias_set_entry *ase2; 408 409 /* Disable TBAA oracle with !flag_strict_aliasing. */ 410 if (!flag_strict_aliasing) 411 return true; 412 413 /* Everything is a subset of the "aliases everything" set. */ 414 if (set2 == 0) 415 return true; 416 417 /* Check if set1 is a subset of set2. */ 418 ase2 = get_alias_set_entry (set2); 419 if (ase2 != 0 420 && (ase2->has_zero_child 421 || (ase2->children && ase2->children->get (set1)))) 422 return true; 423 424 /* As a special case we consider alias set of "void *" to be both subset 425 and superset of every alias set of a pointer. This extra symmetry does 426 not matter for alias_sets_conflict_p but it makes aliasing_component_refs_p 427 to return true on the following testcase: 428 429 void *ptr; 430 char **ptr2=(char **)&ptr; 431 *ptr2 = ... 432 433 Additionally if a set contains universal pointer, we consider every pointer 434 to be a subset of it, but we do not represent this explicitely - doing so 435 would require us to update transitive closure each time we introduce new 436 pointer type. This makes aliasing_component_refs_p to return true 437 on the following testcase: 438 439 struct a {void *ptr;} 440 char **ptr = (char **)&a.ptr; 441 ptr = ... 442 443 This makes void * truly universal pointer type. See pointer handling in 444 get_alias_set for more details. */ 445 if (ase2 && ase2->has_pointer) 446 { 447 alias_set_entry *ase1 = get_alias_set_entry (set1); 448 449 if (ase1 && ase1->is_pointer) 450 { 451 alias_set_type voidptr_set = TYPE_ALIAS_SET (ptr_type_node); 452 /* If one is ptr_type_node and other is pointer, then we consider 453 them subset of each other. */ 454 if (set1 == voidptr_set || set2 == voidptr_set) 455 return true; 456 /* If SET2 contains universal pointer's alias set, then we consdier 457 every (non-universal) pointer. */ 458 if (ase2->children && set1 != voidptr_set 459 && ase2->children->get (voidptr_set)) 460 return true; 461 } 462 } 463 return false; 464 } 465 466 /* Return 1 if the two specified alias sets may conflict. */ 467 468 int 469 alias_sets_conflict_p (alias_set_type set1, alias_set_type set2) 470 { 471 alias_set_entry *ase1; 472 alias_set_entry *ase2; 473 474 /* The easy case. */ 475 if (alias_sets_must_conflict_p (set1, set2)) 476 return 1; 477 478 /* See if the first alias set is a subset of the second. 
*/ 479 ase1 = get_alias_set_entry (set1); 480 if (ase1 != 0 481 && ase1->children && ase1->children->get (set2)) 482 { 483 ++alias_stats.num_dag; 484 return 1; 485 } 486 487 /* Now do the same, but with the alias sets reversed. */ 488 ase2 = get_alias_set_entry (set2); 489 if (ase2 != 0 490 && ase2->children && ase2->children->get (set1)) 491 { 492 ++alias_stats.num_dag; 493 return 1; 494 } 495 496 /* We want void * to be compatible with any other pointer without 497 really dropping it to alias set 0. Doing so would make it 498 compatible with all non-pointer types too. 499 500 This is not strictly necessary by the C/C++ language 501 standards, but avoids common type punning mistakes. In 502 addition to that, we need the existence of such universal 503 pointer to implement Fortran's C_PTR type (which is defined as 504 type compatible with all C pointers). */ 505 if (ase1 && ase2 && ase1->has_pointer && ase2->has_pointer) 506 { 507 alias_set_type voidptr_set = TYPE_ALIAS_SET (ptr_type_node); 508 509 /* If one of the sets corresponds to universal pointer, 510 we consider it to conflict with anything that is 511 or contains pointer. */ 512 if (set1 == voidptr_set || set2 == voidptr_set) 513 { 514 ++alias_stats.num_universal; 515 return true; 516 } 517 /* If one of sets is (non-universal) pointer and the other 518 contains universal pointer, we also get conflict. */ 519 if (ase1->is_pointer && set2 != voidptr_set 520 && ase2->children && ase2->children->get (voidptr_set)) 521 { 522 ++alias_stats.num_universal; 523 return true; 524 } 525 if (ase2->is_pointer && set1 != voidptr_set 526 && ase1->children && ase1->children->get (voidptr_set)) 527 { 528 ++alias_stats.num_universal; 529 return true; 530 } 531 } 532 533 ++alias_stats.num_disambiguated; 534 535 /* The two alias sets are distinct and neither one is the 536 child of the other. Therefore, they cannot conflict. */ 537 return 0; 538 } 539 540 /* Return 1 if the two specified alias sets will always conflict. */ 541 542 int 543 alias_sets_must_conflict_p (alias_set_type set1, alias_set_type set2) 544 { 545 /* Disable TBAA oracle with !flag_strict_aliasing. */ 546 if (!flag_strict_aliasing) 547 return 1; 548 if (set1 == 0 || set2 == 0) 549 { 550 ++alias_stats.num_alias_zero; 551 return 1; 552 } 553 if (set1 == set2) 554 { 555 ++alias_stats.num_same_alias_set; 556 return 1; 557 } 558 559 return 0; 560 } 561 562 /* Return 1 if any MEM object of type T1 will always conflict (using the 563 dependency routines in this file) with any MEM object of type T2. 564 This is used when allocating temporary storage. If T1 and/or T2 are 565 NULL_TREE, it means we know nothing about the storage. */ 566 567 int 568 objects_must_conflict_p (tree t1, tree t2) 569 { 570 alias_set_type set1, set2; 571 572 /* If neither has a type specified, we don't know if they'll conflict 573 because we may be using them to store objects of various types, for 574 example the argument and local variables areas of inlined functions. */ 575 if (t1 == 0 && t2 == 0) 576 return 0; 577 578 /* If they are the same type, they must conflict. */ 579 if (t1 == t2) 580 { 581 ++alias_stats.num_same_objects; 582 return 1; 583 } 584 /* Likewise if both are volatile. */ 585 if (t1 != 0 && TYPE_VOLATILE (t1) && t2 != 0 && TYPE_VOLATILE (t2)) 586 { 587 ++alias_stats.num_volatile; 588 return 1; 589 } 590 591 set1 = t1 ? get_alias_set (t1) : 0; 592 set2 = t2 ? 
get_alias_set (t2) : 0; 593 594 /* We can't use alias_sets_conflict_p because we must make sure 595 that every subtype of t1 will conflict with every subtype of 596 t2 for which a pair of subobjects of these respective subtypes 597 overlaps on the stack. */ 598 return alias_sets_must_conflict_p (set1, set2); 599 } 600 601 /* Return the outermost parent of component present in the chain of 602 component references handled by get_inner_reference in T with the 603 following property: 604 - the component is non-addressable, or 605 - the parent has alias set zero, 606 or NULL_TREE if no such parent exists. In the former cases, the alias 607 set of this parent is the alias set that must be used for T itself. */ 608 609 tree 610 component_uses_parent_alias_set_from (const_tree t) 611 { 612 const_tree found = NULL_TREE; 613 614 if (AGGREGATE_TYPE_P (TREE_TYPE (t)) 615 && TYPE_TYPELESS_STORAGE (TREE_TYPE (t))) 616 return const_cast <tree> (t); 617 618 while (handled_component_p (t)) 619 { 620 switch (TREE_CODE (t)) 621 { 622 case COMPONENT_REF: 623 if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1))) 624 found = t; 625 /* Permit type-punning when accessing a union, provided the access 626 is directly through the union. For example, this code does not 627 permit taking the address of a union member and then storing 628 through it. Even the type-punning allowed here is a GCC 629 extension, albeit a common and useful one; the C standard says 630 that such accesses have implementation-defined behavior. */ 631 else if (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == UNION_TYPE) 632 found = t; 633 break; 634 635 case ARRAY_REF: 636 case ARRAY_RANGE_REF: 637 if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0)))) 638 found = t; 639 break; 640 641 case REALPART_EXPR: 642 case IMAGPART_EXPR: 643 break; 644 645 case BIT_FIELD_REF: 646 case VIEW_CONVERT_EXPR: 647 /* Bitfields and casts are never addressable. */ 648 found = t; 649 break; 650 651 default: 652 gcc_unreachable (); 653 } 654 655 if (get_alias_set (TREE_TYPE (TREE_OPERAND (t, 0))) == 0) 656 found = t; 657 658 t = TREE_OPERAND (t, 0); 659 } 660 661 if (found) 662 return TREE_OPERAND (found, 0); 663 664 return NULL_TREE; 665 } 666 667 668 /* Return whether the pointer-type T effective for aliasing may 669 access everything and thus the reference has to be assigned 670 alias-set zero. */ 671 672 static bool 673 ref_all_alias_ptr_type_p (const_tree t) 674 { 675 return (TREE_CODE (TREE_TYPE (t)) == VOID_TYPE 676 || TYPE_REF_CAN_ALIAS_ALL (t)); 677 } 678 679 /* Return the alias set for the memory pointed to by T, which may be 680 either a type or an expression. Return -1 if there is nothing 681 special about dereferencing T. */ 682 683 static alias_set_type 684 get_deref_alias_set_1 (tree t) 685 { 686 /* All we care about is the type. */ 687 if (! TYPE_P (t)) 688 t = TREE_TYPE (t); 689 690 /* If we have an INDIRECT_REF via a void pointer, we don't 691 know anything about what that might alias. Likewise if the 692 pointer is marked that way. */ 693 if (ref_all_alias_ptr_type_p (t)) 694 return 0; 695 696 return -1; 697 } 698 699 /* Return the alias set for the memory pointed to by T, which may be 700 either a type or an expression. */ 701 702 alias_set_type 703 get_deref_alias_set (tree t) 704 { 705 /* If we're not doing any alias analysis, just assume everything 706 aliases everything else. 
*/ 707 if (!flag_strict_aliasing) 708 return 0; 709 710 alias_set_type set = get_deref_alias_set_1 (t); 711 712 /* Fall back to the alias-set of the pointed-to type. */ 713 if (set == -1) 714 { 715 if (! TYPE_P (t)) 716 t = TREE_TYPE (t); 717 set = get_alias_set (TREE_TYPE (t)); 718 } 719 720 return set; 721 } 722 723 /* Return the pointer-type relevant for TBAA purposes from the 724 memory reference tree *T or NULL_TREE in which case *T is 725 adjusted to point to the outermost component reference that 726 can be used for assigning an alias set. */ 727 728 static tree 729 reference_alias_ptr_type_1 (tree *t) 730 { 731 tree inner; 732 733 /* Get the base object of the reference. */ 734 inner = *t; 735 while (handled_component_p (inner)) 736 { 737 /* If there is a VIEW_CONVERT_EXPR in the chain we cannot use 738 the type of any component references that wrap it to 739 determine the alias-set. */ 740 if (TREE_CODE (inner) == VIEW_CONVERT_EXPR) 741 *t = TREE_OPERAND (inner, 0); 742 inner = TREE_OPERAND (inner, 0); 743 } 744 745 /* Handle pointer dereferences here, they can override the 746 alias-set. */ 747 if (INDIRECT_REF_P (inner) 748 && ref_all_alias_ptr_type_p (TREE_TYPE (TREE_OPERAND (inner, 0)))) 749 return TREE_TYPE (TREE_OPERAND (inner, 0)); 750 else if (TREE_CODE (inner) == TARGET_MEM_REF) 751 return TREE_TYPE (TMR_OFFSET (inner)); 752 else if (TREE_CODE (inner) == MEM_REF 753 && ref_all_alias_ptr_type_p (TREE_TYPE (TREE_OPERAND (inner, 1)))) 754 return TREE_TYPE (TREE_OPERAND (inner, 1)); 755 756 /* If the innermost reference is a MEM_REF that has a 757 conversion embedded treat it like a VIEW_CONVERT_EXPR above, 758 using the memory access type for determining the alias-set. */ 759 if (TREE_CODE (inner) == MEM_REF 760 && (TYPE_MAIN_VARIANT (TREE_TYPE (inner)) 761 != TYPE_MAIN_VARIANT 762 (TREE_TYPE (TREE_TYPE (TREE_OPERAND (inner, 1)))))) 763 return TREE_TYPE (TREE_OPERAND (inner, 1)); 764 765 /* Otherwise, pick up the outermost object that we could have 766 a pointer to. */ 767 tree tem = component_uses_parent_alias_set_from (*t); 768 if (tem) 769 *t = tem; 770 771 return NULL_TREE; 772 } 773 774 /* Return the pointer-type relevant for TBAA purposes from the 775 gimple memory reference tree T. This is the type to be used for 776 the offset operand of MEM_REF or TARGET_MEM_REF replacements of T 777 and guarantees that get_alias_set will return the same alias 778 set for T and the replacement. */ 779 780 tree 781 reference_alias_ptr_type (tree t) 782 { 783 /* If the frontend assigns this alias-set zero, preserve that. */ 784 if (lang_hooks.get_alias_set (t) == 0) 785 return ptr_type_node; 786 787 tree ptype = reference_alias_ptr_type_1 (&t); 788 /* If there is a given pointer type for aliasing purposes, return it. */ 789 if (ptype != NULL_TREE) 790 return ptype; 791 792 /* Otherwise build one from the outermost component reference we 793 may use. */ 794 if (TREE_CODE (t) == MEM_REF 795 || TREE_CODE (t) == TARGET_MEM_REF) 796 return TREE_TYPE (TREE_OPERAND (t, 1)); 797 else 798 return build_pointer_type (TYPE_MAIN_VARIANT (TREE_TYPE (t))); 799 } 800 801 /* Return whether the pointer-types T1 and T2 used to determine 802 two alias sets of two references will yield the same answer 803 from get_deref_alias_set. 
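
   For example (illustrative only): with

     typedef int I;

   the pointer types `int *' and `I *' are distinct trees, but the main
   variants of their pointed-to types are both `int', so this predicate
   reports them as compatible, while `int *' versus `float *' is not.
   A pointer that may alias everything (a ref-all pointer, or a pointer
   to void) is only treated as compatible with a type sharing its main
   variant.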
   */

bool
alias_ptr_types_compatible_p (tree t1, tree t2)
{
  if (TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))
    return true;

  if (ref_all_alias_ptr_type_p (t1)
      || ref_all_alias_ptr_type_p (t2))
    return false;

  return (TYPE_MAIN_VARIANT (TREE_TYPE (t1))
	  == TYPE_MAIN_VARIANT (TREE_TYPE (t2)));
}

/* Create an empty alias set entry.  */

alias_set_entry *
init_alias_set_entry (alias_set_type set)
{
  alias_set_entry *ase = ggc_alloc<alias_set_entry> ();
  ase->alias_set = set;
  ase->children = NULL;
  ase->has_zero_child = false;
  ase->is_pointer = false;
  ase->has_pointer = false;
  gcc_checking_assert (!get_alias_set_entry (set));
  (*alias_sets)[set] = ase;
  return ase;
}

/* Return the alias set for T, which may be either a type or an
   expression.  Call language-specific routine for help, if needed.  */

alias_set_type
get_alias_set (tree t)
{
  alias_set_type set;

  /* We cannot give up with -fno-strict-aliasing because we need to build
     a proper type representation for possible functions which are built
     with -fstrict-aliasing.  */

  /* Return 0 if this or its type is an error.  */
  if (t == error_mark_node
      || (! TYPE_P (t)
	  && (TREE_TYPE (t) == 0 || TREE_TYPE (t) == error_mark_node)))
    return 0;

  /* We can be passed either an expression or a type.  This and the
     language-specific routine may make mutually-recursive calls to each other
     to figure out what to do.  At each juncture, we see if this is a tree
     that the language may need to handle specially.  First handle things that
     aren't types.  */
  if (! TYPE_P (t))
    {
      /* Give the language a chance to do something with this tree
	 before we look at it.  */
      STRIP_NOPS (t);
      set = lang_hooks.get_alias_set (t);
      if (set != -1)
	return set;

      /* Get the alias pointer-type to use or the outermost object
	 that we could have a pointer to.  */
      tree ptype = reference_alias_ptr_type_1 (&t);
      if (ptype != NULL)
	return get_deref_alias_set (ptype);

      /* If we've already determined the alias set for a decl, just return
	 it.  This is necessary for C++ anonymous unions, whose component
	 variables don't look like union members (boo!).  */
      if (VAR_P (t)
	  && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t)))
	return MEM_ALIAS_SET (DECL_RTL (t));

      /* Now all we care about is the type.  */
      t = TREE_TYPE (t);
    }

  /* Variant qualifiers don't affect the alias set, so get the main
     variant.  */
  t = TYPE_MAIN_VARIANT (t);

  if (AGGREGATE_TYPE_P (t)
      && TYPE_TYPELESS_STORAGE (t))
    return 0;

  /* Always use the canonical type as well.  If this is a type that
     requires structural comparisons to identify compatible types
     use alias set zero.  */
  if (TYPE_STRUCTURAL_EQUALITY_P (t))
    {
      /* Allow the language to specify another alias set for this
	 type.  */
      set = lang_hooks.get_alias_set (t);
      if (set != -1)
	return set;
      /* Handle structural equality for pointer types, arrays and vectors.
	 This is easy to do, because the code below ignores canonical types
	 on these anyway.  This is important for LTO, where TYPE_CANONICAL
	 for pointers cannot be meaningfully computed by the frontend.  */
      if (canonical_type_used_p (t))
	{
	  /* In LTO we set canonical types for all types where it makes
	     sense to do so.  Double check we did not miss some type.  */
	  gcc_checking_assert (!in_lto_p || !type_with_alias_set_p (t));
	  return 0;
	}
    }
  else
    {
      t = TYPE_CANONICAL (t);
      gcc_checking_assert (!TYPE_STRUCTURAL_EQUALITY_P (t));
    }

  /* If this is a type with a known alias set, return it.  */
  gcc_checking_assert (t == TYPE_MAIN_VARIANT (t));
  if (TYPE_ALIAS_SET_KNOWN_P (t))
    return TYPE_ALIAS_SET (t);

  /* We don't want to set TYPE_ALIAS_SET for incomplete types.  */
  if (!COMPLETE_TYPE_P (t))
    {
      /* For arrays with unknown size the conservative answer is the
	 alias set of the element type.  */
      if (TREE_CODE (t) == ARRAY_TYPE)
	return get_alias_set (TREE_TYPE (t));

      /* But return zero as a conservative answer for incomplete types.  */
      return 0;
    }

  /* See if the language has special handling for this type.  */
  set = lang_hooks.get_alias_set (t);
  if (set != -1)
    return set;

  /* There are no objects of FUNCTION_TYPE, so there's no point in
     using up an alias set for them.  (There are, of course, pointers
     and references to functions, but that's different.)  */
  else if (TREE_CODE (t) == FUNCTION_TYPE || TREE_CODE (t) == METHOD_TYPE)
    set = 0;

  /* Unless the language specifies otherwise, let vector types alias
     their components.  This avoids some nasty type punning issues in
     normal usage.  And indeed lets vectors be treated more like an
     array slice.  */
  else if (TREE_CODE (t) == VECTOR_TYPE)
    set = get_alias_set (TREE_TYPE (t));

  /* Unless the language specifies otherwise, treat array types the
     same as their components.  This avoids the asymmetry we get
     through recording the components.  Consider accessing a
     character(kind=1) through a reference to a character(kind=1)[1:1].
     Or consider if we want to assign integer(kind=4)[0:D.1387] and
     integer(kind=4)[4] the same alias set or not.
     Just be pragmatic here and make sure the array and its element
     type get the same alias set assigned.  */
  else if (TREE_CODE (t) == ARRAY_TYPE
	   && (!TYPE_NONALIASED_COMPONENT (t)
	       || TYPE_STRUCTURAL_EQUALITY_P (t)))
    set = get_alias_set (TREE_TYPE (t));

  /* From the former common C and C++ langhook implementation:

     Unfortunately, there is no canonical form of a pointer type.
     In particular, if we have `typedef int I', then `int *', and
     `I *' are different types.  So, we have to pick a canonical
     representative.  We do this below.

     Technically, this approach is actually more conservative than
     it needs to be.  In particular, `const int *' and `int *'
     should be in different alias sets, according to the C and C++
     standard, since their types are not the same, and so,
     technically, an `int **' and `const int **' cannot point at
     the same thing.

     But, the standard is wrong.  In particular, this code is
     legal C++:

       int *ip;
       int **ipp = &ip;
       const int* const* cipp = ipp;
     And, it doesn't make sense for that to be legal unless you
     can dereference IPP and CIPP.  So, we ignore cv-qualifiers on
     the pointed-to types.  This issue has been reported to the
     C++ committee.

     For this reason go to the canonical type of the unqualified pointer
     type.  Until GCC 6 this code gave all pointer types the alias set of
     ptr_type_node, but that is a bad idea, because it prevents
     disambiguations between pointers.  For Firefox this accounts for
     about 20% of all disambiguations in the program.  */
  else if (POINTER_TYPE_P (t) && t != ptr_type_node)
    {
      tree p;
      auto_vec <bool, 8> reference;

      /* Unnest all pointers and references.
	 We also want to make a pointer to an array/vector equivalent to a
	 pointer to its element (see the reasoning above).  Skip all those
	 types, too.  */
      for (p = t; POINTER_TYPE_P (p)
		  || (TREE_CODE (p) == ARRAY_TYPE
		      && (!TYPE_NONALIASED_COMPONENT (p)
			  || !COMPLETE_TYPE_P (p)
			  || TYPE_STRUCTURAL_EQUALITY_P (p)))
		  || TREE_CODE (p) == VECTOR_TYPE;
	   p = TREE_TYPE (p))
	{
	  /* Ada supports recursive pointers.  Instead of doing a recursion
	     check, just give up once the preallocated space of 8 elements
	     is up.  In this case just punt to the void * alias set.  */
	  if (reference.length () == 8)
	    {
	      p = ptr_type_node;
	      break;
	    }
	  if (TREE_CODE (p) == REFERENCE_TYPE)
	    /* In LTO we want languages that use references to be compatible
	       with languages that use pointers.  */
	    reference.safe_push (true && !in_lto_p);
	  if (TREE_CODE (p) == POINTER_TYPE)
	    reference.safe_push (false);
	}
      p = TYPE_MAIN_VARIANT (p);

      /* Make void * compatible with char * and also void **.
	 Programs commonly violate TBAA by this.

	 We also make void * conflict with every pointer
	 (see record_component_aliases) and thus it is safe to use it for
	 pointers to types with TYPE_STRUCTURAL_EQUALITY_P.  */
      if (TREE_CODE (p) == VOID_TYPE || TYPE_STRUCTURAL_EQUALITY_P (p))
	set = get_alias_set (ptr_type_node);
      else
	{
	  /* Rebuild the pointer type starting from canonical types using
	     unqualified pointers and references only.  This way all such
	     pointers will have the same alias set and will conflict with
	     each other.

	     Most of the time we already have pointers or references of a
	     given type.  If not, we build a new one just to be sure that
	     if someone later (probably only the middle-end can, as we
	     should assign all alias classes only after finishing the
	     translation unit) builds the pointer type, the canonical type
	     will match.  */
	  p = TYPE_CANONICAL (p);
	  while (!reference.is_empty ())
	    {
	      if (reference.pop ())
		p = build_reference_type (p);
	      else
		p = build_pointer_type (p);
	      gcc_checking_assert (p == TYPE_MAIN_VARIANT (p));
	      /* build_pointer_type should always return the canonical type.
		 For LTO TYPE_CANONICAL may be NULL, because we do not compute
		 them.  Be sure that frontends do not glob canonical types of
		 pointers in an unexpected way and that p == TYPE_CANONICAL (p)
		 in all other cases.  */
	      gcc_checking_assert (!TYPE_CANONICAL (p)
				   || p == TYPE_CANONICAL (p));
	    }

	  /* Assign the alias set to both p and t.
	     We cannot call get_alias_set (p) here as that would trigger
	     infinite recursion when p == t.  In other cases it would just
	     trigger unnecessary legwork of rebuilding the pointer again.  */
	  gcc_checking_assert (p == TYPE_MAIN_VARIANT (p));
	  if (TYPE_ALIAS_SET_KNOWN_P (p))
	    set = TYPE_ALIAS_SET (p);
	  else
	    {
	      set = new_alias_set ();
	      TYPE_ALIAS_SET (p) = set;
	    }
	}
    }
  /* The alias set of ptr_type_node is special and serves as a universal
     pointer which is TBAA compatible with every other pointer type.  Be
     sure we have the alias set built even for LTO which otherwise keeps
     all TYPE_CANONICAL of pointer types NULL.
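
     As an illustrative sketch (assuming a plain C translation unit with no
     language-specific overrides), the handling above arranges that in

       typedef int I;
       int **p;
       I **q;
       float **r;
       void *v;

     p and q receive the same alias set (both are rebuilt from the canonical
     `int'), r receives a different one, and v gets this special universal
     set; accesses through *p and *r can therefore be disambiguated, while
     void * stays TBAA compatible with every pointer (see
     alias_sets_conflict_p above).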
*/ 1086 else if (t == ptr_type_node) 1087 set = new_alias_set (); 1088 1089 /* Otherwise make a new alias set for this type. */ 1090 else 1091 { 1092 /* Each canonical type gets its own alias set, so canonical types 1093 shouldn't form a tree. It doesn't really matter for types 1094 we handle specially above, so only check it where it possibly 1095 would result in a bogus alias set. */ 1096 gcc_checking_assert (TYPE_CANONICAL (t) == t); 1097 1098 set = new_alias_set (); 1099 } 1100 1101 TYPE_ALIAS_SET (t) = set; 1102 1103 /* If this is an aggregate type or a complex type, we must record any 1104 component aliasing information. */ 1105 if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE) 1106 record_component_aliases (t); 1107 1108 /* We treat pointer types specially in alias_set_subset_of. */ 1109 if (POINTER_TYPE_P (t) && set) 1110 { 1111 alias_set_entry *ase = get_alias_set_entry (set); 1112 if (!ase) 1113 ase = init_alias_set_entry (set); 1114 ase->is_pointer = true; 1115 ase->has_pointer = true; 1116 } 1117 1118 return set; 1119 } 1120 1121 /* Return a brand-new alias set. */ 1122 1123 alias_set_type 1124 new_alias_set (void) 1125 { 1126 if (alias_sets == 0) 1127 vec_safe_push (alias_sets, (alias_set_entry *) NULL); 1128 vec_safe_push (alias_sets, (alias_set_entry *) NULL); 1129 return alias_sets->length () - 1; 1130 } 1131 1132 /* Indicate that things in SUBSET can alias things in SUPERSET, but that 1133 not everything that aliases SUPERSET also aliases SUBSET. For example, 1134 in C, a store to an `int' can alias a load of a structure containing an 1135 `int', and vice versa. But it can't alias a load of a 'double' member 1136 of the same structure. Here, the structure would be the SUPERSET and 1137 `int' the SUBSET. This relationship is also described in the comment at 1138 the beginning of this file. 1139 1140 This function should be called only once per SUPERSET/SUBSET pair. 1141 1142 It is illegal for SUPERSET to be zero; everything is implicitly a 1143 subset of alias set zero. */ 1144 1145 void 1146 record_alias_subset (alias_set_type superset, alias_set_type subset) 1147 { 1148 alias_set_entry *superset_entry; 1149 alias_set_entry *subset_entry; 1150 1151 /* It is possible in complex type situations for both sets to be the same, 1152 in which case we can ignore this operation. */ 1153 if (superset == subset) 1154 return; 1155 1156 gcc_assert (superset); 1157 1158 superset_entry = get_alias_set_entry (superset); 1159 if (superset_entry == 0) 1160 { 1161 /* Create an entry for the SUPERSET, so that we have a place to 1162 attach the SUBSET. */ 1163 superset_entry = init_alias_set_entry (superset); 1164 } 1165 1166 if (subset == 0) 1167 superset_entry->has_zero_child = 1; 1168 else 1169 { 1170 subset_entry = get_alias_set_entry (subset); 1171 if (!superset_entry->children) 1172 superset_entry->children 1173 = hash_map<alias_set_hash, int>::create_ggc (64); 1174 /* If there is an entry for the subset, enter all of its children 1175 (if they are not already present) as children of the SUPERSET. 
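
	 For instance (a sketch reusing the structures from the head of this
	 file): once struct S already has `int' and `double' as children, a
	 call such as

	   record_alias_subset (get_alias_set (T_type), get_alias_set (S_type));

	 also copies `int' and `double' into struct T's children, so a later
	 alias_sets_conflict_p query never has to look more than one level
	 down.  T_type and S_type stand for the respective type nodes and are
	 named here for illustration only.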
*/ 1176 if (subset_entry) 1177 { 1178 if (subset_entry->has_zero_child) 1179 superset_entry->has_zero_child = true; 1180 if (subset_entry->has_pointer) 1181 superset_entry->has_pointer = true; 1182 1183 if (subset_entry->children) 1184 { 1185 hash_map<alias_set_hash, int>::iterator iter 1186 = subset_entry->children->begin (); 1187 for (; iter != subset_entry->children->end (); ++iter) 1188 superset_entry->children->put ((*iter).first, (*iter).second); 1189 } 1190 } 1191 1192 /* Enter the SUBSET itself as a child of the SUPERSET. */ 1193 superset_entry->children->put (subset, 0); 1194 } 1195 } 1196 1197 /* Record that component types of TYPE, if any, are part of SUPERSET for 1198 aliasing purposes. For record types, we only record component types 1199 for fields that are not marked non-addressable. For array types, we 1200 only record the component type if it is not marked non-aliased. */ 1201 1202 void 1203 record_component_aliases (tree type, alias_set_type superset) 1204 { 1205 tree field; 1206 1207 if (superset == 0) 1208 return; 1209 1210 switch (TREE_CODE (type)) 1211 { 1212 case RECORD_TYPE: 1213 case UNION_TYPE: 1214 case QUAL_UNION_TYPE: 1215 for (field = TYPE_FIELDS (type); field != 0; field = DECL_CHAIN (field)) 1216 if (TREE_CODE (field) == FIELD_DECL && !DECL_NONADDRESSABLE_P (field)) 1217 { 1218 /* LTO type merging does not make any difference between 1219 component pointer types. We may have 1220 1221 struct foo {int *a;}; 1222 1223 as TYPE_CANONICAL of 1224 1225 struct bar {float *a;}; 1226 1227 Because accesses to int * and float * do not alias, we would get 1228 false negative when accessing the same memory location by 1229 float ** and bar *. We thus record the canonical type as: 1230 1231 struct {void *a;}; 1232 1233 void * is special cased and works as a universal pointer type. 1234 Accesses to it conflicts with accesses to any other pointer 1235 type. */ 1236 tree t = TREE_TYPE (field); 1237 if (in_lto_p) 1238 { 1239 /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their 1240 element type and that type has to be normalized to void *, 1241 too, in the case it is a pointer. */ 1242 while (!canonical_type_used_p (t) && !POINTER_TYPE_P (t)) 1243 { 1244 gcc_checking_assert (TYPE_STRUCTURAL_EQUALITY_P (t)); 1245 t = TREE_TYPE (t); 1246 } 1247 if (POINTER_TYPE_P (t)) 1248 t = ptr_type_node; 1249 else if (flag_checking) 1250 gcc_checking_assert (get_alias_set (t) 1251 == get_alias_set (TREE_TYPE (field))); 1252 } 1253 1254 alias_set_type set = get_alias_set (t); 1255 record_alias_subset (superset, set); 1256 /* If the field has alias-set zero make sure to still record 1257 any componets of it. This makes sure that for 1258 struct A { 1259 struct B { 1260 int i; 1261 char c[4]; 1262 } b; 1263 }; 1264 in C++ even though 'B' has alias-set zero because 1265 TYPE_TYPELESS_STORAGE is set, 'A' has the alias-set of 1266 'int' as subset. */ 1267 if (set == 0) 1268 record_component_aliases (t, superset); 1269 } 1270 break; 1271 1272 case COMPLEX_TYPE: 1273 record_alias_subset (superset, get_alias_set (TREE_TYPE (type))); 1274 break; 1275 1276 /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their 1277 element type. */ 1278 1279 default: 1280 break; 1281 } 1282 } 1283 1284 /* Record that component types of TYPE, if any, are part of that type for 1285 aliasing purposes. For record types, we only record component types 1286 for fields that are not marked non-addressable. For array types, we 1287 only record the component type if it is not marked non-aliased. 
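
   For illustration (in effect, not literal code): for

     struct S { int i; double d; };

   recording the components amounts to

     record_alias_subset (S_set, int_set);
     record_alias_subset (S_set, double_set);

   where S_set, int_set and double_set stand for the alias sets returned
   by get_alias_set for the respective types.  This is what builds the
   superset/subset tree described at the top of the file; fields marked
   DECL_NONADDRESSABLE_P are not recorded.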
*/ 1288 1289 void 1290 record_component_aliases (tree type) 1291 { 1292 alias_set_type superset = get_alias_set (type); 1293 record_component_aliases (type, superset); 1294 } 1295 1296 1297 /* Allocate an alias set for use in storing and reading from the varargs 1298 spill area. */ 1299 1300 static GTY(()) alias_set_type varargs_set = -1; 1301 1302 alias_set_type 1303 get_varargs_alias_set (void) 1304 { 1305 #if 1 1306 /* We now lower VA_ARG_EXPR, and there's currently no way to attach the 1307 varargs alias set to an INDIRECT_REF (FIXME!), so we can't 1308 consistently use the varargs alias set for loads from the varargs 1309 area. So don't use it anywhere. */ 1310 return 0; 1311 #else 1312 if (varargs_set == -1) 1313 varargs_set = new_alias_set (); 1314 1315 return varargs_set; 1316 #endif 1317 } 1318 1319 /* Likewise, but used for the fixed portions of the frame, e.g., register 1320 save areas. */ 1321 1322 static GTY(()) alias_set_type frame_set = -1; 1323 1324 alias_set_type 1325 get_frame_alias_set (void) 1326 { 1327 if (frame_set == -1) 1328 frame_set = new_alias_set (); 1329 1330 return frame_set; 1331 } 1332 1333 /* Create a new, unique base with id ID. */ 1334 1335 static rtx 1336 unique_base_value (HOST_WIDE_INT id) 1337 { 1338 return gen_rtx_ADDRESS (Pmode, id); 1339 } 1340 1341 /* Return true if accesses based on any other base value cannot alias 1342 those based on X. */ 1343 1344 static bool 1345 unique_base_value_p (rtx x) 1346 { 1347 return GET_CODE (x) == ADDRESS && GET_MODE (x) == Pmode; 1348 } 1349 1350 /* Return true if X is known to be a base value. */ 1351 1352 static bool 1353 known_base_value_p (rtx x) 1354 { 1355 switch (GET_CODE (x)) 1356 { 1357 case LABEL_REF: 1358 case SYMBOL_REF: 1359 return true; 1360 1361 case ADDRESS: 1362 /* Arguments may or may not be bases; we don't know for sure. */ 1363 return GET_MODE (x) != VOIDmode; 1364 1365 default: 1366 return false; 1367 } 1368 } 1369 1370 /* Inside SRC, the source of a SET, find a base address. */ 1371 1372 static rtx 1373 find_base_value (rtx src) 1374 { 1375 unsigned int regno; 1376 scalar_int_mode int_mode; 1377 1378 #if defined (FIND_BASE_TERM) 1379 /* Try machine-dependent ways to find the base term. */ 1380 src = FIND_BASE_TERM (src); 1381 #endif 1382 1383 switch (GET_CODE (src)) 1384 { 1385 case SYMBOL_REF: 1386 case LABEL_REF: 1387 return src; 1388 1389 case REG: 1390 regno = REGNO (src); 1391 /* At the start of a function, argument registers have known base 1392 values which may be lost later. Returning an ADDRESS 1393 expression here allows optimization based on argument values 1394 even when the argument registers are used for other purposes. */ 1395 if (regno < FIRST_PSEUDO_REGISTER && copying_arguments) 1396 return new_reg_base_value[regno]; 1397 1398 /* If a pseudo has a known base value, return it. Do not do this 1399 for non-fixed hard regs since it can result in a circular 1400 dependency chain for registers which have values at function entry. 1401 1402 The test above is not sufficient because the scheduler may move 1403 a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN. */ 1404 if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno]) 1405 && regno < vec_safe_length (reg_base_value)) 1406 { 1407 /* If we're inside init_alias_analysis, use new_reg_base_value 1408 to reduce the number of relaxation iterations. 
*/ 1409 if (new_reg_base_value && new_reg_base_value[regno] 1410 && DF_REG_DEF_COUNT (regno) == 1) 1411 return new_reg_base_value[regno]; 1412 1413 if ((*reg_base_value)[regno]) 1414 return (*reg_base_value)[regno]; 1415 } 1416 1417 return 0; 1418 1419 case MEM: 1420 /* Check for an argument passed in memory. Only record in the 1421 copying-arguments block; it is too hard to track changes 1422 otherwise. */ 1423 if (copying_arguments 1424 && (XEXP (src, 0) == arg_pointer_rtx 1425 || (GET_CODE (XEXP (src, 0)) == PLUS 1426 && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx))) 1427 return arg_base_value; 1428 return 0; 1429 1430 case CONST: 1431 src = XEXP (src, 0); 1432 if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS) 1433 break; 1434 1435 /* fall through */ 1436 1437 case PLUS: 1438 case MINUS: 1439 { 1440 rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1); 1441 1442 /* If either operand is a REG that is a known pointer, then it 1443 is the base. */ 1444 if (REG_P (src_0) && REG_POINTER (src_0)) 1445 return find_base_value (src_0); 1446 if (REG_P (src_1) && REG_POINTER (src_1)) 1447 return find_base_value (src_1); 1448 1449 /* If either operand is a REG, then see if we already have 1450 a known value for it. */ 1451 if (REG_P (src_0)) 1452 { 1453 temp = find_base_value (src_0); 1454 if (temp != 0) 1455 src_0 = temp; 1456 } 1457 1458 if (REG_P (src_1)) 1459 { 1460 temp = find_base_value (src_1); 1461 if (temp!= 0) 1462 src_1 = temp; 1463 } 1464 1465 /* If either base is named object or a special address 1466 (like an argument or stack reference), then use it for the 1467 base term. */ 1468 if (src_0 != 0 && known_base_value_p (src_0)) 1469 return src_0; 1470 1471 if (src_1 != 0 && known_base_value_p (src_1)) 1472 return src_1; 1473 1474 /* Guess which operand is the base address: 1475 If either operand is a symbol, then it is the base. If 1476 either operand is a CONST_INT, then the other is the base. */ 1477 if (CONST_INT_P (src_1) || CONSTANT_P (src_0)) 1478 return find_base_value (src_0); 1479 else if (CONST_INT_P (src_0) || CONSTANT_P (src_1)) 1480 return find_base_value (src_1); 1481 1482 return 0; 1483 } 1484 1485 case LO_SUM: 1486 /* The standard form is (lo_sum reg sym) so look only at the 1487 second operand. */ 1488 return find_base_value (XEXP (src, 1)); 1489 1490 case AND: 1491 /* If the second operand is constant set the base 1492 address to the first operand. */ 1493 if (CONST_INT_P (XEXP (src, 1)) && INTVAL (XEXP (src, 1)) != 0) 1494 return find_base_value (XEXP (src, 0)); 1495 return 0; 1496 1497 case TRUNCATE: 1498 /* As we do not know which address space the pointer is referring to, we can 1499 handle this only if the target does not support different pointer or 1500 address modes depending on the address space. */ 1501 if (!target_default_pointer_address_modes_p ()) 1502 break; 1503 if (!is_a <scalar_int_mode> (GET_MODE (src), &int_mode) 1504 || GET_MODE_PRECISION (int_mode) < GET_MODE_PRECISION (Pmode)) 1505 break; 1506 /* Fall through. */ 1507 case HIGH: 1508 case PRE_INC: 1509 case PRE_DEC: 1510 case POST_INC: 1511 case POST_DEC: 1512 case PRE_MODIFY: 1513 case POST_MODIFY: 1514 return find_base_value (XEXP (src, 0)); 1515 1516 case ZERO_EXTEND: 1517 case SIGN_EXTEND: /* used for NT/Alpha pointers */ 1518 /* As we do not know which address space the pointer is referring to, we can 1519 handle this only if the target does not support different pointer or 1520 address modes depending on the address space. 
*/ 1521 if (!target_default_pointer_address_modes_p ()) 1522 break; 1523 1524 { 1525 rtx temp = find_base_value (XEXP (src, 0)); 1526 1527 if (temp != 0 && CONSTANT_P (temp)) 1528 temp = convert_memory_address (Pmode, temp); 1529 1530 return temp; 1531 } 1532 1533 default: 1534 break; 1535 } 1536 1537 return 0; 1538 } 1539 1540 /* Called from init_alias_analysis indirectly through note_stores, 1541 or directly if DEST is a register with a REG_NOALIAS note attached. 1542 SET is null in the latter case. */ 1543 1544 /* While scanning insns to find base values, reg_seen[N] is nonzero if 1545 register N has been set in this function. */ 1546 static sbitmap reg_seen; 1547 1548 static void 1549 record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED) 1550 { 1551 unsigned regno; 1552 rtx src; 1553 int n; 1554 1555 if (!REG_P (dest)) 1556 return; 1557 1558 regno = REGNO (dest); 1559 1560 gcc_checking_assert (regno < reg_base_value->length ()); 1561 1562 n = REG_NREGS (dest); 1563 if (n != 1) 1564 { 1565 while (--n >= 0) 1566 { 1567 bitmap_set_bit (reg_seen, regno + n); 1568 new_reg_base_value[regno + n] = 0; 1569 } 1570 return; 1571 } 1572 1573 if (set) 1574 { 1575 /* A CLOBBER wipes out any old value but does not prevent a previously 1576 unset register from acquiring a base address (i.e. reg_seen is not 1577 set). */ 1578 if (GET_CODE (set) == CLOBBER) 1579 { 1580 new_reg_base_value[regno] = 0; 1581 return; 1582 } 1583 src = SET_SRC (set); 1584 } 1585 else 1586 { 1587 /* There's a REG_NOALIAS note against DEST. */ 1588 if (bitmap_bit_p (reg_seen, regno)) 1589 { 1590 new_reg_base_value[regno] = 0; 1591 return; 1592 } 1593 bitmap_set_bit (reg_seen, regno); 1594 new_reg_base_value[regno] = unique_base_value (unique_id++); 1595 return; 1596 } 1597 1598 /* If this is not the first set of REGNO, see whether the new value 1599 is related to the old one. There are two cases of interest: 1600 1601 (1) The register might be assigned an entirely new value 1602 that has the same base term as the original set. 1603 1604 (2) The set might be a simple self-modification that 1605 cannot change REGNO's base value. 1606 1607 If neither case holds, reject the original base value as invalid. 1608 Note that the following situation is not detected: 1609 1610 extern int x, y; int *p = &x; p += (&y-&x); 1611 1612 ANSI C does not allow computing the difference of addresses 1613 of distinct top level objects. */ 1614 if (new_reg_base_value[regno] != 0 1615 && find_base_value (src) != new_reg_base_value[regno]) 1616 switch (GET_CODE (src)) 1617 { 1618 case LO_SUM: 1619 case MINUS: 1620 if (XEXP (src, 0) != dest && XEXP (src, 1) != dest) 1621 new_reg_base_value[regno] = 0; 1622 break; 1623 case PLUS: 1624 /* If the value we add in the PLUS is also a valid base value, 1625 this might be the actual base value, and the original value 1626 an index. */ 1627 { 1628 rtx other = NULL_RTX; 1629 1630 if (XEXP (src, 0) == dest) 1631 other = XEXP (src, 1); 1632 else if (XEXP (src, 1) == dest) 1633 other = XEXP (src, 0); 1634 1635 if (! other || find_base_value (other)) 1636 new_reg_base_value[regno] = 0; 1637 break; 1638 } 1639 case AND: 1640 if (XEXP (src, 0) != dest || !CONST_INT_P (XEXP (src, 1))) 1641 new_reg_base_value[regno] = 0; 1642 break; 1643 default: 1644 new_reg_base_value[regno] = 0; 1645 break; 1646 } 1647 /* If this is the first set of a register, record the value. */ 1648 else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno]) 1649 && ! 
bitmap_bit_p (reg_seen, regno) && new_reg_base_value[regno] == 0) 1650 new_reg_base_value[regno] = find_base_value (src); 1651 1652 bitmap_set_bit (reg_seen, regno); 1653 } 1654 1655 /* Return REG_BASE_VALUE for REGNO. Selective scheduler uses this to avoid 1656 using hard registers with non-null REG_BASE_VALUE for renaming. */ 1657 rtx 1658 get_reg_base_value (unsigned int regno) 1659 { 1660 return (*reg_base_value)[regno]; 1661 } 1662 1663 /* If a value is known for REGNO, return it. */ 1664 1665 rtx 1666 get_reg_known_value (unsigned int regno) 1667 { 1668 if (regno >= FIRST_PSEUDO_REGISTER) 1669 { 1670 regno -= FIRST_PSEUDO_REGISTER; 1671 if (regno < vec_safe_length (reg_known_value)) 1672 return (*reg_known_value)[regno]; 1673 } 1674 return NULL; 1675 } 1676 1677 /* Set it. */ 1678 1679 static void 1680 set_reg_known_value (unsigned int regno, rtx val) 1681 { 1682 if (regno >= FIRST_PSEUDO_REGISTER) 1683 { 1684 regno -= FIRST_PSEUDO_REGISTER; 1685 if (regno < vec_safe_length (reg_known_value)) 1686 (*reg_known_value)[regno] = val; 1687 } 1688 } 1689 1690 /* Similarly for reg_known_equiv_p. */ 1691 1692 bool 1693 get_reg_known_equiv_p (unsigned int regno) 1694 { 1695 if (regno >= FIRST_PSEUDO_REGISTER) 1696 { 1697 regno -= FIRST_PSEUDO_REGISTER; 1698 if (regno < vec_safe_length (reg_known_value)) 1699 return bitmap_bit_p (reg_known_equiv_p, regno); 1700 } 1701 return false; 1702 } 1703 1704 static void 1705 set_reg_known_equiv_p (unsigned int regno, bool val) 1706 { 1707 if (regno >= FIRST_PSEUDO_REGISTER) 1708 { 1709 regno -= FIRST_PSEUDO_REGISTER; 1710 if (regno < vec_safe_length (reg_known_value)) 1711 { 1712 if (val) 1713 bitmap_set_bit (reg_known_equiv_p, regno); 1714 else 1715 bitmap_clear_bit (reg_known_equiv_p, regno); 1716 } 1717 } 1718 } 1719 1720 1721 /* Returns a canonical version of X, from the point of view alias 1722 analysis. (For example, if X is a MEM whose address is a register, 1723 and the register has a known value (say a SYMBOL_REF), then a MEM 1724 whose address is the SYMBOL_REF is returned.) */ 1725 1726 rtx 1727 canon_rtx (rtx x) 1728 { 1729 /* Recursively look for equivalences. */ 1730 if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER) 1731 { 1732 rtx t = get_reg_known_value (REGNO (x)); 1733 if (t == x) 1734 return x; 1735 if (t) 1736 return canon_rtx (t); 1737 } 1738 1739 if (GET_CODE (x) == PLUS) 1740 { 1741 rtx x0 = canon_rtx (XEXP (x, 0)); 1742 rtx x1 = canon_rtx (XEXP (x, 1)); 1743 1744 if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1)) 1745 return simplify_gen_binary (PLUS, GET_MODE (x), x0, x1); 1746 } 1747 1748 /* This gives us much better alias analysis when called from 1749 the loop optimizer. Note we want to leave the original 1750 MEM alone, but need to return the canonicalized MEM with 1751 all the flags with their original values. */ 1752 else if (MEM_P (x)) 1753 x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0))); 1754 1755 return x; 1756 } 1757 1758 /* Return 1 if X and Y are identical-looking rtx's. 1759 Expect that X and Y has been already canonicalized. 1760 1761 We use the data in reg_known_value above to see if two registers with 1762 different numbers are, in fact, equivalent. */ 1763 1764 static int 1765 rtx_equal_for_memref_p (const_rtx x, const_rtx y) 1766 { 1767 int i; 1768 int j; 1769 enum rtx_code code; 1770 const char *fmt; 1771 1772 if (x == 0 && y == 0) 1773 return 1; 1774 if (x == 0 || y == 0) 1775 return 0; 1776 1777 if (x == y) 1778 return 1; 1779 1780 code = GET_CODE (x); 1781 /* Rtx's of different codes cannot be equal. 
*/ 1782 if (code != GET_CODE (y)) 1783 return 0; 1784 1785 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. 1786 (REG:SI x) and (REG:HI x) are NOT equivalent. */ 1787 1788 if (GET_MODE (x) != GET_MODE (y)) 1789 return 0; 1790 1791 /* Some RTL can be compared without a recursive examination. */ 1792 switch (code) 1793 { 1794 case REG: 1795 return REGNO (x) == REGNO (y); 1796 1797 case LABEL_REF: 1798 return label_ref_label (x) == label_ref_label (y); 1799 1800 case SYMBOL_REF: 1801 return compare_base_symbol_refs (x, y) == 1; 1802 1803 case ENTRY_VALUE: 1804 /* This is magic, don't go through canonicalization et al. */ 1805 return rtx_equal_p (ENTRY_VALUE_EXP (x), ENTRY_VALUE_EXP (y)); 1806 1807 case VALUE: 1808 CASE_CONST_UNIQUE: 1809 /* Pointer equality guarantees equality for these nodes. */ 1810 return 0; 1811 1812 default: 1813 break; 1814 } 1815 1816 /* canon_rtx knows how to handle plus. No need to canonicalize. */ 1817 if (code == PLUS) 1818 return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0)) 1819 && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1))) 1820 || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1)) 1821 && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0)))); 1822 /* For commutative operations, the RTX match if the operand match in any 1823 order. Also handle the simple binary and unary cases without a loop. */ 1824 if (COMMUTATIVE_P (x)) 1825 { 1826 rtx xop0 = canon_rtx (XEXP (x, 0)); 1827 rtx yop0 = canon_rtx (XEXP (y, 0)); 1828 rtx yop1 = canon_rtx (XEXP (y, 1)); 1829 1830 return ((rtx_equal_for_memref_p (xop0, yop0) 1831 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop1)) 1832 || (rtx_equal_for_memref_p (xop0, yop1) 1833 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop0))); 1834 } 1835 else if (NON_COMMUTATIVE_P (x)) 1836 { 1837 return (rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)), 1838 canon_rtx (XEXP (y, 0))) 1839 && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), 1840 canon_rtx (XEXP (y, 1)))); 1841 } 1842 else if (UNARY_P (x)) 1843 return rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)), 1844 canon_rtx (XEXP (y, 0))); 1845 1846 /* Compare the elements. If any pair of corresponding elements 1847 fail to match, return 0 for the whole things. 1848 1849 Limit cases to types which actually appear in addresses. */ 1850 1851 fmt = GET_RTX_FORMAT (code); 1852 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) 1853 { 1854 switch (fmt[i]) 1855 { 1856 case 'i': 1857 if (XINT (x, i) != XINT (y, i)) 1858 return 0; 1859 break; 1860 1861 case 'p': 1862 if (maybe_ne (SUBREG_BYTE (x), SUBREG_BYTE (y))) 1863 return 0; 1864 break; 1865 1866 case 'E': 1867 /* Two vectors must have the same length. */ 1868 if (XVECLEN (x, i) != XVECLEN (y, i)) 1869 return 0; 1870 1871 /* And the corresponding elements must match. */ 1872 for (j = 0; j < XVECLEN (x, i); j++) 1873 if (rtx_equal_for_memref_p (canon_rtx (XVECEXP (x, i, j)), 1874 canon_rtx (XVECEXP (y, i, j))) == 0) 1875 return 0; 1876 break; 1877 1878 case 'e': 1879 if (rtx_equal_for_memref_p (canon_rtx (XEXP (x, i)), 1880 canon_rtx (XEXP (y, i))) == 0) 1881 return 0; 1882 break; 1883 1884 /* This can happen for asm operands. */ 1885 case 's': 1886 if (strcmp (XSTR (x, i), XSTR (y, i))) 1887 return 0; 1888 break; 1889 1890 /* This can happen for an asm which clobbers memory. */ 1891 case '0': 1892 break; 1893 1894 /* It is believed that rtx's at this level will never 1895 contain anything but integers and other rtx's, 1896 except for within LABEL_REFs and SYMBOL_REFs. 
*/ 1897 default: 1898 gcc_unreachable (); 1899 } 1900 } 1901 return 1; 1902 } 1903 1904 static rtx 1905 find_base_term (rtx x, vec<std::pair<cselib_val *, 1906 struct elt_loc_list *> > &visited_vals) 1907 { 1908 cselib_val *val; 1909 struct elt_loc_list *l, *f; 1910 rtx ret; 1911 scalar_int_mode int_mode; 1912 1913 #if defined (FIND_BASE_TERM) 1914 /* Try machine-dependent ways to find the base term. */ 1915 x = FIND_BASE_TERM (x); 1916 #endif 1917 1918 switch (GET_CODE (x)) 1919 { 1920 case REG: 1921 return REG_BASE_VALUE (x); 1922 1923 case TRUNCATE: 1924 /* As we do not know which address space the pointer is referring to, we can 1925 handle this only if the target does not support different pointer or 1926 address modes depending on the address space. */ 1927 if (!target_default_pointer_address_modes_p ()) 1928 return 0; 1929 if (!is_a <scalar_int_mode> (GET_MODE (x), &int_mode) 1930 || GET_MODE_PRECISION (int_mode) < GET_MODE_PRECISION (Pmode)) 1931 return 0; 1932 /* Fall through. */ 1933 case HIGH: 1934 case PRE_INC: 1935 case PRE_DEC: 1936 case POST_INC: 1937 case POST_DEC: 1938 case PRE_MODIFY: 1939 case POST_MODIFY: 1940 return find_base_term (XEXP (x, 0), visited_vals); 1941 1942 case ZERO_EXTEND: 1943 case SIGN_EXTEND: /* Used for Alpha/NT pointers */ 1944 /* As we do not know which address space the pointer is referring to, we can 1945 handle this only if the target does not support different pointer or 1946 address modes depending on the address space. */ 1947 if (!target_default_pointer_address_modes_p ()) 1948 return 0; 1949 1950 { 1951 rtx temp = find_base_term (XEXP (x, 0), visited_vals); 1952 1953 if (temp != 0 && CONSTANT_P (temp)) 1954 temp = convert_memory_address (Pmode, temp); 1955 1956 return temp; 1957 } 1958 1959 case VALUE: 1960 val = CSELIB_VAL_PTR (x); 1961 ret = NULL_RTX; 1962 1963 if (!val) 1964 return ret; 1965 1966 if (cselib_sp_based_value_p (val)) 1967 return static_reg_base_value[STACK_POINTER_REGNUM]; 1968 1969 f = val->locs; 1970 /* Reset val->locs to avoid infinite recursion. */ 1971 if (f) 1972 visited_vals.safe_push (std::make_pair (val, f)); 1973 val->locs = NULL; 1974 1975 for (l = f; l; l = l->next) 1976 if (GET_CODE (l->loc) == VALUE 1977 && CSELIB_VAL_PTR (l->loc)->locs 1978 && !CSELIB_VAL_PTR (l->loc)->locs->next 1979 && CSELIB_VAL_PTR (l->loc)->locs->loc == x) 1980 continue; 1981 else if ((ret = find_base_term (l->loc, visited_vals)) != 0) 1982 break; 1983 1984 return ret; 1985 1986 case LO_SUM: 1987 /* The standard form is (lo_sum reg sym) so look only at the 1988 second operand. */ 1989 return find_base_term (XEXP (x, 1), visited_vals); 1990 1991 case CONST: 1992 x = XEXP (x, 0); 1993 if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS) 1994 return 0; 1995 /* Fall through. */ 1996 case PLUS: 1997 case MINUS: 1998 { 1999 rtx tmp1 = XEXP (x, 0); 2000 rtx tmp2 = XEXP (x, 1); 2001 2002 /* This is a little bit tricky since we have to determine which of 2003 the two operands represents the real base address. Otherwise this 2004 routine may return the index register instead of the base register. 2005 2006 That may cause us to believe no aliasing was possible, when in 2007 fact aliasing is possible. 2008 2009 We use a few simple tests to guess the base register. Additional 2010 tests can certainly be added. For example, if one of the operands 2011 is a shift or multiply, then it must be the index register and the 2012 other operand is the base register. 
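For instance, in (plus (reg A) (mult (reg B) (const_int 4))) the MULT can only be the index part, so (reg A) must supply the base term. (This is purely illustrative; the code below relies on the REG_POINTER and known_base_value_p tests rather than recognizing shifts or multiplies.)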
*/ 2013 2014 if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2)) 2015 return find_base_term (tmp2, visited_vals); 2016 2017 /* If either operand is known to be a pointer, then prefer it 2018 to determine the base term. */ 2019 if (REG_P (tmp1) && REG_POINTER (tmp1)) 2020 ; 2021 else if (REG_P (tmp2) && REG_POINTER (tmp2)) 2022 std::swap (tmp1, tmp2); 2023 /* If second argument is constant which has base term, prefer it 2024 over variable tmp1. See PR64025. */ 2025 else if (CONSTANT_P (tmp2) && !CONST_INT_P (tmp2)) 2026 std::swap (tmp1, tmp2); 2027 2028 /* Go ahead and find the base term for both operands. If either base 2029 term is from a pointer or is a named object or a special address 2030 (like an argument or stack reference), then use it for the 2031 base term. */ 2032 rtx base = find_base_term (tmp1, visited_vals); 2033 if (base != NULL_RTX 2034 && ((REG_P (tmp1) && REG_POINTER (tmp1)) 2035 || known_base_value_p (base))) 2036 return base; 2037 base = find_base_term (tmp2, visited_vals); 2038 if (base != NULL_RTX 2039 && ((REG_P (tmp2) && REG_POINTER (tmp2)) 2040 || known_base_value_p (base))) 2041 return base; 2042 2043 /* We could not determine which of the two operands was the 2044 base register and which was the index. So we can determine 2045 nothing from the base alias check. */ 2046 return 0; 2047 } 2048 2049 case AND: 2050 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) != 0) 2051 return find_base_term (XEXP (x, 0), visited_vals); 2052 return 0; 2053 2054 case SYMBOL_REF: 2055 case LABEL_REF: 2056 return x; 2057 2058 default: 2059 return 0; 2060 } 2061 } 2062 2063 /* Wrapper around the worker above which removes locs from visited VALUEs 2064 to avoid visiting them multiple times. We unwind that changes here. */ 2065 2066 static rtx 2067 find_base_term (rtx x) 2068 { 2069 auto_vec<std::pair<cselib_val *, struct elt_loc_list *>, 32> visited_vals; 2070 rtx res = find_base_term (x, visited_vals); 2071 for (unsigned i = 0; i < visited_vals.length (); ++i) 2072 visited_vals[i].first->locs = visited_vals[i].second; 2073 return res; 2074 } 2075 2076 /* Return true if accesses to address X may alias accesses based 2077 on the stack pointer. */ 2078 2079 bool 2080 may_be_sp_based_p (rtx x) 2081 { 2082 rtx base = find_base_term (x); 2083 return !base || base == static_reg_base_value[STACK_POINTER_REGNUM]; 2084 } 2085 2086 /* BASE1 and BASE2 are decls. Return 1 if they refer to same object, 0 2087 if they refer to different objects and -1 if we can not decide. */ 2088 2089 int 2090 compare_base_decls (tree base1, tree base2) 2091 { 2092 int ret; 2093 gcc_checking_assert (DECL_P (base1) && DECL_P (base2)); 2094 if (base1 == base2) 2095 return 1; 2096 2097 /* If we have two register decls with register specification we 2098 cannot decide unless their assembler names are the same. */ 2099 if (DECL_REGISTER (base1) 2100 && DECL_REGISTER (base2) 2101 && HAS_DECL_ASSEMBLER_NAME_P (base1) 2102 && HAS_DECL_ASSEMBLER_NAME_P (base2) 2103 && DECL_ASSEMBLER_NAME_SET_P (base1) 2104 && DECL_ASSEMBLER_NAME_SET_P (base2)) 2105 { 2106 if (DECL_ASSEMBLER_NAME_RAW (base1) == DECL_ASSEMBLER_NAME_RAW (base2)) 2107 return 1; 2108 return -1; 2109 } 2110 2111 /* Declarations of non-automatic variables may have aliases. All other 2112 decls are unique. */ 2113 if (!decl_in_symtab_p (base1) 2114 || !decl_in_symtab_p (base2)) 2115 return 0; 2116 2117 /* Don't cause symbols to be inserted by the act of checking. 
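(symtab_node::get only looks up an existing node, whereas symtab_node::get_create would create one.)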
*/
2118 symtab_node *node1 = symtab_node::get (base1);
2119 if (!node1)
2120 return 0;
2121 symtab_node *node2 = symtab_node::get (base2);
2122 if (!node2)
2123 return 0;
2124
2125 ret = node1->equal_address_to (node2, true);
2126 return ret;
2127 }
2128
2129 /* Same as compare_base_decls but for SYMBOL_REF. */
2130
2131 static int
2132 compare_base_symbol_refs (const_rtx x_base, const_rtx y_base)
2133 {
2134 tree x_decl = SYMBOL_REF_DECL (x_base);
2135 tree y_decl = SYMBOL_REF_DECL (y_base);
2136 bool binds_def = true;
2137
2138 if (XSTR (x_base, 0) == XSTR (y_base, 0))
2139 return 1;
2140 if (x_decl && y_decl)
2141 return compare_base_decls (x_decl, y_decl);
2142 if (x_decl || y_decl)
2143 {
2144 if (!x_decl)
2145 {
2146 std::swap (x_decl, y_decl);
2147 std::swap (x_base, y_base);
2148 }
2149 /* We handle specially only section anchors and assume that other
2150 labels may overlap with user variables in an arbitrary way. */
2151 if (!SYMBOL_REF_HAS_BLOCK_INFO_P (y_base))
2152 return -1;
2153 /* Anchors contain static VAR_DECLs and CONST_DECLs. We are safe
2154 to ignore CONST_DECLs because they are readonly. */
2155 if (!VAR_P (x_decl)
2156 || (!TREE_STATIC (x_decl) && !TREE_PUBLIC (x_decl)))
2157 return 0;
2158
2159 symtab_node *x_node = symtab_node::get_create (x_decl)
2160 ->ultimate_alias_target ();
2161 /* An external variable cannot be in a section anchor. */
2162 if (!x_node->definition)
2163 return 0;
2164 x_base = XEXP (DECL_RTL (x_node->decl), 0);
2165 /* If it is not in an anchor, we can disambiguate. */
2166 if (!SYMBOL_REF_HAS_BLOCK_INFO_P (x_base))
2167 return 0;
2168
2169 /* We have an alias of an anchored variable. If it can be interposed,
2170 we must assume it may or may not alias its anchor. */
2171 binds_def = decl_binds_to_current_def_p (x_decl);
2172 }
2173 /* If we have a variable in a section anchor, we can compare by offset. */
2174 if (SYMBOL_REF_HAS_BLOCK_INFO_P (x_base)
2175 && SYMBOL_REF_HAS_BLOCK_INFO_P (y_base))
2176 {
2177 if (SYMBOL_REF_BLOCK (x_base) != SYMBOL_REF_BLOCK (y_base))
2178 return 0;
2179 if (SYMBOL_REF_BLOCK_OFFSET (x_base) == SYMBOL_REF_BLOCK_OFFSET (y_base))
2180 return binds_def ? 1 : -1;
2181 if (SYMBOL_REF_ANCHOR_P (x_base) != SYMBOL_REF_ANCHOR_P (y_base))
2182 return -1;
2183 return 0;
2184 }
2185 /* In general we assume that memory locations pointed to by different labels
2186 may overlap in undefined ways. */
2187 return -1;
2188 }
2189
2190 /* Return 0 if the addresses X and Y are known to point to different
2191 objects, 1 if they might be pointers to the same object. */
2192
2193 static int
2194 base_alias_check (rtx x, rtx x_base, rtx y, rtx y_base,
2195 machine_mode x_mode, machine_mode y_mode)
2196 {
2197 /* If the address itself has no known base, see if a known equivalent
2198 value has one. If either address still has no known base, nothing
2199 is known about aliasing. */
2200 if (x_base == 0)
2201 {
2202 rtx x_c;
2203
2204 if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x)
2205 return 1;
2206
2207 x_base = find_base_term (x_c);
2208 if (x_base == 0)
2209 return 1;
2210 }
2211
2212 if (y_base == 0)
2213 {
2214 rtx y_c;
2215 if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y)
2216 return 1;
2217
2218 y_base = find_base_term (y_c);
2219 if (y_base == 0)
2220 return 1;
2221 }
2222
2223 /* If the base addresses are equal, nothing is known about aliasing. */
2224 if (rtx_equal_p (x_base, y_base))
2225 return 1;
2226
2227 /* The base addresses are different expressions.
If they are not accessed
2228 via AND, there is no conflict. We can bring knowledge of object
2229 alignment into play here. For example, on alpha, "char a, b;" can
2230 alias one another, though "char a; long b;" cannot. AND addresses may
2231 implicitly alias surrounding objects; i.e. an unaligned access in DImode
2232 via an AND address can alias all surrounding object types except those
2233 with alignment 8 or higher. */
2234 if (GET_CODE (x) == AND && GET_CODE (y) == AND)
2235 return 1;
2236 if (GET_CODE (x) == AND
2237 && (!CONST_INT_P (XEXP (x, 1))
2238 || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
2239 return 1;
2240 if (GET_CODE (y) == AND
2241 && (!CONST_INT_P (XEXP (y, 1))
2242 || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
2243 return 1;
2244
2245 /* Differing symbols not accessed via AND never alias. */
2246 if (GET_CODE (x_base) == SYMBOL_REF && GET_CODE (y_base) == SYMBOL_REF)
2247 return compare_base_symbol_refs (x_base, y_base) != 0;
2248
2249 if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
2250 return 0;
2251
2252 if (unique_base_value_p (x_base) || unique_base_value_p (y_base))
2253 return 0;
2254
2255 return 1;
2256 }
2257
2258 /* Return TRUE if EXPR refers to a VALUE whose uid is greater than
2259 (or equal to) that of V. */
2260
2261 static bool
2262 refs_newer_value_p (const_rtx expr, rtx v)
2263 {
2264 int minuid = CSELIB_VAL_PTR (v)->uid;
2265 subrtx_iterator::array_type array;
2266 FOR_EACH_SUBRTX (iter, array, expr, NONCONST)
2267 if (GET_CODE (*iter) == VALUE && CSELIB_VAL_PTR (*iter)->uid >= minuid)
2268 return true;
2269 return false;
2270 }
2271
2272 /* Convert the address X into something we can use. This is done by returning
2273 it unchanged unless it is a VALUE or VALUE +/- constant; for VALUE
2274 we call cselib to get a more useful rtx. */
2275
2276 rtx
2277 get_addr (rtx x)
2278 {
2279 cselib_val *v;
2280 struct elt_loc_list *l;
2281
2282 if (GET_CODE (x) != VALUE)
2283 {
2284 if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
2285 && GET_CODE (XEXP (x, 0)) == VALUE
2286 && CONST_SCALAR_INT_P (XEXP (x, 1)))
2287 {
2288 rtx op0 = get_addr (XEXP (x, 0));
2289 if (op0 != XEXP (x, 0))
2290 {
2291 if (GET_CODE (x) == PLUS
2292 && GET_CODE (XEXP (x, 1)) == CONST_INT)
2293 return plus_constant (GET_MODE (x), op0, INTVAL (XEXP (x, 1)));
2294 return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
2295 op0, XEXP (x, 1));
2296 }
2297 }
2298 return x;
2299 }
2300 v = CSELIB_VAL_PTR (x);
2301 if (v)
2302 {
2303 bool have_equivs = cselib_have_permanent_equivalences ();
2304 if (have_equivs)
2305 v = canonical_cselib_val (v);
2306 for (l = v->locs; l; l = l->next)
2307 if (CONSTANT_P (l->loc))
2308 return l->loc;
2309 for (l = v->locs; l; l = l->next)
2310 if (!REG_P (l->loc) && !MEM_P (l->loc)
2311 /* Avoid infinite recursion when potentially dealing with
2312 var-tracking artificial equivalences, by skipping the
2313 equivalences themselves, and not choosing expressions
2314 that refer to newer VALUEs. */
2315 && (!have_equivs
2316 || (GET_CODE (l->loc) != VALUE
2317 && !refs_newer_value_p (l->loc, x))))
2318 return l->loc;
2319 if (have_equivs)
2320 {
2321 for (l = v->locs; l; l = l->next)
2322 if (REG_P (l->loc)
2323 || (GET_CODE (l->loc) != VALUE
2324 && !refs_newer_value_p (l->loc, x)))
2325 return l->loc;
2326 /* Return the canonical value.
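That is, fall back to v->val_rtx, the VALUE rtx of the canonical cselib_val.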
*/ 2327 return v->val_rtx; 2328 } 2329 if (v->locs) 2330 return v->locs->loc; 2331 } 2332 return x; 2333 } 2334 2335 /* Return the address of the (N_REFS + 1)th memory reference to ADDR 2336 where SIZE is the size in bytes of the memory reference. If ADDR 2337 is not modified by the memory reference then ADDR is returned. */ 2338 2339 static rtx 2340 addr_side_effect_eval (rtx addr, poly_int64 size, int n_refs) 2341 { 2342 poly_int64 offset = 0; 2343 2344 switch (GET_CODE (addr)) 2345 { 2346 case PRE_INC: 2347 offset = (n_refs + 1) * size; 2348 break; 2349 case PRE_DEC: 2350 offset = -(n_refs + 1) * size; 2351 break; 2352 case POST_INC: 2353 offset = n_refs * size; 2354 break; 2355 case POST_DEC: 2356 offset = -n_refs * size; 2357 break; 2358 2359 default: 2360 return addr; 2361 } 2362 2363 addr = plus_constant (GET_MODE (addr), XEXP (addr, 0), offset); 2364 addr = canon_rtx (addr); 2365 2366 return addr; 2367 } 2368 2369 /* Return TRUE if an object X sized at XSIZE bytes and another object 2370 Y sized at YSIZE bytes, starting C bytes after X, may overlap. If 2371 any of the sizes is zero, assume an overlap, otherwise use the 2372 absolute value of the sizes as the actual sizes. */ 2373 2374 static inline bool 2375 offset_overlap_p (poly_int64 c, poly_int64 xsize, poly_int64 ysize) 2376 { 2377 if (known_eq (xsize, 0) || known_eq (ysize, 0)) 2378 return true; 2379 2380 if (maybe_ge (c, 0)) 2381 return maybe_gt (maybe_lt (xsize, 0) ? -xsize : xsize, c); 2382 else 2383 return maybe_gt (maybe_lt (ysize, 0) ? -ysize : ysize, -c); 2384 } 2385 2386 /* Return one if X and Y (memory addresses) reference the 2387 same location in memory or if the references overlap. 2388 Return zero if they do not overlap, else return 2389 minus one in which case they still might reference the same location. 2390 2391 C is an offset accumulator. When 2392 C is nonzero, we are testing aliases between X and Y + C. 2393 XSIZE is the size in bytes of the X reference, 2394 similarly YSIZE is the size in bytes for Y. 2395 Expect that canon_rtx has been already called for X and Y. 2396 2397 If XSIZE or YSIZE is zero, we do not know the amount of memory being 2398 referenced (the reference was BLKmode), so make the most pessimistic 2399 assumptions. 2400 2401 If XSIZE or YSIZE is negative, we may access memory outside the object 2402 being referenced as a side effect. This can happen when using AND to 2403 align memory references, as is done on the Alpha. 2404 2405 Nice to notice that varying addresses cannot conflict with fp if no 2406 local variables had their addresses taken, but that's too hard now. 2407 2408 ??? Contrary to the tree alias oracle this does not return 2409 one for X + non-constant and Y + non-constant when X and Y are equal. 2410 If that is fixed the TBAA hack for union type-punning can be removed. */ 2411 2412 static int 2413 memrefs_conflict_p (poly_int64 xsize, rtx x, poly_int64 ysize, rtx y, 2414 poly_int64 c) 2415 { 2416 if (GET_CODE (x) == VALUE) 2417 { 2418 if (REG_P (y)) 2419 { 2420 struct elt_loc_list *l = NULL; 2421 if (CSELIB_VAL_PTR (x)) 2422 for (l = canonical_cselib_val (CSELIB_VAL_PTR (x))->locs; 2423 l; l = l->next) 2424 if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, y)) 2425 break; 2426 if (l) 2427 x = y; 2428 else 2429 x = get_addr (x); 2430 } 2431 /* Don't call get_addr if y is the same VALUE. 
*/ 2432 else if (x != y) 2433 x = get_addr (x); 2434 } 2435 if (GET_CODE (y) == VALUE) 2436 { 2437 if (REG_P (x)) 2438 { 2439 struct elt_loc_list *l = NULL; 2440 if (CSELIB_VAL_PTR (y)) 2441 for (l = canonical_cselib_val (CSELIB_VAL_PTR (y))->locs; 2442 l; l = l->next) 2443 if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, x)) 2444 break; 2445 if (l) 2446 y = x; 2447 else 2448 y = get_addr (y); 2449 } 2450 /* Don't call get_addr if x is the same VALUE. */ 2451 else if (y != x) 2452 y = get_addr (y); 2453 } 2454 if (GET_CODE (x) == HIGH) 2455 x = XEXP (x, 0); 2456 else if (GET_CODE (x) == LO_SUM) 2457 x = XEXP (x, 1); 2458 else 2459 x = addr_side_effect_eval (x, maybe_lt (xsize, 0) ? -xsize : xsize, 0); 2460 if (GET_CODE (y) == HIGH) 2461 y = XEXP (y, 0); 2462 else if (GET_CODE (y) == LO_SUM) 2463 y = XEXP (y, 1); 2464 else 2465 y = addr_side_effect_eval (y, maybe_lt (ysize, 0) ? -ysize : ysize, 0); 2466 2467 if (GET_CODE (x) == SYMBOL_REF && GET_CODE (y) == SYMBOL_REF) 2468 { 2469 int cmp = compare_base_symbol_refs (x,y); 2470 2471 /* If both decls are the same, decide by offsets. */ 2472 if (cmp == 1) 2473 return offset_overlap_p (c, xsize, ysize); 2474 /* Assume a potential overlap for symbolic addresses that went 2475 through alignment adjustments (i.e., that have negative 2476 sizes), because we can't know how far they are from each 2477 other. */ 2478 if (maybe_lt (xsize, 0) || maybe_lt (ysize, 0)) 2479 return -1; 2480 /* If decls are different or we know by offsets that there is no overlap, 2481 we win. */ 2482 if (!cmp || !offset_overlap_p (c, xsize, ysize)) 2483 return 0; 2484 /* Decls may or may not be different and offsets overlap....*/ 2485 return -1; 2486 } 2487 else if (rtx_equal_for_memref_p (x, y)) 2488 { 2489 return offset_overlap_p (c, xsize, ysize); 2490 } 2491 2492 /* This code used to check for conflicts involving stack references and 2493 globals but the base address alias code now handles these cases. */ 2494 2495 if (GET_CODE (x) == PLUS) 2496 { 2497 /* The fact that X is canonicalized means that this 2498 PLUS rtx is canonicalized. */ 2499 rtx x0 = XEXP (x, 0); 2500 rtx x1 = XEXP (x, 1); 2501 2502 /* However, VALUEs might end up in different positions even in 2503 canonical PLUSes. Comparing their addresses is enough. */ 2504 if (x0 == y) 2505 return memrefs_conflict_p (xsize, x1, ysize, const0_rtx, c); 2506 else if (x1 == y) 2507 return memrefs_conflict_p (xsize, x0, ysize, const0_rtx, c); 2508 2509 poly_int64 cx1, cy1; 2510 if (GET_CODE (y) == PLUS) 2511 { 2512 /* The fact that Y is canonicalized means that this 2513 PLUS rtx is canonicalized. 
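In particular, canonicalization pushes any constant term to the second operand, which is why only the second operands are tested for constants below.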
*/ 2514 rtx y0 = XEXP (y, 0); 2515 rtx y1 = XEXP (y, 1); 2516 2517 if (x0 == y1) 2518 return memrefs_conflict_p (xsize, x1, ysize, y0, c); 2519 if (x1 == y0) 2520 return memrefs_conflict_p (xsize, x0, ysize, y1, c); 2521 2522 if (rtx_equal_for_memref_p (x1, y1)) 2523 return memrefs_conflict_p (xsize, x0, ysize, y0, c); 2524 if (rtx_equal_for_memref_p (x0, y0)) 2525 return memrefs_conflict_p (xsize, x1, ysize, y1, c); 2526 if (poly_int_rtx_p (x1, &cx1)) 2527 { 2528 if (poly_int_rtx_p (y1, &cy1)) 2529 return memrefs_conflict_p (xsize, x0, ysize, y0, 2530 c - cx1 + cy1); 2531 else 2532 return memrefs_conflict_p (xsize, x0, ysize, y, c - cx1); 2533 } 2534 else if (poly_int_rtx_p (y1, &cy1)) 2535 return memrefs_conflict_p (xsize, x, ysize, y0, c + cy1); 2536 2537 return -1; 2538 } 2539 else if (poly_int_rtx_p (x1, &cx1)) 2540 return memrefs_conflict_p (xsize, x0, ysize, y, c - cx1); 2541 } 2542 else if (GET_CODE (y) == PLUS) 2543 { 2544 /* The fact that Y is canonicalized means that this 2545 PLUS rtx is canonicalized. */ 2546 rtx y0 = XEXP (y, 0); 2547 rtx y1 = XEXP (y, 1); 2548 2549 if (x == y0) 2550 return memrefs_conflict_p (xsize, const0_rtx, ysize, y1, c); 2551 if (x == y1) 2552 return memrefs_conflict_p (xsize, const0_rtx, ysize, y0, c); 2553 2554 poly_int64 cy1; 2555 if (poly_int_rtx_p (y1, &cy1)) 2556 return memrefs_conflict_p (xsize, x, ysize, y0, c + cy1); 2557 else 2558 return -1; 2559 } 2560 2561 if (GET_CODE (x) == GET_CODE (y)) 2562 switch (GET_CODE (x)) 2563 { 2564 case MULT: 2565 { 2566 /* Handle cases where we expect the second operands to be the 2567 same, and check only whether the first operand would conflict 2568 or not. */ 2569 rtx x0, y0; 2570 rtx x1 = canon_rtx (XEXP (x, 1)); 2571 rtx y1 = canon_rtx (XEXP (y, 1)); 2572 if (! rtx_equal_for_memref_p (x1, y1)) 2573 return -1; 2574 x0 = canon_rtx (XEXP (x, 0)); 2575 y0 = canon_rtx (XEXP (y, 0)); 2576 if (rtx_equal_for_memref_p (x0, y0)) 2577 return offset_overlap_p (c, xsize, ysize); 2578 2579 /* Can't properly adjust our sizes. */ 2580 if (!CONST_INT_P (x1) 2581 || !can_div_trunc_p (xsize, INTVAL (x1), &xsize) 2582 || !can_div_trunc_p (ysize, INTVAL (x1), &ysize) 2583 || !can_div_trunc_p (c, INTVAL (x1), &c)) 2584 return -1; 2585 return memrefs_conflict_p (xsize, x0, ysize, y0, c); 2586 } 2587 2588 default: 2589 break; 2590 } 2591 2592 /* Deal with alignment ANDs by adjusting offset and size so as to 2593 cover the maximum range, without taking any previously known 2594 alignment into account. Make a size negative after such an 2595 adjustments, so that, if we end up with e.g. two SYMBOL_REFs, we 2596 assume a potential overlap, because they may end up in contiguous 2597 memory locations and the stricter-alignment access may span over 2598 part of both. 
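As an illustrative example: for a 4-byte access through (and ADDR (const_int -8)), sc below is -8, so xsize 4 first becomes -4 and then -11 after adding sc + 1, and C is shifted by the corresponding 7 bytes; the access is thus treated as an 11-byte window whose negative size records that an alignment adjustment took place.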
*/ 2599 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))) 2600 { 2601 HOST_WIDE_INT sc = INTVAL (XEXP (x, 1)); 2602 unsigned HOST_WIDE_INT uc = sc; 2603 if (sc < 0 && pow2_or_zerop (-uc)) 2604 { 2605 if (maybe_gt (xsize, 0)) 2606 xsize = -xsize; 2607 if (maybe_ne (xsize, 0)) 2608 xsize += sc + 1; 2609 c -= sc + 1; 2610 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)), 2611 ysize, y, c); 2612 } 2613 } 2614 if (GET_CODE (y) == AND && CONST_INT_P (XEXP (y, 1))) 2615 { 2616 HOST_WIDE_INT sc = INTVAL (XEXP (y, 1)); 2617 unsigned HOST_WIDE_INT uc = sc; 2618 if (sc < 0 && pow2_or_zerop (-uc)) 2619 { 2620 if (maybe_gt (ysize, 0)) 2621 ysize = -ysize; 2622 if (maybe_ne (ysize, 0)) 2623 ysize += sc + 1; 2624 c += sc + 1; 2625 return memrefs_conflict_p (xsize, x, 2626 ysize, canon_rtx (XEXP (y, 0)), c); 2627 } 2628 } 2629 2630 if (CONSTANT_P (x)) 2631 { 2632 poly_int64 cx, cy; 2633 if (poly_int_rtx_p (x, &cx) && poly_int_rtx_p (y, &cy)) 2634 { 2635 c += cy - cx; 2636 return offset_overlap_p (c, xsize, ysize); 2637 } 2638 2639 if (GET_CODE (x) == CONST) 2640 { 2641 if (GET_CODE (y) == CONST) 2642 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)), 2643 ysize, canon_rtx (XEXP (y, 0)), c); 2644 else 2645 return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)), 2646 ysize, y, c); 2647 } 2648 if (GET_CODE (y) == CONST) 2649 return memrefs_conflict_p (xsize, x, ysize, 2650 canon_rtx (XEXP (y, 0)), c); 2651 2652 /* Assume a potential overlap for symbolic addresses that went 2653 through alignment adjustments (i.e., that have negative 2654 sizes), because we can't know how far they are from each 2655 other. */ 2656 if (CONSTANT_P (y)) 2657 return (maybe_lt (xsize, 0) 2658 || maybe_lt (ysize, 0) 2659 || offset_overlap_p (c, xsize, ysize)); 2660 2661 return -1; 2662 } 2663 2664 return -1; 2665 } 2666 2667 /* Functions to compute memory dependencies. 2668 2669 Since we process the insns in execution order, we can build tables 2670 to keep track of what registers are fixed (and not aliased), what registers 2671 are varying in known ways, and what registers are varying in unknown 2672 ways. 2673 2674 If both memory references are volatile, then there must always be a 2675 dependence between the two references, since their order can not be 2676 changed. A volatile and non-volatile reference can be interchanged 2677 though. 2678 2679 We also must allow AND addresses, because they may generate accesses 2680 outside the object being referenced. This is used to generate aligned 2681 addresses from unaligned addresses, for instance, the alpha 2682 storeqi_unaligned pattern. */ 2683 2684 /* Read dependence: X is read after read in MEM takes place. There can 2685 only be a dependence here if both reads are volatile, or if either is 2686 an explicit barrier. */ 2687 2688 int 2689 read_dependence (const_rtx mem, const_rtx x) 2690 { 2691 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem)) 2692 return true; 2693 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER 2694 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER) 2695 return true; 2696 return false; 2697 } 2698 2699 /* Look at the bottom of the COMPONENT_REF list for a DECL, and return it. */ 2700 2701 static tree 2702 decl_for_component_ref (tree x) 2703 { 2704 do 2705 { 2706 x = TREE_OPERAND (x, 0); 2707 } 2708 while (x && TREE_CODE (x) == COMPONENT_REF); 2709 2710 return x && DECL_P (x) ? x : NULL_TREE; 2711 } 2712 2713 /* Walk up the COMPONENT_REF list in X and adjust *OFFSET to compensate 2714 for the offset of the field reference. 
*KNOWN_P says whether the
2715 offset is known. */
2716
2717 static void
2718 adjust_offset_for_component_ref (tree x, bool *known_p,
2719 poly_int64 *offset)
2720 {
2721 if (!*known_p)
2722 return;
2723 do
2724 {
2725 tree xoffset = component_ref_field_offset (x);
2726 tree field = TREE_OPERAND (x, 1);
2727 if (TREE_CODE (xoffset) != INTEGER_CST)
2728 {
2729 *known_p = false;
2730 return;
2731 }
2732
2733 offset_int woffset
2734 = (wi::to_offset (xoffset)
2735 + (wi::to_offset (DECL_FIELD_BIT_OFFSET (field))
2736 >> LOG2_BITS_PER_UNIT));
2737 if (!wi::fits_uhwi_p (woffset))
2738 {
2739 *known_p = false;
2740 return;
2741 }
2742 *offset += woffset.to_uhwi ();
2743
2744 x = TREE_OPERAND (x, 0);
2745 }
2746 while (x && TREE_CODE (x) == COMPONENT_REF);
2747 }
2748
2749 /* Return nonzero if we can determine the exprs corresponding to memrefs
2750 X and Y and they do not overlap.
2751 If LOOP_INVARIANT is set, skip offset-based disambiguation. */
2752
2753 int
2754 nonoverlapping_memrefs_p (const_rtx x, const_rtx y, bool loop_invariant)
2755 {
2756 tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y);
2757 rtx rtlx, rtly;
2758 rtx basex, basey;
2759 bool moffsetx_known_p, moffsety_known_p;
2760 poly_int64 moffsetx = 0, moffsety = 0;
2761 poly_int64 offsetx = 0, offsety = 0, sizex, sizey;
2762
2763 /* Unless both have exprs, we can't tell anything. */
2764 if (exprx == 0 || expry == 0)
2765 return 0;
2766
2767 /* For spill-slot accesses make sure we have valid offsets. */
2768 if ((exprx == get_spill_slot_decl (false)
2769 && ! MEM_OFFSET_KNOWN_P (x))
2770 || (expry == get_spill_slot_decl (false)
2771 && ! MEM_OFFSET_KNOWN_P (y)))
2772 return 0;
2773
2774 /* If the field reference test failed, look at the DECLs involved. */
2775 moffsetx_known_p = MEM_OFFSET_KNOWN_P (x);
2776 if (moffsetx_known_p)
2777 moffsetx = MEM_OFFSET (x);
2778 if (TREE_CODE (exprx) == COMPONENT_REF)
2779 {
2780 tree t = decl_for_component_ref (exprx);
2781 if (! t)
2782 return 0;
2783 adjust_offset_for_component_ref (exprx, &moffsetx_known_p, &moffsetx);
2784 exprx = t;
2785 }
2786
2787 moffsety_known_p = MEM_OFFSET_KNOWN_P (y);
2788 if (moffsety_known_p)
2789 moffsety = MEM_OFFSET (y);
2790 if (TREE_CODE (expry) == COMPONENT_REF)
2791 {
2792 tree t = decl_for_component_ref (expry);
2793 if (! t)
2794 return 0;
2795 adjust_offset_for_component_ref (expry, &moffsety_known_p, &moffsety);
2796 expry = t;
2797 }
2798
2799 if (! DECL_P (exprx) || ! DECL_P (expry))
2800 return 0;
2801
2802 /* If we refer to different gimple registers, or one gimple register
2803 and one non-gimple-register, we know they can't overlap. First,
2804 gimple registers don't have their addresses taken. Now, there
2805 could be more than one stack slot for (different versions of) the
2806 same gimple register, but we can presumably tell they don't
2807 overlap based on offsets from stack base addresses elsewhere.
2808 It's important that we don't proceed to DECL_RTL, because gimple
2809 registers may not pass DECL_RTL_SET_P, and make_decl_rtl won't be
2810 able to do anything about them since no SSA information will have
2811 remained to guide it. */
2812 if (is_gimple_reg (exprx) || is_gimple_reg (expry))
2813 return exprx != expry
2814 || (moffsetx_known_p && moffsety_known_p
2815 && MEM_SIZE_KNOWN_P (x) && MEM_SIZE_KNOWN_P (y)
2816 && !offset_overlap_p (moffsety - moffsetx,
2817 MEM_SIZE (x), MEM_SIZE (y)));
2818
2819 /* With invalid code we can end up storing into the constant pool.
2820 Bail out to avoid ICEing when creating RTL for this.
2821 See gfortran.dg/lto/20091028-2_0.f90. */
2822 if (TREE_CODE (exprx) == CONST_DECL
2823 || TREE_CODE (expry) == CONST_DECL)
2824 return 1;
2825
2826 /* If one decl is known to be a function or label in a function and
2827 the other is some kind of data, they can't overlap. */
2828 if ((TREE_CODE (exprx) == FUNCTION_DECL
2829 || TREE_CODE (exprx) == LABEL_DECL)
2830 != (TREE_CODE (expry) == FUNCTION_DECL
2831 || TREE_CODE (expry) == LABEL_DECL))
2832 return 1;
2833
2834 /* If either of the decls doesn't have DECL_RTL set (e.g. marked as
2835 living in multiple places), we can't tell anything. The exception
2836 is FUNCTION_DECLs, for which we can create DECL_RTL on demand. */
2837 if ((!DECL_RTL_SET_P (exprx) && TREE_CODE (exprx) != FUNCTION_DECL)
2838 || (!DECL_RTL_SET_P (expry) && TREE_CODE (expry) != FUNCTION_DECL))
2839 return 0;
2840
2841 rtlx = DECL_RTL (exprx);
2842 rtly = DECL_RTL (expry);
2843
2844 /* If either RTL is not a MEM, it must be a REG or CONCAT, meaning they
2845 can't overlap unless they are the same because we never reuse that part
2846 of the stack frame used for locals for spilled pseudos. */
2847 if ((!MEM_P (rtlx) || !MEM_P (rtly))
2848 && ! rtx_equal_p (rtlx, rtly))
2849 return 1;
2850
2851 /* If we have MEMs referring to different address spaces (which can
2852 potentially overlap), we cannot easily tell from the addresses
2853 whether the references overlap. */
2854 if (MEM_P (rtlx) && MEM_P (rtly)
2855 && MEM_ADDR_SPACE (rtlx) != MEM_ADDR_SPACE (rtly))
2856 return 0;
2857
2858 /* Get the base and offsets of both decls. If either is a register, we
2859 know both are and are the same, so use that as the base. The only
2860 way we can avoid overlap is if we can deduce that they are nonoverlapping
2861 pieces of that decl, which is very rare. */
2862 basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx;
2863 basex = strip_offset_and_add (basex, &offsetx);
2864
2865 basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly;
2866 basey = strip_offset_and_add (basey, &offsety);
2867
2868 /* If the bases are different, we know they do not overlap if both
2869 are constants or if one is a constant and the other a pointer into the
2870 stack frame. Otherwise a different base means we can't tell if they
2871 overlap or not. */
2872 if (compare_base_decls (exprx, expry) == 0)
2873 return ((CONSTANT_P (basex) && CONSTANT_P (basey))
2874 || (CONSTANT_P (basex) && REG_P (basey)
2875 && REGNO_PTR_FRAME_P (REGNO (basey)))
2876 || (CONSTANT_P (basey) && REG_P (basex)
2877 && REGNO_PTR_FRAME_P (REGNO (basex))));
2878
2879 /* Offset-based disambiguation is not appropriate for the loop-invariant case. */
2880 if (loop_invariant)
2881 return 0;
2882
2883 /* Offset-based disambiguation is OK even if we do not know that the
2884 declarations are necessarily different
2885 (i.e. compare_base_decls (exprx, expry) == -1). */
2886
2887 sizex = (!MEM_P (rtlx) ? poly_int64 (GET_MODE_SIZE (GET_MODE (rtlx)))
2888 : MEM_SIZE_KNOWN_P (rtlx) ? MEM_SIZE (rtlx)
2889 : -1);
2890 sizey = (!MEM_P (rtly) ? poly_int64 (GET_MODE_SIZE (GET_MODE (rtly)))
2891 : MEM_SIZE_KNOWN_P (rtly) ? MEM_SIZE (rtly)
2892 : -1);
2893
2894 /* If we have an offset for either memref, it can update the values computed
2895 above. */
2896 if (moffsetx_known_p)
2897 offsetx += moffsetx, sizex -= moffsetx;
2898 if (moffsety_known_p)
2899 offsety += moffsety, sizey -= moffsety;
2900
2901 /* If a memref has both a size and an offset, we can use the smaller size.
2902 We can't do this if the offset isn't known because we must view this 2903 memref as being anywhere inside the DECL's MEM. */ 2904 if (MEM_SIZE_KNOWN_P (x) && moffsetx_known_p) 2905 sizex = MEM_SIZE (x); 2906 if (MEM_SIZE_KNOWN_P (y) && moffsety_known_p) 2907 sizey = MEM_SIZE (y); 2908 2909 return !ranges_maybe_overlap_p (offsetx, sizex, offsety, sizey); 2910 } 2911 2912 /* Helper for true_dependence and canon_true_dependence. 2913 Checks for true dependence: X is read after store in MEM takes place. 2914 2915 If MEM_CANONICALIZED is FALSE, then X_ADDR and MEM_ADDR should be 2916 NULL_RTX, and the canonical addresses of MEM and X are both computed 2917 here. If MEM_CANONICALIZED, then MEM must be already canonicalized. 2918 2919 If X_ADDR is non-NULL, it is used in preference of XEXP (x, 0). 2920 2921 Returns 1 if there is a true dependence, 0 otherwise. */ 2922 2923 static int 2924 true_dependence_1 (const_rtx mem, machine_mode mem_mode, rtx mem_addr, 2925 const_rtx x, rtx x_addr, bool mem_canonicalized) 2926 { 2927 rtx true_mem_addr; 2928 rtx base; 2929 int ret; 2930 2931 gcc_checking_assert (mem_canonicalized ? (mem_addr != NULL_RTX) 2932 : (mem_addr == NULL_RTX && x_addr == NULL_RTX)); 2933 2934 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem)) 2935 return 1; 2936 2937 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything. 2938 This is used in epilogue deallocation functions, and in cselib. */ 2939 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH) 2940 return 1; 2941 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH) 2942 return 1; 2943 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER 2944 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER) 2945 return 1; 2946 2947 if (! x_addr) 2948 x_addr = XEXP (x, 0); 2949 x_addr = get_addr (x_addr); 2950 2951 if (! mem_addr) 2952 { 2953 mem_addr = XEXP (mem, 0); 2954 if (mem_mode == VOIDmode) 2955 mem_mode = GET_MODE (mem); 2956 } 2957 true_mem_addr = get_addr (mem_addr); 2958 2959 /* Read-only memory is by definition never modified, and therefore can't 2960 conflict with anything. However, don't assume anything when AND 2961 addresses are involved and leave to the code below to determine 2962 dependence. We don't expect to find read-only set on MEM, but 2963 stupid user tricks can produce them, so don't die. */ 2964 if (MEM_READONLY_P (x) 2965 && GET_CODE (x_addr) != AND 2966 && GET_CODE (true_mem_addr) != AND) 2967 return 0; 2968 2969 /* If we have MEMs referring to different address spaces (which can 2970 potentially overlap), we cannot easily tell from the addresses 2971 whether the references overlap. */ 2972 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x)) 2973 return 1; 2974 2975 base = find_base_term (x_addr); 2976 if (base && (GET_CODE (base) == LABEL_REF 2977 || (GET_CODE (base) == SYMBOL_REF 2978 && CONSTANT_POOL_ADDRESS_P (base)))) 2979 return 0; 2980 2981 rtx mem_base = find_base_term (true_mem_addr); 2982 if (! 
base_alias_check (x_addr, base, true_mem_addr, mem_base,
2983 GET_MODE (x), mem_mode))
2984 return 0;
2985
2986 x_addr = canon_rtx (x_addr);
2987 if (!mem_canonicalized)
2988 mem_addr = canon_rtx (true_mem_addr);
2989
2990 if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
2991 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
2992 return ret;
2993
2994 if (mems_in_disjoint_alias_sets_p (x, mem))
2995 return 0;
2996
2997 if (nonoverlapping_memrefs_p (mem, x, false))
2998 return 0;
2999
3000 return rtx_refs_may_alias_p (x, mem, true);
3001 }
3002
3003 /* True dependence: X is read after store in MEM takes place. */
3004
3005 int
3006 true_dependence (const_rtx mem, machine_mode mem_mode, const_rtx x)
3007 {
3008 return true_dependence_1 (mem, mem_mode, NULL_RTX,
3009 x, NULL_RTX, /*mem_canonicalized=*/false);
3010 }
3011
3012 /* Canonical true dependence: X is read after store in MEM takes place.
3013 Variant of true_dependence which assumes MEM has already been
3014 canonicalized (hence we no longer do that here).
3015 The mem_addr argument has been added, since true_dependence_1 computed
3016 this value prior to canonicalizing. */
3017
3018 int
3019 canon_true_dependence (const_rtx mem, machine_mode mem_mode, rtx mem_addr,
3020 const_rtx x, rtx x_addr)
3021 {
3022 return true_dependence_1 (mem, mem_mode, mem_addr,
3023 x, x_addr, /*mem_canonicalized=*/true);
3024 }
3025
3026 /* Returns nonzero if a write to X might alias a previous read from
3027 (or, if WRITEP is true, a write to) MEM.
3028 If X_CANONICALIZED is true, then X_ADDR is the canonicalized address of X,
3029 and X_MODE is the mode for that access.
3030 If MEM_CANONICALIZED is true, MEM is canonicalized. */
3031
3032 static int
3033 write_dependence_p (const_rtx mem,
3034 const_rtx x, machine_mode x_mode, rtx x_addr,
3035 bool mem_canonicalized, bool x_canonicalized, bool writep)
3036 {
3037 rtx mem_addr;
3038 rtx true_mem_addr, true_x_addr;
3039 rtx base;
3040 int ret;
3041
3042 gcc_checking_assert (x_canonicalized
3043 ? (x_addr != NULL_RTX
3044 && (x_mode != VOIDmode || GET_MODE (x) == VOIDmode))
3045 : (x_addr == NULL_RTX && x_mode == VOIDmode));
3046
3047 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
3048 return 1;
3049
3050 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
3051 This is used in epilogue deallocation functions. */
3052 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
3053 return 1;
3054 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
3055 return 1;
3056 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
3057 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
3058 return 1;
3059
3060 if (!x_addr)
3061 x_addr = XEXP (x, 0);
3062 true_x_addr = get_addr (x_addr);
3063
3064 mem_addr = XEXP (mem, 0);
3065 true_mem_addr = get_addr (mem_addr);
3066
3067 /* A read from read-only memory can't conflict with read-write memory.
3068 Don't assume anything when AND addresses are involved and leave to
3069 the code below to determine dependence. */
3070 if (!writep
3071 && MEM_READONLY_P (mem)
3072 && GET_CODE (true_x_addr) != AND
3073 && GET_CODE (true_mem_addr) != AND)
3074 return 0;
3075
3076 /* If we have MEMs referring to different address spaces (which can
3077 potentially overlap), we cannot easily tell from the addresses
3078 whether the references overlap. */
3079 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
3080 return 1;
3081
3082 base = find_base_term (true_mem_addr);
3083 if (!
writep 3084 && base 3085 && (GET_CODE (base) == LABEL_REF 3086 || (GET_CODE (base) == SYMBOL_REF 3087 && CONSTANT_POOL_ADDRESS_P (base)))) 3088 return 0; 3089 3090 rtx x_base = find_base_term (true_x_addr); 3091 if (! base_alias_check (true_x_addr, x_base, true_mem_addr, base, 3092 GET_MODE (x), GET_MODE (mem))) 3093 return 0; 3094 3095 if (!x_canonicalized) 3096 { 3097 x_addr = canon_rtx (true_x_addr); 3098 x_mode = GET_MODE (x); 3099 } 3100 if (!mem_canonicalized) 3101 mem_addr = canon_rtx (true_mem_addr); 3102 3103 if ((ret = memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr, 3104 GET_MODE_SIZE (x_mode), x_addr, 0)) != -1) 3105 return ret; 3106 3107 if (nonoverlapping_memrefs_p (x, mem, false)) 3108 return 0; 3109 3110 return rtx_refs_may_alias_p (x, mem, false); 3111 } 3112 3113 /* Anti dependence: X is written after read in MEM takes place. */ 3114 3115 int 3116 anti_dependence (const_rtx mem, const_rtx x) 3117 { 3118 return write_dependence_p (mem, x, VOIDmode, NULL_RTX, 3119 /*mem_canonicalized=*/false, 3120 /*x_canonicalized*/false, /*writep=*/false); 3121 } 3122 3123 /* Likewise, but we already have a canonicalized MEM, and X_ADDR for X. 3124 Also, consider X in X_MODE (which might be from an enclosing 3125 STRICT_LOW_PART / ZERO_EXTRACT). 3126 If MEM_CANONICALIZED is true, MEM is canonicalized. */ 3127 3128 int 3129 canon_anti_dependence (const_rtx mem, bool mem_canonicalized, 3130 const_rtx x, machine_mode x_mode, rtx x_addr) 3131 { 3132 return write_dependence_p (mem, x, x_mode, x_addr, 3133 mem_canonicalized, /*x_canonicalized=*/true, 3134 /*writep=*/false); 3135 } 3136 3137 /* Output dependence: X is written after store in MEM takes place. */ 3138 3139 int 3140 output_dependence (const_rtx mem, const_rtx x) 3141 { 3142 return write_dependence_p (mem, x, VOIDmode, NULL_RTX, 3143 /*mem_canonicalized=*/false, 3144 /*x_canonicalized*/false, /*writep=*/true); 3145 } 3146 3147 /* Likewise, but we already have a canonicalized MEM, and X_ADDR for X. 3148 Also, consider X in X_MODE (which might be from an enclosing 3149 STRICT_LOW_PART / ZERO_EXTRACT). 3150 If MEM_CANONICALIZED is true, MEM is canonicalized. */ 3151 3152 int 3153 canon_output_dependence (const_rtx mem, bool mem_canonicalized, 3154 const_rtx x, machine_mode x_mode, rtx x_addr) 3155 { 3156 return write_dependence_p (mem, x, x_mode, x_addr, 3157 mem_canonicalized, /*x_canonicalized=*/true, 3158 /*writep=*/true); 3159 } 3160 3161 3162 3163 /* Check whether X may be aliased with MEM. Don't do offset-based 3164 memory disambiguation & TBAA. */ 3165 int 3166 may_alias_p (const_rtx mem, const_rtx x) 3167 { 3168 rtx x_addr, mem_addr; 3169 3170 if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem)) 3171 return 1; 3172 3173 /* (mem:BLK (scratch)) is a special mechanism to conflict with everything. 3174 This is used in epilogue deallocation functions. */ 3175 if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH) 3176 return 1; 3177 if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH) 3178 return 1; 3179 if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER 3180 || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER) 3181 return 1; 3182 3183 x_addr = XEXP (x, 0); 3184 x_addr = get_addr (x_addr); 3185 3186 mem_addr = XEXP (mem, 0); 3187 mem_addr = get_addr (mem_addr); 3188 3189 /* Read-only memory is by definition never modified, and therefore can't 3190 conflict with anything. However, don't assume anything when AND 3191 addresses are involved and leave to the code below to determine 3192 dependence. 
We don't expect to find read-only set on MEM, but
3193 stupid user tricks can produce them, so don't die. */
3194 if (MEM_READONLY_P (x)
3195 && GET_CODE (x_addr) != AND
3196 && GET_CODE (mem_addr) != AND)
3197 return 0;
3198
3199 /* If we have MEMs referring to different address spaces (which can
3200 potentially overlap), we cannot easily tell from the addresses
3201 whether the references overlap. */
3202 if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
3203 return 1;
3204
3205 rtx x_base = find_base_term (x_addr);
3206 rtx mem_base = find_base_term (mem_addr);
3207 if (! base_alias_check (x_addr, x_base, mem_addr, mem_base,
3208 GET_MODE (x), GET_MODE (mem_addr)))
3209 return 0;
3210
3211 if (nonoverlapping_memrefs_p (mem, x, true))
3212 return 0;
3213
3214 /* TBAA is not valid for loop-invariant queries. */
3215 return rtx_refs_may_alias_p (x, mem, false);
3216 }
3217
3218 void
3219 init_alias_target (void)
3220 {
3221 int i;
3222
3223 if (!arg_base_value)
3224 arg_base_value = gen_rtx_ADDRESS (VOIDmode, 0);
3225
3226 memset (static_reg_base_value, 0, sizeof static_reg_base_value);
3227
3228 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3229 /* Check whether this register can hold an incoming pointer
3230 argument. FUNCTION_ARG_REGNO_P tests outgoing register
3231 numbers, so translate if necessary due to register windows. */
3232 if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i))
3233 && targetm.hard_regno_mode_ok (i, Pmode))
3234 static_reg_base_value[i] = arg_base_value;
3235
3236 /* RTL code is required to be consistent about whether it uses the
3237 stack pointer, the frame pointer or the argument pointer to
3238 access a given area of the frame. We can therefore use the
3239 base address to distinguish between the different areas. */
3240 static_reg_base_value[STACK_POINTER_REGNUM]
3241 = unique_base_value (UNIQUE_BASE_VALUE_SP);
3242 static_reg_base_value[ARG_POINTER_REGNUM]
3243 = unique_base_value (UNIQUE_BASE_VALUE_ARGP);
3244 static_reg_base_value[FRAME_POINTER_REGNUM]
3245 = unique_base_value (UNIQUE_BASE_VALUE_FP);
3246
3247 /* The above rules extend post-reload, with eliminations applying
3248 consistently to each of the three pointers. Cope with cases in
3249 which the frame pointer is eliminated to the hard frame pointer
3250 rather than the stack pointer. */
3251 if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
3252 static_reg_base_value[HARD_FRAME_POINTER_REGNUM]
3253 = unique_base_value (UNIQUE_BASE_VALUE_HFP);
3254 }
3255
3256 /* Set MEMORY_MODIFIED when X modifies DATA (which is assumed
3257 to be a memory reference). */
3258 static bool memory_modified;
3259 static void
3260 memory_modified_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3261 {
3262 if (MEM_P (x))
3263 {
3264 if (anti_dependence (x, (const_rtx)data) || output_dependence (x, (const_rtx)data))
3265 memory_modified = true;
3266 }
3267 }
3268
3269
3270 /* Return true when INSN possibly modifies the memory contents of MEM
3271 (i.e. the address can be modified). */
3272 bool
3273 memory_modified_in_insn_p (const_rtx mem, const_rtx insn)
3274 {
3275 if (!INSN_P (insn))
3276 return false;
3277 /* Conservatively assume all non-readonly MEMs might be modified in
3278 calls. */
3279 if (CALL_P (insn))
3280 return true;
3281 memory_modified = false;
3282 note_stores (PATTERN (insn), memory_modified_1, CONST_CAST_RTX(mem));
3283 return memory_modified;
3284 }
3285
3286 /* Return TRUE if the destination of a set is rtx identical to
3287 ITEM.
*/ 3288 static inline bool 3289 set_dest_equal_p (const_rtx set, const_rtx item) 3290 { 3291 rtx dest = SET_DEST (set); 3292 return rtx_equal_p (dest, item); 3293 } 3294 3295 /* Initialize the aliasing machinery. Initialize the REG_KNOWN_VALUE 3296 array. */ 3297 3298 void 3299 init_alias_analysis (void) 3300 { 3301 unsigned int maxreg = max_reg_num (); 3302 int changed, pass; 3303 int i; 3304 unsigned int ui; 3305 rtx_insn *insn; 3306 rtx val; 3307 int rpo_cnt; 3308 int *rpo; 3309 3310 timevar_push (TV_ALIAS_ANALYSIS); 3311 3312 vec_safe_grow_cleared (reg_known_value, maxreg - FIRST_PSEUDO_REGISTER); 3313 reg_known_equiv_p = sbitmap_alloc (maxreg - FIRST_PSEUDO_REGISTER); 3314 bitmap_clear (reg_known_equiv_p); 3315 3316 /* If we have memory allocated from the previous run, use it. */ 3317 if (old_reg_base_value) 3318 reg_base_value = old_reg_base_value; 3319 3320 if (reg_base_value) 3321 reg_base_value->truncate (0); 3322 3323 vec_safe_grow_cleared (reg_base_value, maxreg); 3324 3325 new_reg_base_value = XNEWVEC (rtx, maxreg); 3326 reg_seen = sbitmap_alloc (maxreg); 3327 3328 /* The basic idea is that each pass through this loop will use the 3329 "constant" information from the previous pass to propagate alias 3330 information through another level of assignments. 3331 3332 The propagation is done on the CFG in reverse post-order, to propagate 3333 things forward as far as possible in each iteration. 3334 3335 This could get expensive if the assignment chains are long. Maybe 3336 we should throttle the number of iterations, possibly based on 3337 the optimization level or flag_expensive_optimizations. 3338 3339 We could propagate more information in the first pass by making use 3340 of DF_REG_DEF_COUNT to determine immediately that the alias information 3341 for a pseudo is "constant". 3342 3343 A program with an uninitialized variable can cause an infinite loop 3344 here. Instead of doing a full dataflow analysis to detect such problems 3345 we just cap the number of iterations for the loop. 3346 3347 The state of the arrays for the set chain in question does not matter 3348 since the program has undefined behavior. */ 3349 3350 rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun)); 3351 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false); 3352 3353 /* The prologue/epilogue insns are not threaded onto the 3354 insn chain until after reload has completed. Thus, 3355 there is no sense wasting time checking if INSN is in 3356 the prologue/epilogue until after reload has completed. */ 3357 bool could_be_prologue_epilogue = ((targetm.have_prologue () 3358 || targetm.have_epilogue ()) 3359 && reload_completed); 3360 3361 pass = 0; 3362 do 3363 { 3364 /* Assume nothing will change this iteration of the loop. */ 3365 changed = 0; 3366 3367 /* We want to assign the same IDs each iteration of this loop, so 3368 start counting from one each iteration of the loop. */ 3369 unique_id = 1; 3370 3371 /* We're at the start of the function each iteration through the 3372 loop, so we're copying arguments. */ 3373 copying_arguments = true; 3374 3375 /* Wipe the potential alias information clean for this pass. */ 3376 memset (new_reg_base_value, 0, maxreg * sizeof (rtx)); 3377 3378 /* Wipe the reg_seen array clean. */ 3379 bitmap_clear (reg_seen); 3380 3381 /* Initialize the alias information for this pass. 
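We seed new_reg_base_value with the fixed bases recorded by init_alias_target: the stack, frame and argument pointers and the registers that may carry incoming pointer arguments.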
*/ 3382 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) 3383 if (static_reg_base_value[i] 3384 /* Don't treat the hard frame pointer as special if we 3385 eliminated the frame pointer to the stack pointer instead. */ 3386 && !(i == HARD_FRAME_POINTER_REGNUM 3387 && reload_completed 3388 && !frame_pointer_needed 3389 && targetm.can_eliminate (FRAME_POINTER_REGNUM, 3390 STACK_POINTER_REGNUM))) 3391 { 3392 new_reg_base_value[i] = static_reg_base_value[i]; 3393 bitmap_set_bit (reg_seen, i); 3394 } 3395 3396 /* Walk the insns adding values to the new_reg_base_value array. */ 3397 for (i = 0; i < rpo_cnt; i++) 3398 { 3399 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]); 3400 FOR_BB_INSNS (bb, insn) 3401 { 3402 if (NONDEBUG_INSN_P (insn)) 3403 { 3404 rtx note, set; 3405 3406 if (could_be_prologue_epilogue 3407 && prologue_epilogue_contains (insn)) 3408 continue; 3409 3410 /* If this insn has a noalias note, process it, Otherwise, 3411 scan for sets. A simple set will have no side effects 3412 which could change the base value of any other register. */ 3413 3414 if (GET_CODE (PATTERN (insn)) == SET 3415 && REG_NOTES (insn) != 0 3416 && find_reg_note (insn, REG_NOALIAS, NULL_RTX)) 3417 record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL); 3418 else 3419 note_stores (PATTERN (insn), record_set, NULL); 3420 3421 set = single_set (insn); 3422 3423 if (set != 0 3424 && REG_P (SET_DEST (set)) 3425 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER) 3426 { 3427 unsigned int regno = REGNO (SET_DEST (set)); 3428 rtx src = SET_SRC (set); 3429 rtx t; 3430 3431 note = find_reg_equal_equiv_note (insn); 3432 if (note && REG_NOTE_KIND (note) == REG_EQUAL 3433 && DF_REG_DEF_COUNT (regno) != 1) 3434 note = NULL_RTX; 3435 3436 if (note != NULL_RTX 3437 && GET_CODE (XEXP (note, 0)) != EXPR_LIST 3438 && ! rtx_varies_p (XEXP (note, 0), 1) 3439 && ! reg_overlap_mentioned_p (SET_DEST (set), 3440 XEXP (note, 0))) 3441 { 3442 set_reg_known_value (regno, XEXP (note, 0)); 3443 set_reg_known_equiv_p (regno, 3444 REG_NOTE_KIND (note) == REG_EQUIV); 3445 } 3446 else if (DF_REG_DEF_COUNT (regno) == 1 3447 && GET_CODE (src) == PLUS 3448 && REG_P (XEXP (src, 0)) 3449 && (t = get_reg_known_value (REGNO (XEXP (src, 0)))) 3450 && CONST_INT_P (XEXP (src, 1))) 3451 { 3452 t = plus_constant (GET_MODE (src), t, 3453 INTVAL (XEXP (src, 1))); 3454 set_reg_known_value (regno, t); 3455 set_reg_known_equiv_p (regno, false); 3456 } 3457 else if (DF_REG_DEF_COUNT (regno) == 1 3458 && ! rtx_varies_p (src, 1)) 3459 { 3460 set_reg_known_value (regno, src); 3461 set_reg_known_equiv_p (regno, false); 3462 } 3463 } 3464 } 3465 else if (NOTE_P (insn) 3466 && NOTE_KIND (insn) == NOTE_INSN_FUNCTION_BEG) 3467 copying_arguments = false; 3468 } 3469 } 3470 3471 /* Now propagate values from new_reg_base_value to reg_base_value. */ 3472 gcc_assert (maxreg == (unsigned int) max_reg_num ()); 3473 3474 for (ui = 0; ui < maxreg; ui++) 3475 { 3476 if (new_reg_base_value[ui] 3477 && new_reg_base_value[ui] != (*reg_base_value)[ui] 3478 && ! rtx_equal_p (new_reg_base_value[ui], (*reg_base_value)[ui])) 3479 { 3480 (*reg_base_value)[ui] = new_reg_base_value[ui]; 3481 changed = 1; 3482 } 3483 } 3484 } 3485 while (changed && ++pass < MAX_ALIAS_LOOP_PASSES); 3486 XDELETEVEC (rpo); 3487 3488 /* Fill in the remaining entries. */ 3489 FOR_EACH_VEC_ELT (*reg_known_value, i, val) 3490 { 3491 int regno = i + FIRST_PSEUDO_REGISTER; 3492 if (! val) 3493 set_reg_known_value (regno, regno_reg_rtx[regno]); 3494 } 3495 3496 /* Clean up. 
*/
3497 free (new_reg_base_value);
3498 new_reg_base_value = 0;
3499 sbitmap_free (reg_seen);
3500 reg_seen = 0;
3501 timevar_pop (TV_ALIAS_ANALYSIS);
3502 }
3503
3504 /* Equate REG_BASE_VALUE (reg1) to REG_BASE_VALUE (reg2).
3505 Special API for var-tracking pass purposes. */
3506
3507 void
3508 vt_equate_reg_base_value (const_rtx reg1, const_rtx reg2)
3509 {
3510 (*reg_base_value)[REGNO (reg1)] = REG_BASE_VALUE (reg2);
3511 }
3512
3513 void
3514 end_alias_analysis (void)
3515 {
3516 old_reg_base_value = reg_base_value;
3517 vec_free (reg_known_value);
3518 sbitmap_free (reg_known_equiv_p);
3519 }
3520
3521 void
3522 dump_alias_stats_in_alias_c (FILE *s)
3523 {
3524 fprintf (s, " TBAA oracle: %llu disambiguations %llu queries\n"
3525 " %llu are in alias set 0\n"
3526 " %llu queries asked about the same object\n"
3527 " %llu queries asked about the same alias set\n"
3528 " %llu access volatile\n"
3529 " %llu are dependent in the DAG\n"
3530 " %llu are artificially in conflict with void *\n",
3531 alias_stats.num_disambiguated,
3532 alias_stats.num_alias_zero + alias_stats.num_same_alias_set
3533 + alias_stats.num_same_objects + alias_stats.num_volatile
3534 + alias_stats.num_dag + alias_stats.num_disambiguated
3535 + alias_stats.num_universal,
3536 alias_stats.num_alias_zero, alias_stats.num_same_alias_set,
3537 alias_stats.num_same_objects, alias_stats.num_volatile,
3538 alias_stats.num_dag, alias_stats.num_universal);
3539 }
3540 #include "gt-alias.h"
3541
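/* Illustrative usage sketch: a pass that reorders memory accesses typically
brackets its queries with init_alias_analysis () and end_alias_analysis ()
and asks questions such as

if (true_dependence (store_mem, GET_MODE (store_mem), load_mem))
... the load must stay after the store ...
if (anti_dependence (load_mem, store_mem))
... the store must stay after the load ...

where store_mem and load_mem stand for the MEM operands of the insns
being considered. */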