1 /* Alias analysis for GNU C
2    Copyright (C) 1997-2022 Free Software Foundation, Inc.
3    Contributed by John Carr (jfc@mit.edu).
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "df.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "gimple-ssa.h"
33 #include "emit-rtl.h"
34 #include "alias.h"
35 #include "fold-const.h"
36 #include "varasm.h"
37 #include "cselib.h"
38 #include "langhooks.h"
39 #include "cfganal.h"
40 #include "rtl-iter.h"
41 #include "cgraph.h"
42 #include "ipa-utils.h"
43 
44 /* The aliasing API provided here solves related but different problems:
45 
46    Say there exists (in c)
47 
48    struct X {
49      struct Y y1;
50      struct Z z2;
51    } x1, *px1,  *px2;
52 
53    struct Y y2, *py;
54    struct Z z2, *pz;
55 
56 
57    py = &x1.y1;
58    px2 = &x1;
59 
60    Consider the four questions:
61 
62    Can a store to x1 interfere with px2->y1?
63    Can a store to x1 interfere with px2->z2?
64    Can a store to x1 change the value pointed to by py?
65    Can a store to x1 change the value pointed to by pz?
66 
67    The answer to these questions can be yes, yes, yes, and maybe.
68 
69    The first two questions can be answered with a simple examination
70    of the type system.  If structure X contains a field of type Y then
71    a store through a pointer to an X can overwrite any field that is
72    contained (recursively) in an X (unless we know that px1 != px2).
73 
74    The last two questions can be solved in the same way as the first
75    two questions but this is too conservative.  The observation is
76    that in some cases we can know which (if any) fields are addressed
77    and if those addresses are used in bad ways.  This analysis may be
78    language specific.  In C, arbitrary operations may be applied to
79    pointers.  However, there is some indication that this may be too
80    conservative for some C++ types.
81 
82    The pass ipa-type-escape does this analysis for the types whose
83    instances do not escape across the compilation boundary.
84 
85    Historically in GCC, these two problems were combined and a single
86    data structure was used to represent the solution to both of them.
87    We now have two similar but different data structures.  The data
88    structure used to solve the last two questions is similar to the
89    first, but does not contain the fields whose addresses are never
90    taken.  For types that do escape the compilation unit, the two data
91    structures will have identical information.
92 */
93 
94 /* The alias sets assigned to MEMs assist the back-end in determining
95    which MEMs can alias which other MEMs.  In general, two MEMs in
96    different alias sets cannot alias each other, with one important
97    exception.  Consider something like:
98 
99      struct S { int i; double d; };
100 
101    a store to an `S' can alias something of either type `int' or type
102    `double'.  (However, a store to an `int' cannot alias a `double'
103    and vice versa.)  We indicate this via a tree structure that looks
104    like:
105 	   struct S
106 	    /   \
107 	   /     \
108 	 |/_     _\|
109 	 int    double
110 
111    (The arrows are directed and point downwards.)
112     In this situation we say the alias set for `struct S' is the
113    `superset' and that those for `int' and `double' are `subsets'.
114 
115    To see whether two alias sets can point to the same memory, we must
116    see if either alias set is a subset of the other. We need not trace
117    past immediate descendants, however, since we propagate all
118    grandchildren up one level.
119 
120    Alias set zero is implicitly a superset of all other alias sets.
121    However, there is no actual entry for alias set zero.  It is an
122    error to attempt to explicitly construct a subset of zero.  */
123 
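/* As an illustrative sketch (the names S_set etc. exist only in this
   comment, they are not real identifiers): for the `struct S' example
   above, get_alias_set ends up building the DAG roughly as

     S_set      = get_alias_set (<struct S type>);
     int_set    = get_alias_set (integer_type_node);
     double_set = get_alias_set (double_type_node);
     record_alias_subset (S_set, int_set);
     record_alias_subset (S_set, double_set);

   via record_component_aliases, so that alias_sets_conflict_p (S_set,
   int_set) is true while alias_sets_conflict_p (int_set, double_set)
   is not.  */
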
124 struct alias_set_hash : int_hash <int, INT_MIN, INT_MIN + 1> {};
125 
126 struct GTY(()) alias_set_entry {
127   /* The alias set number, as stored in MEM_ALIAS_SET.  */
128   alias_set_type alias_set;
129 
130   /* Nonzero if the set would have a child of zero: this effectively makes
131      this alias set the same as alias set zero.  */
132   bool has_zero_child;
133   /* Nonzero if the alias set corresponds to a pointer type itself (i.e. not
134      to an aggregate containing a pointer).
135      This is used for a special case where we need a universal pointer type
136      compatible with all other pointer types.  */
137   bool is_pointer;
138   /* Nonzero if is_pointer or if one of the children has has_pointer set.  */
139   bool has_pointer;
140 
141   /* The children of the alias set.  These are not just the immediate
142      children, but, in fact, all descendants.  So, if we have:
143 
144        struct T { struct S s; float f; }
145 
146      continuing our example above, the children here will be all of
147      `int', `double', `float', and `struct S'.  */
148   hash_map<alias_set_hash, int> *children;
149 };
150 
151 static int rtx_equal_for_memref_p (const_rtx, const_rtx);
152 static void record_set (rtx, const_rtx, void *);
153 static int base_alias_check (rtx, rtx, rtx, rtx, machine_mode,
154 			     machine_mode);
155 static rtx find_base_value (rtx);
156 static int mems_in_disjoint_alias_sets_p (const_rtx, const_rtx);
157 static alias_set_entry *get_alias_set_entry (alias_set_type);
158 static tree decl_for_component_ref (tree);
159 static int write_dependence_p (const_rtx,
160 			       const_rtx, machine_mode, rtx,
161 			       bool, bool, bool);
162 static int compare_base_symbol_refs (const_rtx, const_rtx,
163 				     HOST_WIDE_INT * = NULL);
164 
165 static void memory_modified_1 (rtx, const_rtx, void *);
166 
167 /* Query statistics for the different low-level disambiguators.
168    A high-level query may trigger multiple of them.  */
169 
170 static struct {
171   unsigned long long num_alias_zero;
172   unsigned long long num_same_alias_set;
173   unsigned long long num_same_objects;
174   unsigned long long num_volatile;
175   unsigned long long num_dag;
176   unsigned long long num_universal;
177   unsigned long long num_disambiguated;
178 } alias_stats;
179 
180 
181 /* Set up all info needed to perform alias analysis on memory references.  */
182 
183 /* Returns the size in bytes of the mode of X.  */
184 #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
185 
186 /* Cap the number of passes we make over the insns propagating alias
187    information through set chains.
188    ??? 10 is a completely arbitrary choice.  This should be based on the
189    maximum loop depth in the CFG, but we do not have this information
190    available (even if current_loops _is_ available).  */
191 #define MAX_ALIAS_LOOP_PASSES 10
192 
193 /* reg_base_value[N] gives an address to which register N is related.
194    If all sets after the first add or subtract to the current value
195    or otherwise modify it so it does not point to a different top level
196    object, reg_base_value[N] is equal to the address part of the source
197    of the first set.
198 
199    A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF.  ADDRESS
200    expressions represent three types of base:
201 
202      1. incoming arguments.  There is just one ADDRESS to represent all
203 	arguments, since we do not know at this level whether accesses
204 	based on different arguments can alias.  The ADDRESS has id 0.
205 
206      2. stack_pointer_rtx, frame_pointer_rtx, hard_frame_pointer_rtx
207 	(if distinct from frame_pointer_rtx) and arg_pointer_rtx.
208 	Each of these rtxes has a separate ADDRESS associated with it,
209 	each with a negative id.
210 
211 	GCC is (and is required to be) precise in which register it
212 	chooses to access a particular region of stack.  We can therefore
213 	assume that accesses based on one of these rtxes do not alias
214 	accesses based on another of these rtxes.
215 
216      3. bases that are derived from malloc()ed memory (REG_NOALIAS).
217 	Each such piece of memory has a separate ADDRESS associated
218 	with it, each with an id greater than 0.
219 
220    Accesses based on one ADDRESS do not alias accesses based on other
221    ADDRESSes.  Accesses based on ADDRESSes in groups (2) and (3) do not
222    alias globals either; the ADDRESSes have Pmode to indicate this.
223    The ADDRESS in group (1) _may_ alias globals; it has VOIDmode to
224    indicate this.  */
225 
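/* As an illustrative sketch of the scheme above (register numbers are made
   up and the notation is informal; the special ids come from the
   UNIQUE_BASE_VALUE_* defines below):

     (set (reg 100) (symbol_ref "x"))		     base of reg 100 is "x"
     (set (reg 101) (plus (reg 100) (const_int 8)))  base of reg 101 is "x"
     (set (reg 102) (reg frame_pointer))	     base of reg 102 is the
						     frame ADDRESS (id -3)

   find_base_value and record_set below implement this propagation, and
   base_alias_check uses the resulting base values to disambiguate
   accesses with different bases.  */
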
226 static GTY(()) vec<rtx, va_gc> *reg_base_value;
227 static rtx *new_reg_base_value;
228 
229 /* The single VOIDmode ADDRESS that represents all argument bases.
230    It has id 0.  */
231 static GTY(()) rtx arg_base_value;
232 
233 /* Used to allocate unique ids to each REG_NOALIAS ADDRESS.  */
234 static int unique_id;
235 
236 /* We keep a copy of the old array around to reduce the amount of garbage
237    produced.  About 8% of the garbage produced was attributed to this
238    array.  */
239 static GTY((deletable)) vec<rtx, va_gc> *old_reg_base_value;
240 
241 /* Values of XINT (address, 0) of Pmode ADDRESS rtxes for special
242    registers.  */
243 #define UNIQUE_BASE_VALUE_SP	-1
244 #define UNIQUE_BASE_VALUE_ARGP	-2
245 #define UNIQUE_BASE_VALUE_FP	-3
246 #define UNIQUE_BASE_VALUE_HFP	-4
247 
248 #define static_reg_base_value \
249   (this_target_rtl->x_static_reg_base_value)
250 
251 #define REG_BASE_VALUE(X)					\
252   (REGNO (X) < vec_safe_length (reg_base_value)			\
253    ? (*reg_base_value)[REGNO (X)] : 0)
254 
255 /* Vector indexed by N giving the initial (unchanging) value known for
256    pseudo-register N.  This vector is initialized in init_alias_analysis,
257    and does not change until end_alias_analysis is called.  */
258 static GTY(()) vec<rtx, va_gc> *reg_known_value;
259 
260 /* Vector recording for each reg_known_value whether it is due to a
261    REG_EQUIV note.  Future passes (viz., reload) may replace the
262    pseudo with the equivalent expression and so we account for the
263    dependences that would be introduced if that happens.
264 
265    The REG_EQUIV notes created in assign_parms may mention the arg
266    pointer, and there are explicit insns in the RTL that modify the
267    arg pointer.  Thus we must ensure that such insns don't get
268    scheduled across each other because that would invalidate the
269    REG_EQUIV notes.  One could argue that the REG_EQUIV notes are
270    wrong, but solving the problem in the scheduler will likely give
271    better code, so we do it here.  */
272 static sbitmap reg_known_equiv_p;
273 
274 /* True when scanning insns from the start of the rtl to the
275    NOTE_INSN_FUNCTION_BEG note.  */
276 static bool copying_arguments;
277 
278 
279 /* Vector, indexed by alias set, of the various alias set entries.  */
280 static GTY (()) vec<alias_set_entry *, va_gc> *alias_sets;
281 
282 /* Build a decomposed reference object for querying the alias-oracle
283    from the MEM rtx and store it in *REF.
284    Returns false if MEM is not suitable for the alias-oracle.  */
285 
286 static bool
287 ao_ref_from_mem (ao_ref *ref, const_rtx mem)
288 {
289   tree expr = MEM_EXPR (mem);
290   tree base;
291 
292   if (!expr)
293     return false;
294 
295   ao_ref_init (ref, expr);
296 
297   /* Get the base of the reference and see if we have to reject or
298      adjust it.  */
299   base = ao_ref_base (ref);
300   if (base == NULL_TREE)
301     return false;
302 
303   /* The tree oracle doesn't like bases that are neither decls
304      nor indirect references of SSA names.  */
305   if (!(DECL_P (base)
306 	|| (TREE_CODE (base) == MEM_REF
307 	    && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
308 	|| (TREE_CODE (base) == TARGET_MEM_REF
309 	    && TREE_CODE (TMR_BASE (base)) == SSA_NAME)))
310     return false;
311 
312   ref->ref_alias_set = MEM_ALIAS_SET (mem);
313 
314   /* If MEM_OFFSET or MEM_SIZE is unknown, what we got from MEM_EXPR
315      is conservative, so trust it.  */
316   if (!MEM_OFFSET_KNOWN_P (mem)
317       || !MEM_SIZE_KNOWN_P (mem))
318     return true;
319 
320   /* If MEM_OFFSET/MEM_SIZE get us outside of ref->offset/ref->max_size
321      drop ref->ref.  */
322   if (maybe_lt (MEM_OFFSET (mem), 0)
323       || (ref->max_size_known_p ()
324 	  && maybe_gt ((MEM_OFFSET (mem) + MEM_SIZE (mem)) * BITS_PER_UNIT,
325 		       ref->max_size)))
326     ref->ref = NULL_TREE;
327 
328   /* Refine size and offset we got from analyzing MEM_EXPR by using
329      MEM_SIZE and MEM_OFFSET.  */
330 
331   ref->offset += MEM_OFFSET (mem) * BITS_PER_UNIT;
332   ref->size = MEM_SIZE (mem) * BITS_PER_UNIT;
333 
334   /* The MEM may extend into adjacent fields, so adjust max_size if
335      necessary.  */
336   if (ref->max_size_known_p ())
337     ref->max_size = upper_bound (ref->max_size, ref->size);
338 
339   /* If MEM_OFFSET and MEM_SIZE might get us outside of the base object of
340      the MEM_EXPR, punt.  This happens a lot for STRICT_ALIGNMENT targets.  */
341   if (MEM_EXPR (mem) != get_spill_slot_decl (false)
342       && (maybe_lt (ref->offset, 0)
343 	  || (DECL_P (ref->base)
344 	      && (DECL_SIZE (ref->base) == NULL_TREE
345 		  || !poly_int_tree_p (DECL_SIZE (ref->base))
346 		  || maybe_lt (wi::to_poly_offset (DECL_SIZE (ref->base)),
347 			       ref->offset + ref->size)))))
348     return false;
349 
350   return true;
351 }
352 
353 /* Query the alias-oracle on whether the two memory rtx X and MEM may
354    alias.  If TBAA_P is set also apply TBAA.  Returns true if the
355    two rtxen may alias, false otherwise.  */
356 
357 static bool
358 rtx_refs_may_alias_p (const_rtx x, const_rtx mem, bool tbaa_p)
359 {
360   ao_ref ref1, ref2;
361 
362   if (!ao_ref_from_mem (&ref1, x)
363       || !ao_ref_from_mem (&ref2, mem))
364     return true;
365 
366   return refs_may_alias_p_1 (&ref1, &ref2,
367 			     tbaa_p
368 			     && MEM_ALIAS_SET (x) != 0
369 			     && MEM_ALIAS_SET (mem) != 0);
370 }
371 
372 /* Return true if the ref EARLIER behaves the same as LATER with respect
373    to TBAA for every memory reference that might follow LATER.  */
374 
375 bool
376 refs_same_for_tbaa_p (tree earlier, tree later)
377 {
378   ao_ref earlier_ref, later_ref;
379   ao_ref_init (&earlier_ref, earlier);
380   ao_ref_init (&later_ref, later);
381   alias_set_type earlier_set = ao_ref_alias_set (&earlier_ref);
382   alias_set_type later_set = ao_ref_alias_set (&later_ref);
383   if (!(earlier_set == later_set
384 	|| alias_set_subset_of (later_set, earlier_set)))
385     return false;
386   alias_set_type later_base_set = ao_ref_base_alias_set (&later_ref);
387   alias_set_type earlier_base_set = ao_ref_base_alias_set (&earlier_ref);
388   return (earlier_base_set == later_base_set
389 	  || alias_set_subset_of (later_base_set, earlier_base_set));
390 }
391 
392 /* Similar to refs_same_for_tbaa_p() but for use on MEM rtxs.  */
393 bool
394 mems_same_for_tbaa_p (rtx earlier, rtx later)
395 {
396   gcc_assert (MEM_P (earlier));
397   gcc_assert (MEM_P (later));
398 
399   return ((MEM_ALIAS_SET (earlier) == MEM_ALIAS_SET (later)
400 	   || alias_set_subset_of (MEM_ALIAS_SET (later),
401 				   MEM_ALIAS_SET (earlier)))
402 	  && (!MEM_EXPR (earlier)
403 	      || refs_same_for_tbaa_p (MEM_EXPR (earlier), MEM_EXPR (later))));
404 }
405 
406 /* Returns a pointer to the alias set entry for ALIAS_SET, if there is
407    such an entry, or NULL otherwise.  */
408 
409 static inline alias_set_entry *
410 get_alias_set_entry (alias_set_type alias_set)
411 {
412   return (*alias_sets)[alias_set];
413 }
414 
415 /* Returns nonzero if the alias sets for MEM1 and MEM2 are such that
416    the two MEMs cannot alias each other.  */
417 
418 static inline int
419 mems_in_disjoint_alias_sets_p (const_rtx mem1, const_rtx mem2)
420 {
421   return (flag_strict_aliasing
422 	  && ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1),
423 				      MEM_ALIAS_SET (mem2)));
424 }
425 
426 /* Return true if the first alias set is a subset of the second.  */
427 
428 bool
429 alias_set_subset_of (alias_set_type set1, alias_set_type set2)
430 {
431   alias_set_entry *ase2;
432 
433   /* Disable TBAA oracle with !flag_strict_aliasing.  */
434   if (!flag_strict_aliasing)
435     return true;
436 
437   /* Everything is a subset of the "aliases everything" set.  */
438   if (set2 == 0)
439     return true;
440 
441   /* Check if set1 is a subset of set2.  */
442   ase2 = get_alias_set_entry (set2);
443   if (ase2 != 0
444       && (ase2->has_zero_child
445 	  || (ase2->children && ase2->children->get (set1))))
446     return true;
447 
448   /* As a special case we consider the alias set of "void *" to be both a
449      subset and a superset of every alias set of a pointer.  This extra
450      symmetry does not matter for alias_sets_conflict_p but it makes
451      aliasing_component_refs_p return true on the following testcase:
452 
453      void *ptr;
454      char **ptr2=(char **)&ptr;
455      *ptr2 = ...
456 
457      Additionally, if a set contains the universal pointer, we consider every
458      pointer to be a subset of it, but we do not represent this explicitly -
459      doing so would require us to update the transitive closure each time we
460      introduce a new pointer type.  This makes aliasing_component_refs_p
461      return true on the following testcase:
462 
463      struct a {void *ptr;}
464      char **ptr = (char **)&a.ptr;
465      ptr = ...
466 
467      This makes void * a truly universal pointer type.  See pointer handling
468      in get_alias_set for more details.  */
469   if (ase2 && ase2->has_pointer)
470     {
471       alias_set_entry *ase1 = get_alias_set_entry (set1);
472 
473       if (ase1 && ase1->is_pointer)
474 	{
475           alias_set_type voidptr_set = TYPE_ALIAS_SET (ptr_type_node);
476 	  /* If one is ptr_type_node and the other is a pointer, then we
477 	     consider them subsets of each other.  */
478 	  if (set1 == voidptr_set || set2 == voidptr_set)
479 	    return true;
480 	  /* If SET2 contains the universal pointer's alias set, then we
481 	     consider every (non-universal) pointer to be a subset of SET2.  */
482 	  if (ase2->children && set1 != voidptr_set
483 	      && ase2->children->get (voidptr_set))
484 	    return true;
485 	}
486     }
487   return false;
488 }
489 
490 /* Return 1 if the two specified alias sets may conflict.  */
491 
492 int
493 alias_sets_conflict_p (alias_set_type set1, alias_set_type set2)
494 {
495   alias_set_entry *ase1;
496   alias_set_entry *ase2;
497 
498   /* The easy case.  */
499   if (alias_sets_must_conflict_p (set1, set2))
500     return 1;
501 
502   /* See if the first alias set is a subset of the second.  */
503   ase1 = get_alias_set_entry (set1);
504   if (ase1 != 0
505       && ase1->children && ase1->children->get (set2))
506     {
507       ++alias_stats.num_dag;
508       return 1;
509     }
510 
511   /* Now do the same, but with the alias sets reversed.  */
512   ase2 = get_alias_set_entry (set2);
513   if (ase2 != 0
514       && ase2->children && ase2->children->get (set1))
515     {
516       ++alias_stats.num_dag;
517       return 1;
518     }
519 
520   /* We want void * to be compatible with any other pointer without
521      really dropping it to alias set 0. Doing so would make it
522      compatible with all non-pointer types too.
523 
524      This is not strictly necessary by the C/C++ language
525      standards, but avoids common type punning mistakes.  In
526      addition to that, we need the existence of such a universal
527      pointer to implement Fortran's C_PTR type (which is defined as a
528      type compatible with all C pointers).  */
529   if (ase1 && ase2 && ase1->has_pointer && ase2->has_pointer)
530     {
531       alias_set_type voidptr_set = TYPE_ALIAS_SET (ptr_type_node);
532 
533       /* If one of the sets corresponds to the universal pointer,
534 	 we consider it to conflict with anything that is
535 	 or contains a pointer.  */
536       if (set1 == voidptr_set || set2 == voidptr_set)
537 	{
538 	  ++alias_stats.num_universal;
539 	  return true;
540 	}
541      /* If one of the sets is a (non-universal) pointer and the other
542 	contains the universal pointer, we also get a conflict.  */
543      if (ase1->is_pointer && set2 != voidptr_set
544 	 && ase2->children && ase2->children->get (voidptr_set))
545 	{
546 	  ++alias_stats.num_universal;
547 	  return true;
548 	}
549      if (ase2->is_pointer && set1 != voidptr_set
550 	 && ase1->children && ase1->children->get (voidptr_set))
551 	{
552 	  ++alias_stats.num_universal;
553 	  return true;
554 	}
555     }
556 
557   ++alias_stats.num_disambiguated;
558 
559   /* The two alias sets are distinct and neither one is the
560      child of the other.  Therefore, they cannot conflict.  */
561   return 0;
562 }
563 
564 /* Return 1 if the two specified alias sets will always conflict.  */
565 
566 int
567 alias_sets_must_conflict_p (alias_set_type set1, alias_set_type set2)
568 {
569   /* Disable TBAA oracle with !flag_strict_aliasing.  */
570   if (!flag_strict_aliasing)
571     return 1;
572   if (set1 == 0 || set2 == 0)
573     {
574       ++alias_stats.num_alias_zero;
575       return 1;
576     }
577   if (set1 == set2)
578     {
579       ++alias_stats.num_same_alias_set;
580       return 1;
581     }
582 
583   return 0;
584 }
585 
586 /* Return 1 if any MEM object of type T1 will always conflict (using the
587    dependency routines in this file) with any MEM object of type T2.
588    This is used when allocating temporary storage.  If T1 and/or T2 are
589    NULL_TREE, it means we know nothing about the storage.  */
590 
591 int
592 objects_must_conflict_p (tree t1, tree t2)
593 {
594   alias_set_type set1, set2;
595 
596   /* If neither has a type specified, we don't know if they'll conflict
597      because we may be using them to store objects of various types, for
598      example the argument and local variables areas of inlined functions.  */
599   if (t1 == 0 && t2 == 0)
600     return 0;
601 
602   /* If they are the same type, they must conflict.  */
603   if (t1 == t2)
604     {
605       ++alias_stats.num_same_objects;
606       return 1;
607     }
608   /* Likewise if both are volatile.  */
609   if (t1 != 0 && TYPE_VOLATILE (t1) && t2 != 0 && TYPE_VOLATILE (t2))
610     {
611       ++alias_stats.num_volatile;
612       return 1;
613     }
614 
615   set1 = t1 ? get_alias_set (t1) : 0;
616   set2 = t2 ? get_alias_set (t2) : 0;
617 
618   /* We can't use alias_sets_conflict_p because we must make sure
619      that every subtype of t1 will conflict with every subtype of
620      t2 for which a pair of subobjects of these respective subtypes
621      overlaps on the stack.  */
622   return alias_sets_must_conflict_p (set1, set2);
623 }
624 
625 /* Return true if T is an end of the access path which can be used
626    by the type-based alias oracle.  */
627 
628 bool
629 ends_tbaa_access_path_p (const_tree t)
630 {
631   switch (TREE_CODE (t))
632     {
633     case COMPONENT_REF:
634       if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1)))
635 	return true;
636       /* Permit type-punning when accessing a union, provided the access
637 	 is directly through the union.  For example, this code does not
638 	 permit taking the address of a union member and then storing
639 	 through it.  Even the type-punning allowed here is a GCC
640 	 extension, albeit a common and useful one; the C standard says
641 	 that such accesses have implementation-defined behavior.  */
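      /* For instance (an illustrative sketch, not code from this file):

	   union U { int i; float f; } u;
	   u.f = 1.0f;	    punned access directly through the union,
			    permitted by this case
	   int *ip = &u.i;
	   *ip = 42;	    store through the member's address, not
			    covered by this rule

	 only the access spelled directly through the union ends the TBAA
	 access path here.  */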
642       else if (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == UNION_TYPE)
643 	return true;
644       break;
645 
646     case ARRAY_REF:
647     case ARRAY_RANGE_REF:
648       if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0))))
649 	return true;
650       break;
651 
652     case REALPART_EXPR:
653     case IMAGPART_EXPR:
654       break;
655 
656     case BIT_FIELD_REF:
657     case VIEW_CONVERT_EXPR:
658       /* Bitfields and casts are never addressable.  */
659       return true;
660       break;
661 
662     default:
663       gcc_unreachable ();
664     }
665   return false;
666 }
667 
668 /* Return the outermost parent of a component present in the chain of
669    component references handled by get_inner_reference in T with the
670    following property:
671      - the component is non-addressable
672    or NULL_TREE if no such parent exists.  In the former case, the alias
673    set of this parent is the alias set that must be used for T itself.  */
674 
675 tree
676 component_uses_parent_alias_set_from (const_tree t)
677 {
678   const_tree found = NULL_TREE;
679 
680   while (handled_component_p (t))
681     {
682       if (ends_tbaa_access_path_p (t))
683 	found = t;
684 
685       t = TREE_OPERAND (t, 0);
686     }
687 
688   if (found)
689     return TREE_OPERAND (found, 0);
690 
691   return NULL_TREE;
692 }
693 
694 
695 /* Return whether the pointer-type T effective for aliasing may
696    access everything and thus the reference has to be assigned
697    alias-set zero.  */
698 
699 static bool
700 ref_all_alias_ptr_type_p (const_tree t)
701 {
702   return (TREE_CODE (TREE_TYPE (t)) == VOID_TYPE
703 	  || TYPE_REF_CAN_ALIAS_ALL (t));
704 }
705 
706 /* Return the alias set for the memory pointed to by T, which may be
707    either a type or an expression.  Return -1 if there is nothing
708    special about dereferencing T.  */
709 
710 static alias_set_type
711 get_deref_alias_set_1 (tree t)
712 {
713   /* All we care about is the type.  */
714   if (! TYPE_P (t))
715     t = TREE_TYPE (t);
716 
717   /* If we have an INDIRECT_REF via a void pointer, we don't
718      know anything about what that might alias.  Likewise if the
719      pointer is marked that way.  */
720   if (ref_all_alias_ptr_type_p (t))
721     return 0;
722 
723   return -1;
724 }
725 
726 /* Return the alias set for the memory pointed to by T, which may be
727    either a type or an expression.  */
728 
729 alias_set_type
730 get_deref_alias_set (tree t)
731 {
732   /* If we're not doing any alias analysis, just assume everything
733      aliases everything else.  */
734   if (!flag_strict_aliasing)
735     return 0;
736 
737   alias_set_type set = get_deref_alias_set_1 (t);
738 
739   /* Fall back to the alias-set of the pointed-to type.  */
740   if (set == -1)
741     {
742       if (! TYPE_P (t))
743 	t = TREE_TYPE (t);
744       set = get_alias_set (TREE_TYPE (t));
745     }
746 
747   return set;
748 }
749 
750 /* Return the pointer-type relevant for TBAA purposes from the
751    memory reference tree *T or NULL_TREE in which case *T is
752    adjusted to point to the outermost component reference that
753    can be used for assigning an alias set.  */
754 
755 tree
756 reference_alias_ptr_type_1 (tree *t)
757 {
758   tree inner;
759 
760   /* Get the base object of the reference.  */
761   inner = *t;
762   while (handled_component_p (inner))
763     {
764       /* If there is a VIEW_CONVERT_EXPR in the chain we cannot use
765 	 the type of any component references that wrap it to
766 	 determine the alias-set.  */
767       if (TREE_CODE (inner) == VIEW_CONVERT_EXPR)
768 	*t = TREE_OPERAND (inner, 0);
769       inner = TREE_OPERAND (inner, 0);
770     }
771 
772   /* Handle pointer dereferences here, they can override the
773      alias-set.  */
774   if (INDIRECT_REF_P (inner)
775       && ref_all_alias_ptr_type_p (TREE_TYPE (TREE_OPERAND (inner, 0))))
776     return TREE_TYPE (TREE_OPERAND (inner, 0));
777   else if (TREE_CODE (inner) == TARGET_MEM_REF)
778     return TREE_TYPE (TMR_OFFSET (inner));
779   else if (TREE_CODE (inner) == MEM_REF
780 	   && ref_all_alias_ptr_type_p (TREE_TYPE (TREE_OPERAND (inner, 1))))
781     return TREE_TYPE (TREE_OPERAND (inner, 1));
782 
783   /* If the innermost reference is a MEM_REF that has a
784      conversion embedded treat it like a VIEW_CONVERT_EXPR above,
785      using the memory access type for determining the alias-set.  */
786   if (TREE_CODE (inner) == MEM_REF
787       && (TYPE_MAIN_VARIANT (TREE_TYPE (inner))
788 	  != TYPE_MAIN_VARIANT
789 	       (TREE_TYPE (TREE_TYPE (TREE_OPERAND (inner, 1))))))
790     return TREE_TYPE (TREE_OPERAND (inner, 1));
791 
792   /* Otherwise, pick up the outermost object that we could have
793      a pointer to.  */
794   tree tem = component_uses_parent_alias_set_from (*t);
795   if (tem)
796     *t = tem;
797 
798   return NULL_TREE;
799 }
800 
801 /* Return the pointer-type relevant for TBAA purposes from the
802    gimple memory reference tree T.  This is the type to be used for
803    the offset operand of MEM_REF or TARGET_MEM_REF replacements of T
804    and guarantees that get_alias_set will return the same alias
805    set for T and the replacement.  */
806 
807 tree
808 reference_alias_ptr_type (tree t)
809 {
810   /* If the frontend assigns this alias-set zero, preserve that.  */
811   if (lang_hooks.get_alias_set (t) == 0)
812     return ptr_type_node;
813 
814   tree ptype = reference_alias_ptr_type_1 (&t);
815   /* If there is a given pointer type for aliasing purposes, return it.  */
816   if (ptype != NULL_TREE)
817     return ptype;
818 
819   /* Otherwise build one from the outermost component reference we
820      may use.  */
821   if (TREE_CODE (t) == MEM_REF
822       || TREE_CODE (t) == TARGET_MEM_REF)
823     return TREE_TYPE (TREE_OPERAND (t, 1));
824   else
825     return build_pointer_type (TYPE_MAIN_VARIANT (TREE_TYPE (t)));
826 }
827 
828 /* Return whether the pointer-types T1 and T2 used to determine
829    two alias sets of two references will yield the same answer
830    from get_deref_alias_set.  */
831 
832 bool
833 alias_ptr_types_compatible_p (tree t1, tree t2)
834 {
835   if (TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))
836     return true;
837 
838   if (ref_all_alias_ptr_type_p (t1)
839       || ref_all_alias_ptr_type_p (t2))
840     return false;
841 
842   /* This function abstracts from simply comparing
843      get_deref_alias_set so that we are sure the result stays the
844      same after LTO type merging is applied.
845      Once LTO type merging has been done we can actually do the
846      comparison directly.  */
847   if (in_lto_p)
848     return get_deref_alias_set (t1) == get_deref_alias_set (t2);
849   else
850     return (TYPE_MAIN_VARIANT (TREE_TYPE (t1))
851 	    == TYPE_MAIN_VARIANT (TREE_TYPE (t2)));
852 }
853 
854 /* Create an empty alias set entry.  */
855 
856 alias_set_entry *
857 init_alias_set_entry (alias_set_type set)
858 {
859   alias_set_entry *ase = ggc_alloc<alias_set_entry> ();
860   ase->alias_set = set;
861   ase->children = NULL;
862   ase->has_zero_child = false;
863   ase->is_pointer = false;
864   ase->has_pointer = false;
865   gcc_checking_assert (!get_alias_set_entry (set));
866   (*alias_sets)[set] = ase;
867   return ase;
868 }
869 
870 /* Return the alias set for T, which may be either a type or an
871    expression.  Call language-specific routine for help, if needed.  */
872 
873 alias_set_type
874 get_alias_set (tree t)
875 {
876   alias_set_type set;
877 
878   /* We cannot simply give up for -fno-strict-aliasing because we still need
879      to build proper type representations for functions which may be built
880      with -fstrict-aliasing.  */
881 
882   /* Return 0 if this or its type is an error.  */
883   if (t == error_mark_node
884       || (! TYPE_P (t)
885 	  && (TREE_TYPE (t) == 0 || TREE_TYPE (t) == error_mark_node)))
886     return 0;
887 
888   /* We can be passed either an expression or a type.  This and the
889      language-specific routine may make mutually-recursive calls to each other
890      to figure out what to do.  At each juncture, we see if this is a tree
891      that the language may need to handle specially.  First handle things that
892      aren't types.  */
893   if (! TYPE_P (t))
894     {
895       /* Give the language a chance to do something with this tree
896 	 before we look at it.  */
897       STRIP_NOPS (t);
898       set = lang_hooks.get_alias_set (t);
899       if (set != -1)
900 	return set;
901 
902       /* Get the alias pointer-type to use or the outermost object
903          that we could have a pointer to.  */
904       tree ptype = reference_alias_ptr_type_1 (&t);
905       if (ptype != NULL)
906 	return get_deref_alias_set (ptype);
907 
908       /* If we've already determined the alias set for a decl, just return
909 	 it.  This is necessary for C++ anonymous unions, whose component
910 	 variables don't look like union members (boo!).  */
911       if (VAR_P (t)
912 	  && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t)))
913 	return MEM_ALIAS_SET (DECL_RTL (t));
914 
915       /* Now all we care about is the type.  */
916       t = TREE_TYPE (t);
917     }
918 
919   /* Variant qualifiers don't affect the alias set, so get the main
920      variant.  */
921   t = TYPE_MAIN_VARIANT (t);
922 
923   if (AGGREGATE_TYPE_P (t)
924       && TYPE_TYPELESS_STORAGE (t))
925     return 0;
926 
927   /* Always use the canonical type as well.  If this is a type that
928      requires structural comparisons to identify compatible types
929      use alias set zero.  */
930   if (TYPE_STRUCTURAL_EQUALITY_P (t))
931     {
932       /* Allow the language to specify another alias set for this
933 	 type.  */
934       set = lang_hooks.get_alias_set (t);
935       if (set != -1)
936 	return set;
937       /* Handle structure type equality for pointer types, arrays and vectors.
938 	 This is easy to do, because the code below ignores canonical types on
939 	 these anyway.  This is important for LTO, where TYPE_CANONICAL for
940 	 pointers cannot be meaningfully computed by the frontend.  */
941       if (canonical_type_used_p (t))
942 	{
943 	  /* In LTO we set canonical types for all types where it makes
944 	     sense to do so.  Double check we did not miss some type.  */
945 	  gcc_checking_assert (!in_lto_p || !type_with_alias_set_p (t));
946           return 0;
947 	}
948     }
949   else
950     {
951       t = TYPE_CANONICAL (t);
952       gcc_checking_assert (!TYPE_STRUCTURAL_EQUALITY_P (t));
953     }
954 
955   /* If this is a type with a known alias set, return it.  */
956   gcc_checking_assert (t == TYPE_MAIN_VARIANT (t));
957   if (TYPE_ALIAS_SET_KNOWN_P (t))
958     return TYPE_ALIAS_SET (t);
959 
960   /* We don't want to set TYPE_ALIAS_SET for incomplete types.  */
961   if (!COMPLETE_TYPE_P (t))
962     {
963       /* For arrays with unknown size the conservative answer is the
964 	 alias set of the element type.  */
965       if (TREE_CODE (t) == ARRAY_TYPE)
966 	return get_alias_set (TREE_TYPE (t));
967 
968       /* But return zero as a conservative answer for incomplete types.  */
969       return 0;
970     }
971 
972   /* See if the language has special handling for this type.  */
973   set = lang_hooks.get_alias_set (t);
974   if (set != -1)
975     return set;
976 
977   /* There are no objects of FUNCTION_TYPE, so there's no point in
978      using up an alias set for them.  (There are, of course, pointers
979      and references to functions, but that's different.)  */
980   else if (TREE_CODE (t) == FUNCTION_TYPE || TREE_CODE (t) == METHOD_TYPE)
981     set = 0;
982 
983   /* Unless the language specifies otherwise, let vector types alias
984      their components.  This avoids some nasty type punning issues in
985      normal usage.  And indeed lets vectors be treated more like an
986      array slice.  */
987   else if (TREE_CODE (t) == VECTOR_TYPE)
988     set = get_alias_set (TREE_TYPE (t));
989 
990   /* Unless the language specifies otherwise, treat array types the
991      same as their components.  This avoids the asymmetry we get
992      through recording the components.  Consider accessing a
993      character(kind=1) through a reference to a character(kind=1)[1:1].
994      Or consider if we want to assign integer(kind=4)[0:D.1387] and
995      integer(kind=4)[4] the same alias set or not.
996      Just be pragmatic here and make sure the array and its element
997      type get the same alias set assigned.  */
998   else if (TREE_CODE (t) == ARRAY_TYPE
999 	   && (!TYPE_NONALIASED_COMPONENT (t)
1000 	       || TYPE_STRUCTURAL_EQUALITY_P (t)))
1001     set = get_alias_set (TREE_TYPE (t));
1002 
1003   /* From the former common C and C++ langhook implementation:
1004 
1005      Unfortunately, there is no canonical form of a pointer type.
1006      In particular, if we have `typedef int I', then `int *', and
1007      `I *' are different types.  So, we have to pick a canonical
1008      representative.  We do this below.
1009 
1010      Technically, this approach is actually more conservative than
1011      it needs to be.  In particular, `const int *' and `int *'
1012      should be in different alias sets, according to the C and C++
1013      standard, since their types are not the same, and so,
1014      technically, an `int **' and `const int **' cannot point at
1015      the same thing.
1016 
1017      But, the standard is wrong.  In particular, this code is
1018      legal C++:
1019 
1020      int *ip;
1021      int **ipp = &ip;
1022      const int* const* cipp = ipp;
1023      And, it doesn't make sense for that to be legal unless you
1024      can dereference IPP and CIPP.  So, we ignore cv-qualifiers on
1025      the pointed-to types.  This issue has been reported to the
1026      C++ committee.
1027 
1028      For this reason we go to the canonical type of the unqualified pointer
1029      type.  Until GCC 6 this code gave all pointer types the alias set of
1030      ptr_type_node, but that is a bad idea because it prevents
1031      disambiguations between pointers.  For Firefox this accounts for about
1032      20% of all disambiguations in the program.  */
1033   else if (POINTER_TYPE_P (t) && t != ptr_type_node)
1034     {
1035       tree p;
1036       auto_vec <bool, 8> reference;
1037 
1038       /* Unnest all pointers and references.
1039 	 We also want to make pointer to array/vector equivalent to pointer to
1040 	 its element (see the reasoning above). Skip all those types, too.  */
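      /* As an illustrative sketch: for T == int *(*)[10] the loop below
	 strips the outer pointer, the array and the inner pointer down to
	 int, and the rebuilding code further down then gives T the alias
	 set of int **, i.e. a pointer to an array is treated like a
	 pointer to the array's element.  */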
1041       for (p = t; POINTER_TYPE_P (p)
1042 	   || (TREE_CODE (p) == ARRAY_TYPE
1043 	       && (!TYPE_NONALIASED_COMPONENT (p)
1044 		   || !COMPLETE_TYPE_P (p)
1045 		   || TYPE_STRUCTURAL_EQUALITY_P (p)))
1046 	   || TREE_CODE (p) == VECTOR_TYPE;
1047 	   p = TREE_TYPE (p))
1048 	{
1049 	  /* Ada supports recursive pointers.  Instead of doing a recursion
1050 	     check, just give up once the preallocated space of 8 elements
1051 	     is used up.  In this case just punt to the void * alias set.  */
1052 	  if (reference.length () == 8)
1053 	    {
1054 	      p = ptr_type_node;
1055 	      break;
1056 	    }
1057 	  if (TREE_CODE (p) == REFERENCE_TYPE)
1058 	    /* In LTO we want languages that use references to be compatible
1059  	       with languages that use pointers.  */
1060 	    reference.safe_push (true && !in_lto_p);
1061 	  if (TREE_CODE (p) == POINTER_TYPE)
1062 	    reference.safe_push (false);
1063 	}
1064       p = TYPE_MAIN_VARIANT (p);
1065 
1066       /* In LTO, for C++ programs we can turn incomplete types into complete
1067 	 ones using ODR name lookup.  */
1068       if (in_lto_p && TYPE_STRUCTURAL_EQUALITY_P (p) && odr_type_p (p))
1069 	{
1070 	  p = prevailing_odr_type (p);
1071 	  gcc_checking_assert (TYPE_MAIN_VARIANT (p) == p);
1072 	}
1073 
1074       /* Make void * compatible with char * and also void **.
1075 	 Programs commonly violate TBAA this way.
1076 
1077 	 We also make void * conflict with every pointer
1078 	 (see record_component_aliases) and thus it is safe to use it for
1079 	 pointers to types with TYPE_STRUCTURAL_EQUALITY_P.  */
1080       if (TREE_CODE (p) == VOID_TYPE || TYPE_STRUCTURAL_EQUALITY_P (p))
1081 	set = get_alias_set (ptr_type_node);
1082       else
1083 	{
1084 	  /* Rebuild pointer type starting from canonical types using
1085 	     unqualified pointers and references only.  This way all such
1086 	     pointers will have the same alias set and will conflict with
1087 	     each other.
1088 
1089 	     Most of the time we already have pointers or references of a given
1090 	     type.  If not, we build a new one just to be sure that if someone
1091 	     later (probably only the middle-end can, as we should assign all
1092 	     alias classes only after finishing the translation unit) builds the
1093 	     pointer type, the canonical type will match.  */
1094 	  p = TYPE_CANONICAL (p);
1095 	  while (!reference.is_empty ())
1096 	    {
1097 	      if (reference.pop ())
1098 		p = build_reference_type (p);
1099 	      else
1100 		p = build_pointer_type (p);
1101 	      gcc_checking_assert (p == TYPE_MAIN_VARIANT (p));
1102 	      /* build_pointer_type should always return the canonical type.
1103 		 For LTO TYPE_CANONICAL may be NULL, because we do not compute
1104 		 them.  Be sure that frontends do not glob canonical types of
1105 		 pointers in unexpected ways and that p == TYPE_CANONICAL (p)
1106 		 in all other cases.  */
1107 	      gcc_checking_assert (!TYPE_CANONICAL (p)
1108 				   || p == TYPE_CANONICAL (p));
1109 	    }
1110 
1111 	  /* Assign the alias set to both p and t.
1112 	     We cannot call get_alias_set (p) here as that would trigger
1113 	     infinite recursion when p == t.  In other cases it would just
1114 	     trigger unnecessary legwork of rebuilding the pointer again.  */
1115 	  gcc_checking_assert (p == TYPE_MAIN_VARIANT (p));
1116 	  if (TYPE_ALIAS_SET_KNOWN_P (p))
1117 	    set = TYPE_ALIAS_SET (p);
1118 	  else
1119 	    {
1120 	      set = new_alias_set ();
1121 	      TYPE_ALIAS_SET (p) = set;
1122 	    }
1123 	}
1124     }
1125   /* The alias set of ptr_type_node is special and serves as a universal
1126      pointer which is TBAA compatible with every other pointer type.  Be sure
1127      we have the alias set built even for LTO, which otherwise keeps
1128      TYPE_CANONICAL of all pointer types NULL.  */
1129   else if (t == ptr_type_node)
1130     set = new_alias_set ();
1131 
1132   /* Otherwise make a new alias set for this type.  */
1133   else
1134     {
1135       /* Each canonical type gets its own alias set, so canonical types
1136 	 shouldn't form a tree.  It doesn't really matter for types
1137 	 we handle specially above, so only check it where it possibly
1138 	 would result in a bogus alias set.  */
1139       gcc_checking_assert (TYPE_CANONICAL (t) == t);
1140 
1141       set = new_alias_set ();
1142     }
1143 
1144   TYPE_ALIAS_SET (t) = set;
1145 
1146   /* If this is an aggregate type or a complex type, we must record any
1147      component aliasing information.  */
1148   if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE)
1149     record_component_aliases (t);
1150 
1151   /* We treat pointer types specially in alias_set_subset_of.  */
1152   if (POINTER_TYPE_P (t) && set)
1153     {
1154       alias_set_entry *ase = get_alias_set_entry (set);
1155       if (!ase)
1156 	ase = init_alias_set_entry (set);
1157       ase->is_pointer = true;
1158       ase->has_pointer = true;
1159     }
1160 
1161   return set;
1162 }
1163 
1164 /* Return a brand-new alias set.  */
1165 
1166 alias_set_type
1167 new_alias_set (void)
1168 {
1169   if (alias_sets == 0)
1170     vec_safe_push (alias_sets, (alias_set_entry *) NULL);
1171   vec_safe_push (alias_sets, (alias_set_entry *) NULL);
1172   return alias_sets->length () - 1;
1173 }
1174 
1175 /* Indicate that things in SUBSET can alias things in SUPERSET, but that
1176    not everything that aliases SUPERSET also aliases SUBSET.  For example,
1177    in C, a store to an `int' can alias a load of a structure containing an
1178    `int', and vice versa.  But it can't alias a load of a 'double' member
1179    of the same structure.  Here, the structure would be the SUPERSET and
1180    `int' the SUBSET.  This relationship is also described in the comment at
1181    the beginning of this file.
1182 
1183    This function should be called only once per SUPERSET/SUBSET pair.
1184 
1185    It is illegal for SUPERSET to be zero; everything is implicitly a
1186    subset of alias set zero.  */
1187 
1188 void
1189 record_alias_subset (alias_set_type superset, alias_set_type subset)
1190 {
1191   alias_set_entry *superset_entry;
1192   alias_set_entry *subset_entry;
1193 
1194   /* It is possible in complex type situations for both sets to be the same,
1195      in which case we can ignore this operation.  */
1196   if (superset == subset)
1197     return;
1198 
1199   gcc_assert (superset);
1200 
1201   superset_entry = get_alias_set_entry (superset);
1202   if (superset_entry == 0)
1203     {
1204       /* Create an entry for the SUPERSET, so that we have a place to
1205 	 attach the SUBSET.  */
1206       superset_entry = init_alias_set_entry (superset);
1207     }
1208 
1209   if (subset == 0)
1210     superset_entry->has_zero_child = 1;
1211   else
1212     {
1213       if (!superset_entry->children)
1214 	superset_entry->children
1215 	  = hash_map<alias_set_hash, int>::create_ggc (64);
1216 
1217       /* Enter the SUBSET itself as a child of the SUPERSET.  If it was
1218 	 already there we're done.  */
1219       if (superset_entry->children->put (subset, 0))
1220 	return;
1221 
1222       subset_entry = get_alias_set_entry (subset);
1223       /* If there is an entry for the subset, enter all of its children
1224 	 (if they are not already present) as children of the SUPERSET.  */
1225       if (subset_entry)
1226 	{
1227 	  if (subset_entry->has_zero_child)
1228 	    superset_entry->has_zero_child = true;
1229           if (subset_entry->has_pointer)
1230 	    superset_entry->has_pointer = true;
1231 
1232 	  if (subset_entry->children)
1233 	    {
1234 	      hash_map<alias_set_hash, int>::iterator iter
1235 		= subset_entry->children->begin ();
1236 	      for (; iter != subset_entry->children->end (); ++iter)
1237 		superset_entry->children->put ((*iter).first, (*iter).second);
1238 	    }
1239 	}
1240     }
1241 }
1242 
1243 /* Record that component types of TYPE, if any, are part of SUPERSET for
1244    aliasing purposes.  For record types, we only record component types
1245    for fields that are not marked non-addressable.  For array types, we
1246    only record the component type if it is not marked non-aliased.  */
1247 
1248 void
1249 record_component_aliases (tree type, alias_set_type superset)
1250 {
1251   tree field;
1252 
1253   if (superset == 0)
1254     return;
1255 
1256   switch (TREE_CODE (type))
1257     {
1258     case RECORD_TYPE:
1259     case UNION_TYPE:
1260     case QUAL_UNION_TYPE:
1261       {
1262 	/* LTO non-ODR type merging does not distinguish between
1263 	   component pointer types.  We may have
1264 
1265 	   struct foo {int *a;};
1266 
1267 	   as TYPE_CANONICAL of
1268 
1269 	   struct bar {float *a;};
1270 
1271 	   Because accesses to int * and float * do not alias, we would get
1272 	   a false negative when accessing the same memory location through
1273 	   float ** and bar *.  We thus record the canonical type as:
1274 
1275 	   struct {void *a;};
1276 
1277 	   void * is special cased and works as a universal pointer type.
1278 	   Accesses to it conflict with accesses to any other pointer
1279 	   type.  */
1280 	bool void_pointers = in_lto_p
1281 			     && (!odr_type_p (type)
1282 				 || !odr_based_tbaa_p (type));
1283 	for (field = TYPE_FIELDS (type); field != 0; field = DECL_CHAIN (field))
1284 	  if (TREE_CODE (field) == FIELD_DECL && !DECL_NONADDRESSABLE_P (field))
1285 	    {
1286 	      tree t = TREE_TYPE (field);
1287 	      if (void_pointers)
1288 		{
1289 		  /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
1290 		     element type and that type has to be normalized to void *,
1291 		     too, in the case it is a pointer. */
1292 		  while (!canonical_type_used_p (t) && !POINTER_TYPE_P (t))
1293 		    {
1294 		      gcc_checking_assert (TYPE_STRUCTURAL_EQUALITY_P (t));
1295 		      t = TREE_TYPE (t);
1296 		    }
1297 		  if (POINTER_TYPE_P (t))
1298 		    t = ptr_type_node;
1299 		  else if (flag_checking)
1300 		    gcc_checking_assert (get_alias_set (t)
1301 					 == get_alias_set (TREE_TYPE (field)));
1302 		}
1303 
1304 	      alias_set_type set = get_alias_set (t);
1305 	      record_alias_subset (superset, set);
1306 	      /* If the field has alias-set zero, make sure to still record
1307 		 any components of it.  This makes sure that for
1308 		   struct A {
1309 		     struct B {
1310 		       int i;
1311 		       char c[4];
1312 		     } b;
1313 		   };
1314 		 in C++ even though 'B' has alias-set zero because
1315 		 TYPE_TYPELESS_STORAGE is set, 'A' has the alias-set of
1316 		 'int' as a subset.  */
1317 	      if (set == 0)
1318 		record_component_aliases (t, superset);
1319 	    }
1320       }
1321       break;
1322 
1323     case COMPLEX_TYPE:
1324       record_alias_subset (superset, get_alias_set (TREE_TYPE (type)));
1325       break;
1326 
1327     /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
1328        element type.  */
1329 
1330     default:
1331       break;
1332     }
1333 }
1334 
1335 /* Record that component types of TYPE, if any, are part of that type for
1336    aliasing purposes.  For record types, we only record component types
1337    for fields that are not marked non-addressable.  For array types, we
1338    only record the component type if it is not marked non-aliased.  */
1339 
1340 void
1341 record_component_aliases (tree type)
1342 {
1343   alias_set_type superset = get_alias_set (type);
1344   record_component_aliases (type, superset);
1345 }
1346 
1347 
1348 /* Allocate an alias set for use in storing and reading from the varargs
1349    spill area.  */
1350 
1351 static GTY(()) alias_set_type varargs_set = -1;
1352 
1353 alias_set_type
1354 get_varargs_alias_set (void)
1355 {
1356 #if 1
1357   /* We now lower VA_ARG_EXPR, and there's currently no way to attach the
1358      varargs alias set to an INDIRECT_REF (FIXME!), so we can't
1359      consistently use the varargs alias set for loads from the varargs
1360      area.  So don't use it anywhere.  */
1361   return 0;
1362 #else
1363   if (varargs_set == -1)
1364     varargs_set = new_alias_set ();
1365 
1366   return varargs_set;
1367 #endif
1368 }
1369 
1370 /* Likewise, but used for the fixed portions of the frame, e.g., register
1371    save areas.  */
1372 
1373 static GTY(()) alias_set_type frame_set = -1;
1374 
1375 alias_set_type
1376 get_frame_alias_set (void)
1377 {
1378   if (frame_set == -1)
1379     frame_set = new_alias_set ();
1380 
1381   return frame_set;
1382 }
1383 
1384 /* Create a new, unique base with id ID.  */
1385 
1386 static rtx
1387 unique_base_value (HOST_WIDE_INT id)
1388 {
1389   return gen_rtx_ADDRESS (Pmode, id);
1390 }
1391 
1392 /* Return true if accesses based on any other base value cannot alias
1393    those based on X.  */
1394 
1395 static bool
1396 unique_base_value_p (rtx x)
1397 {
1398   return GET_CODE (x) == ADDRESS && GET_MODE (x) == Pmode;
1399 }
1400 
1401 /* Return true if X is known to be a base value.  */
1402 
1403 static bool
1404 known_base_value_p (rtx x)
1405 {
1406   switch (GET_CODE (x))
1407     {
1408     case LABEL_REF:
1409     case SYMBOL_REF:
1410       return true;
1411 
1412     case ADDRESS:
1413       /* Arguments may or may not be bases; we don't know for sure.  */
1414       return GET_MODE (x) != VOIDmode;
1415 
1416     default:
1417       return false;
1418     }
1419 }
1420 
1421 /* Inside SRC, the source of a SET, find a base address.  */
1422 
1423 static rtx
1424 find_base_value (rtx src)
1425 {
1426   unsigned int regno;
1427   scalar_int_mode int_mode;
1428 
1429 #if defined (FIND_BASE_TERM)
1430   /* Try machine-dependent ways to find the base term.  */
1431   src = FIND_BASE_TERM (src);
1432 #endif
1433 
1434   switch (GET_CODE (src))
1435     {
1436     case SYMBOL_REF:
1437     case LABEL_REF:
1438       return src;
1439 
1440     case REG:
1441       regno = REGNO (src);
1442       /* At the start of a function, argument registers have known base
1443 	 values which may be lost later.  Returning an ADDRESS
1444 	 expression here allows optimization based on argument values
1445 	 even when the argument registers are used for other purposes.  */
1446       if (regno < FIRST_PSEUDO_REGISTER && copying_arguments)
1447 	return new_reg_base_value[regno];
1448 
1449       /* If a pseudo has a known base value, return it.  Do not do this
1450 	 for non-fixed hard regs since it can result in a circular
1451 	 dependency chain for registers which have values at function entry.
1452 
1453 	 The test above is not sufficient because the scheduler may move
1454 	 a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN.  */
1455       if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno])
1456 	  && regno < vec_safe_length (reg_base_value))
1457 	{
1458 	  /* If we're inside init_alias_analysis, use new_reg_base_value
1459 	     to reduce the number of relaxation iterations.  */
1460 	  if (new_reg_base_value && new_reg_base_value[regno]
1461 	      && DF_REG_DEF_COUNT (regno) == 1)
1462 	    return new_reg_base_value[regno];
1463 
1464 	  if ((*reg_base_value)[regno])
1465 	    return (*reg_base_value)[regno];
1466 	}
1467 
1468       return 0;
1469 
1470     case MEM:
1471       /* Check for an argument passed in memory.  Only record in the
1472 	 copying-arguments block; it is too hard to track changes
1473 	 otherwise.  */
1474       if (copying_arguments
1475 	  && (XEXP (src, 0) == arg_pointer_rtx
1476 	      || (GET_CODE (XEXP (src, 0)) == PLUS
1477 		  && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx)))
1478 	return arg_base_value;
1479       return 0;
1480 
1481     case CONST:
1482       src = XEXP (src, 0);
1483       if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS)
1484 	break;
1485 
1486       /* fall through */
1487 
1488     case PLUS:
1489     case MINUS:
1490       {
1491 	rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1);
1492 
1493 	/* If either operand is a REG that is a known pointer, then it
1494 	   is the base.  */
1495 	if (REG_P (src_0) && REG_POINTER (src_0))
1496 	  return find_base_value (src_0);
1497 	if (REG_P (src_1) && REG_POINTER (src_1))
1498 	  return find_base_value (src_1);
1499 
1500 	/* If either operand is a REG, then see if we already have
1501 	   a known value for it.  */
1502 	if (REG_P (src_0))
1503 	  {
1504 	    temp = find_base_value (src_0);
1505 	    if (temp != 0)
1506 	      src_0 = temp;
1507 	  }
1508 
1509 	if (REG_P (src_1))
1510 	  {
1511 	    temp = find_base_value (src_1);
1512 	    if (temp != 0)
1513 	      src_1 = temp;
1514 	  }
1515 
1516 	/* If either base is a named object or a special address
1517 	   (like an argument or stack reference), then use it for the
1518 	   base term.  */
1519 	if (src_0 != 0 && known_base_value_p (src_0))
1520 	  return src_0;
1521 
1522 	if (src_1 != 0 && known_base_value_p (src_1))
1523 	  return src_1;
1524 
1525 	/* Guess which operand is the base address:
1526 	   If either operand is a symbol, then it is the base.  If
1527 	   either operand is a CONST_INT, then the other is the base.  */
1528 	if (CONST_INT_P (src_1) || CONSTANT_P (src_0))
1529 	  return find_base_value (src_0);
1530 	else if (CONST_INT_P (src_0) || CONSTANT_P (src_1))
1531 	  return find_base_value (src_1);
1532 
1533 	return 0;
1534       }
1535 
1536     case LO_SUM:
1537       /* The standard form is (lo_sum reg sym) so look only at the
1538 	 second operand.  */
1539       return find_base_value (XEXP (src, 1));
1540 
1541     case AND:
1542       /* Look through aligning ANDs.  An AND with a zero constant, or with
1543          a constant whose low bit is set, is not an aligning AND (see PR92462).  */
1544       if (CONST_INT_P (XEXP (src, 1))
1545 	  && INTVAL (XEXP (src, 1)) != 0
1546 	  && (INTVAL (XEXP (src, 1)) & 1) == 0)
1547 	return find_base_value (XEXP (src, 0));
1548       return 0;
1549 
1550     case TRUNCATE:
1551       /* As we do not know which address space the pointer is referring to, we can
1552 	 handle this only if the target does not support different pointer or
1553 	 address modes depending on the address space.  */
1554       if (!target_default_pointer_address_modes_p ())
1555 	break;
1556       if (!is_a <scalar_int_mode> (GET_MODE (src), &int_mode)
1557 	  || GET_MODE_PRECISION (int_mode) < GET_MODE_PRECISION (Pmode))
1558 	break;
1559       /* Fall through.  */
1560     case HIGH:
1561     case PRE_INC:
1562     case PRE_DEC:
1563     case POST_INC:
1564     case POST_DEC:
1565     case PRE_MODIFY:
1566     case POST_MODIFY:
1567       return find_base_value (XEXP (src, 0));
1568 
1569     case ZERO_EXTEND:
1570     case SIGN_EXTEND:	/* used for NT/Alpha pointers */
1571       /* As we do not know which address space the pointer is referring to, we can
1572 	 handle this only if the target does not support different pointer or
1573 	 address modes depending on the address space.  */
1574       if (!target_default_pointer_address_modes_p ())
1575 	break;
1576 
1577       {
1578 	rtx temp = find_base_value (XEXP (src, 0));
1579 
1580 	if (temp != 0 && CONSTANT_P (temp))
1581 	  temp = convert_memory_address (Pmode, temp);
1582 
1583 	return temp;
1584       }
1585 
1586     default:
1587       break;
1588     }
1589 
1590   return 0;
1591 }
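/* Illustrative note (added for exposition; not part of the upstream
   sources): for a SET source such as

     (plus:DI (reg/f:DI 6 bp) (const_int -24))

   the PLUS case above recurses on the REG operand; if reg_base_value[6]
   already records an ADDRESS marker for the frame, that marker is a
   known base value and is returned, while the CONST_INT contributes no
   base of its own and is effectively just the offset.  */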
1592 
1593 /* Called from init_alias_analysis indirectly through note_stores,
1594    or directly if DEST is a register with a REG_NOALIAS note attached.
1595    SET is null in the latter case.  */
1596 
1597 /* While scanning insns to find base values, reg_seen[N] is nonzero if
1598    register N has been set in this function.  */
1599 static sbitmap reg_seen;
1600 
1601 static void
1602 record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
1603 {
1604   unsigned regno;
1605   rtx src;
1606   int n;
1607 
1608   if (!REG_P (dest))
1609     return;
1610 
1611   regno = REGNO (dest);
1612 
1613   gcc_checking_assert (regno < reg_base_value->length ());
1614 
1615   n = REG_NREGS (dest);
1616   if (n != 1)
1617     {
1618       while (--n >= 0)
1619 	{
1620 	  bitmap_set_bit (reg_seen, regno + n);
1621 	  new_reg_base_value[regno + n] = 0;
1622 	}
1623       return;
1624     }
1625 
1626   if (set)
1627     {
1628       /* A CLOBBER wipes out any old value but does not prevent a previously
1629 	 unset register from acquiring a base address (i.e. reg_seen is not
1630 	 set).  */
1631       if (GET_CODE (set) == CLOBBER)
1632 	{
1633 	  new_reg_base_value[regno] = 0;
1634 	  return;
1635 	}
1636 
1637       src = SET_SRC (set);
1638     }
1639   else
1640     {
1641       /* There's a REG_NOALIAS note against DEST.  */
1642       if (bitmap_bit_p (reg_seen, regno))
1643 	{
1644 	  new_reg_base_value[regno] = 0;
1645 	  return;
1646 	}
1647       bitmap_set_bit (reg_seen, regno);
1648       new_reg_base_value[regno] = unique_base_value (unique_id++);
1649       return;
1650     }
1651 
1652   /* If this is not the first set of REGNO, see whether the new value
1653      is related to the old one.  There are two cases of interest:
1654 
1655 	(1) The register might be assigned an entirely new value
1656 	    that has the same base term as the original set.
1657 
1658 	(2) The set might be a simple self-modification that
1659 	    cannot change REGNO's base value.
1660 
1661      If neither case holds, reject the original base value as invalid.
1662      Note that the following situation is not detected:
1663 
1664 	 extern int x, y;  int *p = &x; p += (&y-&x);
1665 
1666      ANSI C does not allow computing the difference of addresses
1667      of distinct top level objects.  */
1668   if (new_reg_base_value[regno] != 0
1669       && find_base_value (src) != new_reg_base_value[regno])
1670     switch (GET_CODE (src))
1671       {
1672       case LO_SUM:
1673       case MINUS:
1674 	if (XEXP (src, 0) != dest && XEXP (src, 1) != dest)
1675 	  new_reg_base_value[regno] = 0;
1676 	break;
1677       case PLUS:
1678 	/* If the value we add in the PLUS is also a valid base value,
1679 	   this might be the actual base value, and the original value
1680 	   an index.  */
1681 	{
1682 	  rtx other = NULL_RTX;
1683 
1684 	  if (XEXP (src, 0) == dest)
1685 	    other = XEXP (src, 1);
1686 	  else if (XEXP (src, 1) == dest)
1687 	    other = XEXP (src, 0);
1688 
1689 	  if (! other || find_base_value (other))
1690 	    new_reg_base_value[regno] = 0;
1691 	  break;
1692 	}
1693       case AND:
1694 	if (XEXP (src, 0) != dest || !CONST_INT_P (XEXP (src, 1)))
1695 	  new_reg_base_value[regno] = 0;
1696 	break;
1697       default:
1698 	new_reg_base_value[regno] = 0;
1699 	break;
1700       }
1701   /* If this is the first set of a register, record the value.  */
1702   else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
1703 	   && ! bitmap_bit_p (reg_seen, regno) && new_reg_base_value[regno] == 0)
1704     new_reg_base_value[regno] = find_base_value (src);
1705 
1706   bitmap_set_bit (reg_seen, regno);
1707 }
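/* Example of the two cases above (added comment; register numbers
   hypothetical): once new_reg_base_value[70] holds base B, seeing

     (set (reg:DI 70) (plus:DI (reg:DI 70) (const_int 8)))

   keeps B, because the non-DEST operand is a CONST_INT with no base of
   its own -- a simple self-modification.  Had the added operand carried
   a base of its own (say a SYMBOL_REF), find_base_value would return it
   nonzero and the old base for register 70 would be invalidated.  */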
1708 
1709 /* Return REG_BASE_VALUE for REGNO.  Selective scheduler uses this to avoid
1710    using hard registers with non-null REG_BASE_VALUE for renaming.  */
1711 rtx
1712 get_reg_base_value (unsigned int regno)
1713 {
1714   return (*reg_base_value)[regno];
1715 }
1716 
1717 /* If a value is known for REGNO, return it.  */
1718 
1719 rtx
1720 get_reg_known_value (unsigned int regno)
1721 {
1722   if (regno >= FIRST_PSEUDO_REGISTER)
1723     {
1724       regno -= FIRST_PSEUDO_REGISTER;
1725       if (regno < vec_safe_length (reg_known_value))
1726 	return (*reg_known_value)[regno];
1727     }
1728   return NULL;
1729 }
1730 
1731 /* Set it.  */
1732 
1733 static void
1734 set_reg_known_value (unsigned int regno, rtx val)
1735 {
1736   if (regno >= FIRST_PSEUDO_REGISTER)
1737     {
1738       regno -= FIRST_PSEUDO_REGISTER;
1739       if (regno < vec_safe_length (reg_known_value))
1740 	(*reg_known_value)[regno] = val;
1741     }
1742 }
1743 
1744 /* Similarly for reg_known_equiv_p.  */
1745 
1746 bool
1747 get_reg_known_equiv_p (unsigned int regno)
1748 {
1749   if (regno >= FIRST_PSEUDO_REGISTER)
1750     {
1751       regno -= FIRST_PSEUDO_REGISTER;
1752       if (regno < vec_safe_length (reg_known_value))
1753 	return bitmap_bit_p (reg_known_equiv_p, regno);
1754     }
1755   return false;
1756 }
1757 
1758 static void
1759 set_reg_known_equiv_p (unsigned int regno, bool val)
1760 {
1761   if (regno >= FIRST_PSEUDO_REGISTER)
1762     {
1763       regno -= FIRST_PSEUDO_REGISTER;
1764       if (regno < vec_safe_length (reg_known_value))
1765 	{
1766 	  if (val)
1767 	    bitmap_set_bit (reg_known_equiv_p, regno);
1768 	  else
1769 	    bitmap_clear_bit (reg_known_equiv_p, regno);
1770 	}
1771     }
1772 }
1773 
1774 
1775 /* Returns a canonical version of X, from the point of view alias
1776    analysis.  (For example, if X is a MEM whose address is a register,
1777    and the register has a known value (say a SYMBOL_REF), then a MEM
1778    whose address is the SYMBOL_REF is returned.)  */
1779 
1780 rtx
1781 canon_rtx (rtx x)
1782 {
1783   /* Recursively look for equivalences.  */
1784   if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1785     {
1786       rtx t = get_reg_known_value (REGNO (x));
1787       if (t == x)
1788 	return x;
1789       if (t)
1790 	return canon_rtx (t);
1791     }
1792 
1793   if (GET_CODE (x) == PLUS)
1794     {
1795       rtx x0 = canon_rtx (XEXP (x, 0));
1796       rtx x1 = canon_rtx (XEXP (x, 1));
1797 
1798       if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
1799 	return simplify_gen_binary (PLUS, GET_MODE (x), x0, x1);
1800     }
1801 
1802   /* This gives us much better alias analysis when called from
1803      the loop optimizer.  Note that we want to leave the original
1804      MEM alone, but we need to return the canonicalized MEM with
1805      all of its flags at their original values.  */
1806   else if (MEM_P (x))
1807     x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0)));
1808 
1809   return x;
1810 }
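/* Example (added for exposition; pseudo number hypothetical): if
   reg_known_value records (symbol_ref:DI ("buf")) for pseudo 85, then

     canon_rtx ((plus:DI (reg:DI 85) (const_int 4)))

   canonicalizes both operands and rebuilds the sum via
   simplify_gen_binary, giving an address based on the SYMBOL_REF rather
   than on the register, which is far more useful to the disambiguators
   below.  */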
1811 
1812 /* Return 1 if X and Y are identical-looking rtx's.
1813    Expect that X and Y have already been canonicalized.
1814 
1815    We use the data in reg_known_value above to see if two registers with
1816    different numbers are, in fact, equivalent.  */
1817 
1818 static int
1819 rtx_equal_for_memref_p (const_rtx x, const_rtx y)
1820 {
1821   int i;
1822   int j;
1823   enum rtx_code code;
1824   const char *fmt;
1825 
1826   if (x == 0 && y == 0)
1827     return 1;
1828   if (x == 0 || y == 0)
1829     return 0;
1830 
1831   if (x == y)
1832     return 1;
1833 
1834   code = GET_CODE (x);
1835   /* Rtx's of different codes cannot be equal.  */
1836   if (code != GET_CODE (y))
1837     return 0;
1838 
1839   /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1840      (REG:SI x) and (REG:HI x) are NOT equivalent.  */
1841 
1842   if (GET_MODE (x) != GET_MODE (y))
1843     return 0;
1844 
1845   /* Some RTL can be compared without a recursive examination.  */
1846   switch (code)
1847     {
1848     case REG:
1849       return REGNO (x) == REGNO (y);
1850 
1851     case LABEL_REF:
1852       return label_ref_label (x) == label_ref_label (y);
1853 
1854     case SYMBOL_REF:
1855       {
1856 	HOST_WIDE_INT distance = 0;
1857 	return (compare_base_symbol_refs (x, y, &distance) == 1
1858 		&& distance == 0);
1859       }
1860 
1861     case ENTRY_VALUE:
1862       /* This is magic, don't go through canonicalization et al.  */
1863       return rtx_equal_p (ENTRY_VALUE_EXP (x), ENTRY_VALUE_EXP (y));
1864 
1865     case VALUE:
1866     CASE_CONST_UNIQUE:
1867       /* Pointer equality guarantees equality for these nodes.  */
1868       return 0;
1869 
1870     default:
1871       break;
1872     }
1873 
1874   /* canon_rtx knows how to handle plus.  No need to canonicalize.  */
1875   if (code == PLUS)
1876     return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
1877 	     && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
1878 	    || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
1879 		&& rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
1880   /* For commutative operations, the RTXs match if the operands match in
1881      either order.  Also handle the simple binary and unary cases without a loop.  */
1882   if (COMMUTATIVE_P (x))
1883     {
1884       rtx xop0 = canon_rtx (XEXP (x, 0));
1885       rtx yop0 = canon_rtx (XEXP (y, 0));
1886       rtx yop1 = canon_rtx (XEXP (y, 1));
1887 
1888       return ((rtx_equal_for_memref_p (xop0, yop0)
1889 	       && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop1))
1890 	      || (rtx_equal_for_memref_p (xop0, yop1)
1891 		  && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop0)));
1892     }
1893   else if (NON_COMMUTATIVE_P (x))
1894     {
1895       return (rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
1896 				      canon_rtx (XEXP (y, 0)))
1897 	      && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)),
1898 					 canon_rtx (XEXP (y, 1))));
1899     }
1900   else if (UNARY_P (x))
1901     return rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
1902 				   canon_rtx (XEXP (y, 0)));
1903 
1904   /* Compare the elements.  If any pair of corresponding elements
1905      fails to match, return 0 for the whole thing.
1906 
1907      Limit cases to types which actually appear in addresses.  */
1908 
1909   fmt = GET_RTX_FORMAT (code);
1910   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1911     {
1912       switch (fmt[i])
1913 	{
1914 	case 'i':
1915 	  if (XINT (x, i) != XINT (y, i))
1916 	    return 0;
1917 	  break;
1918 
1919 	case 'p':
1920 	  if (maybe_ne (SUBREG_BYTE (x), SUBREG_BYTE (y)))
1921 	    return 0;
1922 	  break;
1923 
1924 	case 'E':
1925 	  /* Two vectors must have the same length.  */
1926 	  if (XVECLEN (x, i) != XVECLEN (y, i))
1927 	    return 0;
1928 
1929 	  /* And the corresponding elements must match.  */
1930 	  for (j = 0; j < XVECLEN (x, i); j++)
1931 	    if (rtx_equal_for_memref_p (canon_rtx (XVECEXP (x, i, j)),
1932 					canon_rtx (XVECEXP (y, i, j))) == 0)
1933 	      return 0;
1934 	  break;
1935 
1936 	case 'e':
1937 	  if (rtx_equal_for_memref_p (canon_rtx (XEXP (x, i)),
1938 				      canon_rtx (XEXP (y, i))) == 0)
1939 	    return 0;
1940 	  break;
1941 
1942 	  /* This can happen for asm operands.  */
1943 	case 's':
1944 	  if (strcmp (XSTR (x, i), XSTR (y, i)))
1945 	    return 0;
1946 	  break;
1947 
1948 	/* This can happen for an asm which clobbers memory.  */
1949 	case '0':
1950 	  break;
1951 
1952 	  /* It is believed that rtx's at this level will never
1953 	     contain anything but integers and other rtx's,
1954 	     except for within LABEL_REFs and SYMBOL_REFs.  */
1955 	default:
1956 	  gcc_unreachable ();
1957 	}
1958     }
1959   return 1;
1960 }
1961 
1962 static rtx
1963 find_base_term (rtx x, vec<std::pair<cselib_val *,
1964 				     struct elt_loc_list *> > &visited_vals)
1965 {
1966   cselib_val *val;
1967   struct elt_loc_list *l, *f;
1968   rtx ret;
1969   scalar_int_mode int_mode;
1970 
1971 #if defined (FIND_BASE_TERM)
1972   /* Try machine-dependent ways to find the base term.  */
1973   x = FIND_BASE_TERM (x);
1974 #endif
1975 
1976   switch (GET_CODE (x))
1977     {
1978     case REG:
1979       return REG_BASE_VALUE (x);
1980 
1981     case TRUNCATE:
1982       /* As we do not know which address space the pointer is referring to, we can
1983 	 handle this only if the target does not support different pointer or
1984 	 address modes depending on the address space.  */
1985       if (!target_default_pointer_address_modes_p ())
1986 	return 0;
1987       if (!is_a <scalar_int_mode> (GET_MODE (x), &int_mode)
1988 	  || GET_MODE_PRECISION (int_mode) < GET_MODE_PRECISION (Pmode))
1989 	return 0;
1990       /* Fall through.  */
1991     case HIGH:
1992     case PRE_INC:
1993     case PRE_DEC:
1994     case POST_INC:
1995     case POST_DEC:
1996     case PRE_MODIFY:
1997     case POST_MODIFY:
1998       return find_base_term (XEXP (x, 0), visited_vals);
1999 
2000     case ZERO_EXTEND:
2001     case SIGN_EXTEND:	/* Used for Alpha/NT pointers */
2002       /* As we do not know which address space the pointer is referring to, we can
2003 	 handle this only if the target does not support different pointer or
2004 	 address modes depending on the address space.  */
2005       if (!target_default_pointer_address_modes_p ())
2006 	return 0;
2007 
2008       {
2009 	rtx temp = find_base_term (XEXP (x, 0), visited_vals);
2010 
2011 	if (temp != 0 && CONSTANT_P (temp))
2012 	  temp = convert_memory_address (Pmode, temp);
2013 
2014 	return temp;
2015       }
2016 
2017     case VALUE:
2018       val = CSELIB_VAL_PTR (x);
2019       ret = NULL_RTX;
2020 
2021       if (!val)
2022 	return ret;
2023 
2024       if (cselib_sp_based_value_p (val))
2025 	return static_reg_base_value[STACK_POINTER_REGNUM];
2026 
2027       if (visited_vals.length () > (unsigned) param_max_find_base_term_values)
2028 	return ret;
2029 
2030       f = val->locs;
2031       /* Reset val->locs to avoid infinite recursion.  */
2032       if (f)
2033 	visited_vals.safe_push (std::make_pair (val, f));
2034       val->locs = NULL;
2035 
2036       for (l = f; l; l = l->next)
2037 	if (GET_CODE (l->loc) == VALUE
2038 	    && CSELIB_VAL_PTR (l->loc)->locs
2039 	    && !CSELIB_VAL_PTR (l->loc)->locs->next
2040 	    && CSELIB_VAL_PTR (l->loc)->locs->loc == x)
2041 	  continue;
2042 	else if ((ret = find_base_term (l->loc, visited_vals)) != 0)
2043 	  break;
2044 
2045       return ret;
2046 
2047     case LO_SUM:
2048       /* The standard form is (lo_sum reg sym) so look only at the
2049          second operand.  */
2050       return find_base_term (XEXP (x, 1), visited_vals);
2051 
2052     case CONST:
2053       x = XEXP (x, 0);
2054       if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS)
2055 	return 0;
2056       /* Fall through.  */
2057     case PLUS:
2058     case MINUS:
2059       {
2060 	rtx tmp1 = XEXP (x, 0);
2061 	rtx tmp2 = XEXP (x, 1);
2062 
2063 	/* This is a little bit tricky since we have to determine which of
2064 	   the two operands represents the real base address.  Otherwise this
2065 	   routine may return the index register instead of the base register.
2066 
2067 	   That may cause us to believe no aliasing was possible, when in
2068 	   fact aliasing is possible.
2069 
2070 	   We use a few simple tests to guess the base register.  Additional
2071 	   tests can certainly be added.  For example, if one of the operands
2072 	   is a shift or multiply, then it must be the index register and the
2073 	   other operand is the base register.  */
2074 
2075 	if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2))
2076 	  return find_base_term (tmp2, visited_vals);
2077 
2078 	/* If either operand is known to be a pointer, then prefer it
2079 	   to determine the base term.  */
2080 	if (REG_P (tmp1) && REG_POINTER (tmp1))
2081 	  ;
2082 	else if (REG_P (tmp2) && REG_POINTER (tmp2))
2083 	  std::swap (tmp1, tmp2);
2084 	/* If the second argument is a constant that has a base term, prefer
2085 	   it over the variable tmp1.  See PR64025.  */
2086 	else if (CONSTANT_P (tmp2) && !CONST_INT_P (tmp2))
2087 	  std::swap (tmp1, tmp2);
2088 
2089 	/* Go ahead and find the base term for both operands.  If either base
2090 	   term is from a pointer or is a named object or a special address
2091 	   (like an argument or stack reference), then use it for the
2092 	   base term.  */
2093 	rtx base = find_base_term (tmp1, visited_vals);
2094 	if (base != NULL_RTX
2095 	    && ((REG_P (tmp1) && REG_POINTER (tmp1))
2096 		 || known_base_value_p (base)))
2097 	  return base;
2098 	base = find_base_term (tmp2, visited_vals);
2099 	if (base != NULL_RTX
2100 	    && ((REG_P (tmp2) && REG_POINTER (tmp2))
2101 		 || known_base_value_p (base)))
2102 	  return base;
2103 
2104 	/* We could not determine which of the two operands was the
2105 	   base register and which was the index.  So we can determine
2106 	   nothing from the base alias check.  */
2107 	return 0;
2108       }
2109 
2110     case AND:
2111       /* Look through aligning ANDs.  An AND with a zero constant, or with
2112          a constant whose low bit is set, is not an aligning AND (see PR92462).  */
2113       if (CONST_INT_P (XEXP (x, 1))
2114 	  && INTVAL (XEXP (x, 1)) != 0
2115 	  && (INTVAL (XEXP (x, 1)) & 1) == 0)
2116 	return find_base_term (XEXP (x, 0), visited_vals);
2117       return 0;
2118 
2119     case SYMBOL_REF:
2120     case LABEL_REF:
2121       return x;
2122 
2123     default:
2124       return 0;
2125     }
2126 }
2127 
2128 /* Wrapper around the worker above which removes locs from visited VALUEs
2129    to avoid visiting them multiple times.  We undo those changes here.  */
2130 
2131 static rtx
2132 find_base_term (rtx x)
2133 {
2134   auto_vec<std::pair<cselib_val *, struct elt_loc_list *>, 32> visited_vals;
2135   rtx res = find_base_term (x, visited_vals);
2136   for (unsigned i = 0; i < visited_vals.length (); ++i)
2137     visited_vals[i].first->locs = visited_vals[i].second;
2138   return res;
2139 }
2140 
2141 /* Return true if accesses to address X may alias accesses based
2142    on the stack pointer.  */
2143 
2144 bool
2145 may_be_sp_based_p (rtx x)
2146 {
2147   rtx base = find_base_term (x);
2148   return !base || base == static_reg_base_value[STACK_POINTER_REGNUM];
2149 }
2150 
2151 /* BASE1 and BASE2 are decls.  Return 1 if they refer to same object, 0
2152    if they refer to different objects and -1 if we cannot decide.  */
2153 
2154 int
2155 compare_base_decls (tree base1, tree base2)
2156 {
2157   int ret;
2158   gcc_checking_assert (DECL_P (base1) && DECL_P (base2));
2159   if (base1 == base2)
2160     return 1;
2161 
2162   /* If we have two register decls with register specification we
2163      cannot decide unless their assembler names are the same.  */
2164   if (VAR_P (base1)
2165       && VAR_P (base2)
2166       && DECL_HARD_REGISTER (base1)
2167       && DECL_HARD_REGISTER (base2)
2168       && DECL_ASSEMBLER_NAME_SET_P (base1)
2169       && DECL_ASSEMBLER_NAME_SET_P (base2))
2170     {
2171       if (DECL_ASSEMBLER_NAME_RAW (base1) == DECL_ASSEMBLER_NAME_RAW (base2))
2172 	return 1;
2173       return -1;
2174     }
2175 
2176   /* Declarations of non-automatic variables may have aliases.  All other
2177      decls are unique.  */
2178   if (!decl_in_symtab_p (base1)
2179       || !decl_in_symtab_p (base2))
2180     return 0;
2181 
2182   /* Don't cause symbols to be inserted by the act of checking.  */
2183   symtab_node *node1 = symtab_node::get (base1);
2184   if (!node1)
2185     return 0;
2186   symtab_node *node2 = symtab_node::get (base2);
2187   if (!node2)
2188     return 0;
2189 
2190   ret = node1->equal_address_to (node2, true);
2191   return ret;
2192 }
2193 
2194 /* Compare SYMBOL_REFs X_BASE and Y_BASE.
2195 
2196    - Return 1 if Y_BASE - X_BASE is constant, adding that constant
2197      to *DISTANCE if DISTANCE is nonnull.
2198 
2199    - Return 0 if no accesses based on X_BASE can alias Y_BASE.
2200 
2201    - Return -1 if one of the two results applies, but we can't tell
2202      which at compile time.  Update DISTANCE in the same way as
2203      for a return value of 1, for the case in which that holds.  */
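/* Worked example of the DISTANCE contract (added; offsets assumed):
   if X_BASE and Y_BASE end up in the same section-anchor block at
   block offsets 8 and 24 respectively, the function adds 24 - 8 = 16
   to *DISTANCE and returns 1, or -1 when the symbol was reached
   through an alias whose definition may be interposed.  */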
2204 
2205 static int
2206 compare_base_symbol_refs (const_rtx x_base, const_rtx y_base,
2207 			  HOST_WIDE_INT *distance)
2208 {
2209   tree x_decl = SYMBOL_REF_DECL (x_base);
2210   tree y_decl = SYMBOL_REF_DECL (y_base);
2211   bool binds_def = true;
2212   bool swap = false;
2213 
2214   if (XSTR (x_base, 0) == XSTR (y_base, 0))
2215     return 1;
2216   if (x_decl && y_decl)
2217     return compare_base_decls (x_decl, y_decl);
2218   if (x_decl || y_decl)
2219     {
2220       if (!x_decl)
2221 	{
2222 	  swap = true;
2223 	  std::swap (x_decl, y_decl);
2224 	  std::swap (x_base, y_base);
2225 	}
2226       /* We handle only section anchors specially.  Other symbols are
2227 	 either equal (via aliasing) or refer to different objects.  */
2228       if (!SYMBOL_REF_HAS_BLOCK_INFO_P (y_base))
2229         return -1;
2230       /* Anchors contain static VAR_DECLs and CONST_DECLs.  We can safely
2231 	 ignore CONST_DECLs because they are read-only.  */
2232       if (!VAR_P (x_decl)
2233 	  || (!TREE_STATIC (x_decl) && !TREE_PUBLIC (x_decl)))
2234 	return 0;
2235 
2236       symtab_node *x_node = symtab_node::get_create (x_decl)
2237 			    ->ultimate_alias_target ();
2238       /* An external variable cannot be in a section anchor.  */
2239       if (!x_node->definition)
2240 	return 0;
2241       x_base = XEXP (DECL_RTL (x_node->decl), 0);
2242       /* If it is not in an anchor, we can disambiguate.  */
2243       if (!SYMBOL_REF_HAS_BLOCK_INFO_P (x_base))
2244 	return 0;
2245 
2246       /* We have an alias of an anchored variable.  If it can be interposed,
2247 	 we must assume it may or may not alias its anchor.  */
2248       binds_def = decl_binds_to_current_def_p (x_decl);
2249     }
2250   /* If we have a variable in a section anchor, we can compare by offset.  */
2251   if (SYMBOL_REF_HAS_BLOCK_INFO_P (x_base)
2252       && SYMBOL_REF_HAS_BLOCK_INFO_P (y_base))
2253     {
2254       if (SYMBOL_REF_BLOCK (x_base) != SYMBOL_REF_BLOCK (y_base))
2255 	return 0;
2256       if (distance)
2257 	*distance += (swap ? -1 : 1) * (SYMBOL_REF_BLOCK_OFFSET (y_base)
2258 					- SYMBOL_REF_BLOCK_OFFSET (x_base));
2259       return binds_def ? 1 : -1;
2260     }
2261   /* Either the symbols are equal (via aliasing) or they refer to
2262      different objects.  */
2263   return -1;
2264 }
2265 
2266 /* Return 0 if the addresses X and Y are known to point to different
2267    objects, 1 if they might be pointers to the same object.  */
2268 
2269 static int
2270 base_alias_check (rtx x, rtx x_base, rtx y, rtx y_base,
2271 		  machine_mode x_mode, machine_mode y_mode)
2272 {
2273   /* If the address itself has no known base see if a known equivalent
2274      value has one.  If either address still has no known base, nothing
2275      is known about aliasing.  */
2276   if (x_base == 0)
2277     {
2278       rtx x_c;
2279 
2280       if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x)
2281 	return 1;
2282 
2283       x_base = find_base_term (x_c);
2284       if (x_base == 0)
2285 	return 1;
2286     }
2287 
2288   if (y_base == 0)
2289     {
2290       rtx y_c;
2291       if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y)
2292 	return 1;
2293 
2294       y_base = find_base_term (y_c);
2295       if (y_base == 0)
2296 	return 1;
2297     }
2298 
2299   /* If the base addresses are equal nothing is known about aliasing.  */
2300   if (rtx_equal_p (x_base, y_base))
2301     return 1;
2302 
2303   /* The base addresses are different expressions.  If they are not accessed
2304      via AND, there is no conflict.  We can bring knowledge of object
2305      alignment into play here.  For example, on alpha, "char a, b;" can
2306      alias one another, though "char a; long b;" cannot.  AND addresses may
2307      implicitly alias surrounding objects; i.e. unaligned access in DImode
2308      via AND address can alias all surrounding object types except those
2309      with alignment 8 or higher.  */
2310   if (GET_CODE (x) == AND && GET_CODE (y) == AND)
2311     return 1;
2312   if (GET_CODE (x) == AND
2313       && (!CONST_INT_P (XEXP (x, 1))
2314 	  || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
2315     return 1;
2316   if (GET_CODE (y) == AND
2317       && (!CONST_INT_P (XEXP (y, 1))
2318 	  || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
2319     return 1;
2320 
2321   /* Differing symbols not accessed via AND never alias.  */
2322   if (GET_CODE (x_base) == SYMBOL_REF && GET_CODE (y_base) == SYMBOL_REF)
2323     return compare_base_symbol_refs (x_base, y_base) != 0;
2324 
2325   if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
2326     return 0;
2327 
2328   if (unique_base_value_p (x_base) || unique_base_value_p (y_base))
2329     return 0;
2330 
2331   return 1;
2332 }
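/* Typical outcomes (added comment): with X based on (symbol_ref "a"),
   Y based on (symbol_ref "b"), and neither address an AND, the
   SYMBOL_REF branch above asks compare_base_symbol_refs, which usually
   reports the symbols refer to different objects, so the result is 0 --
   no aliasing.  If either base cannot be determined even after
   canonicalization, the function answers 1 and leaves the decision to
   the offset-based checks.  */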
2333 
2334 /* Return TRUE if EXPR refers to a VALUE whose uid is greater than
2335    (or equal to) that of V.  */
2336 
2337 static bool
2338 refs_newer_value_p (const_rtx expr, rtx v)
2339 {
2340   int minuid = CSELIB_VAL_PTR (v)->uid;
2341   subrtx_iterator::array_type array;
2342   FOR_EACH_SUBRTX (iter, array, expr, NONCONST)
2343     if (GET_CODE (*iter) == VALUE && CSELIB_VAL_PTR (*iter)->uid >= minuid)
2344       return true;
2345   return false;
2346 }
2347 
2348 /* Convert the address X into something we can use.  This is done by returning
2349    it unchanged unless it is a VALUE or VALUE +/- constant; for VALUE
2350    we call cselib to get a more useful rtx.  */
2351 
2352 rtx
2353 get_addr (rtx x)
2354 {
2355   cselib_val *v;
2356   struct elt_loc_list *l;
2357 
2358   if (GET_CODE (x) != VALUE)
2359     {
2360       if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
2361 	  && GET_CODE (XEXP (x, 0)) == VALUE
2362 	  && CONST_SCALAR_INT_P (XEXP (x, 1)))
2363 	{
2364 	  rtx op0 = get_addr (XEXP (x, 0));
2365 	  if (op0 != XEXP (x, 0))
2366 	    {
2367 	      poly_int64 c;
2368 	      if (GET_CODE (x) == PLUS
2369 		  && poly_int_rtx_p (XEXP (x, 1), &c))
2370 		return plus_constant (GET_MODE (x), op0, c);
2371 	      return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
2372 					  op0, XEXP (x, 1));
2373 	    }
2374 	}
2375       return x;
2376     }
2377   v = CSELIB_VAL_PTR (x);
2378   if (v)
2379     {
2380       bool have_equivs = cselib_have_permanent_equivalences ();
2381       if (have_equivs)
2382 	v = canonical_cselib_val (v);
2383       for (l = v->locs; l; l = l->next)
2384 	if (CONSTANT_P (l->loc))
2385 	  return l->loc;
2386       for (l = v->locs; l; l = l->next)
2387 	if (!REG_P (l->loc) && !MEM_P (l->loc)
2388 	    /* Avoid infinite recursion when potentially dealing with
2389 	       var-tracking artificial equivalences, by skipping the
2390 	       equivalences themselves, and not choosing expressions
2391 	       that refer to newer VALUEs.  */
2392 	    && (!have_equivs
2393 		|| (GET_CODE (l->loc) != VALUE
2394 		    && !refs_newer_value_p (l->loc, x))))
2395 	  return l->loc;
2396       if (have_equivs)
2397 	{
2398 	  for (l = v->locs; l; l = l->next)
2399 	    if (REG_P (l->loc)
2400 		|| (GET_CODE (l->loc) != VALUE
2401 		    && !refs_newer_value_p (l->loc, x)))
2402 	      return l->loc;
2403 	  /* Return the canonical value.  */
2404 	  return v->val_rtx;
2405 	}
2406       if (v->locs)
2407 	return v->locs->loc;
2408     }
2409   return x;
2410 }
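/* Example (added for exposition): for a VALUE whose location list
   holds both (reg:DI 71) and (symbol_ref:DI ("tab")), the first loop
   above returns the SYMBOL_REF, since constant locations are preferred;
   the alias code then reasons about a concrete address instead of the
   opaque VALUE handle.  */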
2411 
2412 /*  Return the address of the (N_REFS + 1)th memory reference to ADDR
2413     where SIZE is the size in bytes of the memory reference.  If ADDR
2414     is not modified by the memory reference then ADDR is returned.  */
2415 
2416 static rtx
2417 addr_side_effect_eval (rtx addr, poly_int64 size, int n_refs)
2418 {
2419   poly_int64 offset = 0;
2420 
2421   switch (GET_CODE (addr))
2422     {
2423     case PRE_INC:
2424       offset = (n_refs + 1) * size;
2425       break;
2426     case PRE_DEC:
2427       offset = -(n_refs + 1) * size;
2428       break;
2429     case POST_INC:
2430       offset = n_refs * size;
2431       break;
2432     case POST_DEC:
2433       offset = -n_refs * size;
2434       break;
2435 
2436     default:
2437       return addr;
2438     }
2439 
2440   addr = plus_constant (GET_MODE (addr), XEXP (addr, 0), offset);
2441   addr = canon_rtx (addr);
2442 
2443   return addr;
2444 }
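/* For instance (added comment): with ADDR = (pre_dec:DI (reg:DI sp)),
   SIZE = 8 and N_REFS = 0, the PRE_DEC case computes the offset
   -(0 + 1) * 8 = -8 and the function returns the canonicalized form of
   (plus:DI (reg:DI sp) (const_int -8)), i.e. the address the first
   reference actually uses.  */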
2445 
2446 /* Return TRUE if an object X sized at XSIZE bytes and another object
2447    Y sized at YSIZE bytes, starting C bytes after X, may overlap.  If
2448    either of the sizes is zero, assume an overlap, otherwise use the
2449    absolute value of the sizes as the actual sizes.  */
2450 
2451 static inline bool
2452 offset_overlap_p (poly_int64 c, poly_int64 xsize, poly_int64 ysize)
2453 {
2454   if (known_eq (xsize, 0) || known_eq (ysize, 0))
2455     return true;
2456 
2457   if (maybe_ge (c, 0))
2458     return maybe_gt (maybe_lt (xsize, 0) ? -xsize : xsize, c);
2459   else
2460     return maybe_gt (maybe_lt (ysize, 0) ? -ysize : ysize, -c);
2461 }
2462 
2463 /* Return one if X and Y (memory addresses) reference the
2464    same location in memory or if the references overlap.
2465    Return zero if they do not overlap, else return
2466    minus one in which case they still might reference the same location.
2467 
2468    C is an offset accumulator.  When
2469    C is nonzero, we are testing aliases between X and Y + C.
2470    XSIZE is the size in bytes of the X reference,
2471    similarly YSIZE is the size in bytes for Y.
2472    Expect that canon_rtx has been already called for X and Y.
2473 
2474    If XSIZE or YSIZE is zero, we do not know the amount of memory being
2475    referenced (the reference was BLKmode), so make the most pessimistic
2476    assumptions.
2477 
2478    If XSIZE or YSIZE is negative, we may access memory outside the object
2479    being referenced as a side effect.  This can happen when using AND to
2480    align memory references, as is done on the Alpha.
2481 
2482    Nice to notice that varying addresses cannot conflict with fp if no
2483    local variables had their addresses taken, but that's too hard now.
2484 
2485    ???  Contrary to the tree alias oracle this does not return
2486    one for X + non-constant and Y + non-constant when X and Y are equal.
2487    If that is fixed the TBAA hack for union type-punning can be removed.  */
2488 
2489 static int
2490 memrefs_conflict_p (poly_int64 xsize, rtx x, poly_int64 ysize, rtx y,
2491 		    poly_int64 c)
2492 {
2493   if (GET_CODE (x) == VALUE)
2494     {
2495       if (REG_P (y))
2496 	{
2497 	  struct elt_loc_list *l = NULL;
2498 	  if (CSELIB_VAL_PTR (x))
2499 	    for (l = canonical_cselib_val (CSELIB_VAL_PTR (x))->locs;
2500 		 l; l = l->next)
2501 	      if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, y))
2502 		break;
2503 	  if (l)
2504 	    x = y;
2505 	  else
2506 	    x = get_addr (x);
2507 	}
2508       /* Don't call get_addr if y is the same VALUE.  */
2509       else if (x != y)
2510 	x = get_addr (x);
2511     }
2512   if (GET_CODE (y) == VALUE)
2513     {
2514       if (REG_P (x))
2515 	{
2516 	  struct elt_loc_list *l = NULL;
2517 	  if (CSELIB_VAL_PTR (y))
2518 	    for (l = canonical_cselib_val (CSELIB_VAL_PTR (y))->locs;
2519 		 l; l = l->next)
2520 	      if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, x))
2521 		break;
2522 	  if (l)
2523 	    y = x;
2524 	  else
2525 	    y = get_addr (y);
2526 	}
2527       /* Don't call get_addr if x is the same VALUE.  */
2528       else if (y != x)
2529 	y = get_addr (y);
2530     }
2531   if (GET_CODE (x) == HIGH)
2532     x = XEXP (x, 0);
2533   else if (GET_CODE (x) == LO_SUM)
2534     x = XEXP (x, 1);
2535   else
2536     x = addr_side_effect_eval (x, maybe_lt (xsize, 0) ? -xsize : xsize, 0);
2537   if (GET_CODE (y) == HIGH)
2538     y = XEXP (y, 0);
2539   else if (GET_CODE (y) == LO_SUM)
2540     y = XEXP (y, 1);
2541   else
2542     y = addr_side_effect_eval (y, maybe_lt (ysize, 0) ? -ysize : ysize, 0);
2543 
2544   if (GET_CODE (x) == SYMBOL_REF && GET_CODE (y) == SYMBOL_REF)
2545     {
2546       HOST_WIDE_INT distance = 0;
2547       int cmp = compare_base_symbol_refs (x, y, &distance);
2548 
2549       /* If both decls are the same, decide by offsets.  */
2550       if (cmp == 1)
2551 	return offset_overlap_p (c + distance, xsize, ysize);
2552       /* Assume a potential overlap for symbolic addresses that went
2553 	 through alignment adjustments (i.e., that have negative
2554 	 sizes), because we can't know how far they are from each
2555 	 other.  */
2556       if (maybe_lt (xsize, 0) || maybe_lt (ysize, 0))
2557 	return -1;
2558       /* If decls are different or we know by offsets that there is no overlap,
2559 	 we win.  */
2560       if (!cmp || !offset_overlap_p (c + distance, xsize, ysize))
2561 	return 0;
2562       /* The decls may or may not be different and the offsets overlap.  */
2563       return -1;
2564     }
2565   else if (rtx_equal_for_memref_p (x, y))
2566     {
2567       return offset_overlap_p (c, xsize, ysize);
2568     }
2569 
2570   /* This code used to check for conflicts involving stack references and
2571      globals but the base address alias code now handles these cases.  */
2572 
2573   if (GET_CODE (x) == PLUS)
2574     {
2575       /* The fact that X is canonicalized means that this
2576 	 PLUS rtx is canonicalized.  */
2577       rtx x0 = XEXP (x, 0);
2578       rtx x1 = XEXP (x, 1);
2579 
2580       /* However, VALUEs might end up in different positions even in
2581 	 canonical PLUSes.  Comparing their addresses is enough.  */
2582       if (x0 == y)
2583 	return memrefs_conflict_p (xsize, x1, ysize, const0_rtx, c);
2584       else if (x1 == y)
2585 	return memrefs_conflict_p (xsize, x0, ysize, const0_rtx, c);
2586 
2587       poly_int64 cx1, cy1;
2588       if (GET_CODE (y) == PLUS)
2589 	{
2590 	  /* The fact that Y is canonicalized means that this
2591 	     PLUS rtx is canonicalized.  */
2592 	  rtx y0 = XEXP (y, 0);
2593 	  rtx y1 = XEXP (y, 1);
2594 
2595 	  if (x0 == y1)
2596 	    return memrefs_conflict_p (xsize, x1, ysize, y0, c);
2597 	  if (x1 == y0)
2598 	    return memrefs_conflict_p (xsize, x0, ysize, y1, c);
2599 
2600 	  if (rtx_equal_for_memref_p (x1, y1))
2601 	    return memrefs_conflict_p (xsize, x0, ysize, y0, c);
2602 	  if (rtx_equal_for_memref_p (x0, y0))
2603 	    return memrefs_conflict_p (xsize, x1, ysize, y1, c);
2604 	  if (poly_int_rtx_p (x1, &cx1))
2605 	    {
2606 	      if (poly_int_rtx_p (y1, &cy1))
2607 		return memrefs_conflict_p (xsize, x0, ysize, y0,
2608 					   c - cx1 + cy1);
2609 	      else
2610 		return memrefs_conflict_p (xsize, x0, ysize, y, c - cx1);
2611 	    }
2612 	  else if (poly_int_rtx_p (y1, &cy1))
2613 	    return memrefs_conflict_p (xsize, x, ysize, y0, c + cy1);
2614 
2615 	  return -1;
2616 	}
2617       else if (poly_int_rtx_p (x1, &cx1))
2618 	return memrefs_conflict_p (xsize, x0, ysize, y, c - cx1);
2619     }
2620   else if (GET_CODE (y) == PLUS)
2621     {
2622       /* The fact that Y is canonicalized means that this
2623 	 PLUS rtx is canonicalized.  */
2624       rtx y0 = XEXP (y, 0);
2625       rtx y1 = XEXP (y, 1);
2626 
2627       if (x == y0)
2628 	return memrefs_conflict_p (xsize, const0_rtx, ysize, y1, c);
2629       if (x == y1)
2630 	return memrefs_conflict_p (xsize, const0_rtx, ysize, y0, c);
2631 
2632       poly_int64 cy1;
2633       if (poly_int_rtx_p (y1, &cy1))
2634 	return memrefs_conflict_p (xsize, x, ysize, y0, c + cy1);
2635       else
2636 	return -1;
2637     }
2638 
2639   if (GET_CODE (x) == GET_CODE (y))
2640     switch (GET_CODE (x))
2641       {
2642       case MULT:
2643 	{
2644 	  /* Handle cases where we expect the second operands to be the
2645 	     same, and check only whether the first operand would conflict
2646 	     or not.  */
2647 	  rtx x0, y0;
2648 	  rtx x1 = canon_rtx (XEXP (x, 1));
2649 	  rtx y1 = canon_rtx (XEXP (y, 1));
2650 	  if (! rtx_equal_for_memref_p (x1, y1))
2651 	    return -1;
2652 	  x0 = canon_rtx (XEXP (x, 0));
2653 	  y0 = canon_rtx (XEXP (y, 0));
2654 	  if (rtx_equal_for_memref_p (x0, y0))
2655 	    return offset_overlap_p (c, xsize, ysize);
2656 
2657 	  /* Can't properly adjust our sizes.  */
2658 	  poly_int64 c1;
2659 	  if (!poly_int_rtx_p (x1, &c1)
2660 	      || !can_div_trunc_p (xsize, c1, &xsize)
2661 	      || !can_div_trunc_p (ysize, c1, &ysize)
2662 	      || !can_div_trunc_p (c, c1, &c))
2663 	    return -1;
2664 	  return memrefs_conflict_p (xsize, x0, ysize, y0, c);
2665 	}
2666 
2667       default:
2668 	break;
2669       }
2670 
2671   /* Deal with alignment ANDs by adjusting offset and size so as to
2672      cover the maximum range, without taking any previously known
2673      alignment into account.  Make a size negative after such an
2674      adjustment, so that, if we end up with e.g. two SYMBOL_REFs, we
2675      assume a potential overlap, because they may end up in contiguous
2676      memory locations and the stricter-alignment access may span over
2677      part of both.  */
2678   if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1)))
2679     {
2680       HOST_WIDE_INT sc = INTVAL (XEXP (x, 1));
2681       unsigned HOST_WIDE_INT uc = sc;
2682       if (sc < 0 && pow2_or_zerop (-uc))
2683 	{
2684 	  if (maybe_gt (xsize, 0))
2685 	    xsize = -xsize;
2686 	  if (maybe_ne (xsize, 0))
2687 	    xsize += sc + 1;
2688 	  c -= sc + 1;
2689 	  return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2690 				     ysize, y, c);
2691 	}
2692     }
2693   if (GET_CODE (y) == AND && CONST_INT_P (XEXP (y, 1)))
2694     {
2695       HOST_WIDE_INT sc = INTVAL (XEXP (y, 1));
2696       unsigned HOST_WIDE_INT uc = sc;
2697       if (sc < 0 && pow2_or_zerop (-uc))
2698 	{
2699 	  if (maybe_gt (ysize, 0))
2700 	    ysize = -ysize;
2701 	  if (maybe_ne (ysize, 0))
2702 	    ysize += sc + 1;
2703 	  c += sc + 1;
2704 	  return memrefs_conflict_p (xsize, x,
2705 				     ysize, canon_rtx (XEXP (y, 0)), c);
2706 	}
2707     }
2708 
2709   if (CONSTANT_P (x))
2710     {
2711       poly_int64 cx, cy;
2712       if (poly_int_rtx_p (x, &cx) && poly_int_rtx_p (y, &cy))
2713 	{
2714 	  c += cy - cx;
2715 	  return offset_overlap_p (c, xsize, ysize);
2716 	}
2717 
2718       if (GET_CODE (x) == CONST)
2719 	{
2720 	  if (GET_CODE (y) == CONST)
2721 	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2722 				       ysize, canon_rtx (XEXP (y, 0)), c);
2723 	  else
2724 	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2725 				       ysize, y, c);
2726 	}
2727       if (GET_CODE (y) == CONST)
2728 	return memrefs_conflict_p (xsize, x, ysize,
2729 				   canon_rtx (XEXP (y, 0)), c);
2730 
2731       /* Assume a potential overlap for symbolic addresses that went
2732 	 through alignment adjustments (i.e., that have negative
2733 	 sizes), because we can't know how far they are from each
2734 	 other.  */
2735       if (CONSTANT_P (y))
2736 	return (maybe_lt (xsize, 0)
2737 		|| maybe_lt (ysize, 0)
2738 		|| offset_overlap_p (c, xsize, ysize));
2739 
2740       return -1;
2741     }
2742 
2743   return -1;
2744 }
2745 
2746 /* Functions to compute memory dependencies.
2747 
2748    Since we process the insns in execution order, we can build tables
2749    to keep track of what registers are fixed (and not aliased), what registers
2750    are varying in known ways, and what registers are varying in unknown
2751    ways.
2752 
2753    If both memory references are volatile, then there must always be a
2754    dependence between the two references, since their order cannot be
2755    changed.  A volatile and non-volatile reference can be interchanged
2756    though.
2757 
2758    We also must allow AND addresses, because they may generate accesses
2759    outside the object being referenced.  This is used to generate aligned
2760    addresses from unaligned addresses, for instance, the alpha
2761    storeqi_unaligned pattern.  */
2762 
2763 /* Read dependence: X is read after read in MEM takes place.  There can
2764    only be a dependence here if both reads are volatile, or if either is
2765    an explicit barrier.  */
2766 
2767 int
2768 read_dependence (const_rtx mem, const_rtx x)
2769 {
2770   if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2771     return true;
2772   if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2773       || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2774     return true;
2775   return false;
2776 }
2777 
2778 /* Look at the bottom of the COMPONENT_REF list for a DECL, and return it.  */
2779 
2780 static tree
2781 decl_for_component_ref (tree x)
2782 {
2783   do
2784     {
2785       x = TREE_OPERAND (x, 0);
2786     }
2787   while (x && TREE_CODE (x) == COMPONENT_REF);
2788 
2789   return x && DECL_P (x) ? x : NULL_TREE;
2790 }
2791 
2792 /* Walk up the COMPONENT_REF list in X and adjust *OFFSET to compensate
2793    for the offset of the field reference.  *KNOWN_P says whether the
2794    offset is known.  */
2795 
2796 static void
2797 adjust_offset_for_component_ref (tree x, bool *known_p,
2798 				 poly_int64 *offset)
2799 {
2800   if (!*known_p)
2801     return;
2802   do
2803     {
2804       tree xoffset = component_ref_field_offset (x);
2805       tree field = TREE_OPERAND (x, 1);
2806       if (!poly_int_tree_p (xoffset))
2807 	{
2808 	  *known_p = false;
2809 	  return;
2810 	}
2811 
2812       poly_offset_int woffset
2813 	= (wi::to_poly_offset (xoffset)
2814 	   + (wi::to_offset (DECL_FIELD_BIT_OFFSET (field))
2815 	      >> LOG2_BITS_PER_UNIT)
2816 	   + *offset);
2817       if (!woffset.to_shwi (offset))
2818 	{
2819 	  *known_p = false;
2820 	  return;
2821 	}
2822 
2823       x = TREE_OPERAND (x, 0);
2824     }
2825   while (x && TREE_CODE (x) == COMPONENT_REF);
2826 }
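/* Example (added; struct layout assumed): for the MEM_EXPR `s.inner.b',
   with `inner' at byte 8 of `s' and `b' at byte 4 of `inner', the loop
   above adds 4 and then 8 to *OFFSET, so the access is known to start
   12 bytes into `s'.  nonoverlapping_memrefs_p can then compare such
   accesses to the same decl purely by offset and size.  */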
2827 
2828 /* Return nonzero if we can determine the exprs corresponding to memrefs
2829    X and Y and they do not overlap.
2830    If LOOP_INVARIANT is set, skip offset-based disambiguation.  */
2831 
2832 int
2833 nonoverlapping_memrefs_p (const_rtx x, const_rtx y, bool loop_invariant)
2834 {
2835   tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y);
2836   rtx rtlx, rtly;
2837   rtx basex, basey;
2838   bool moffsetx_known_p, moffsety_known_p;
2839   poly_int64 moffsetx = 0, moffsety = 0;
2840   poly_int64 offsetx = 0, offsety = 0, sizex, sizey;
2841 
2842   /* Unless both have exprs, we can't tell anything.  */
2843   if (exprx == 0 || expry == 0)
2844     return 0;
2845 
2846   /* For spill-slot accesses make sure we have valid offsets.  */
2847   if ((exprx == get_spill_slot_decl (false)
2848        && ! MEM_OFFSET_KNOWN_P (x))
2849       || (expry == get_spill_slot_decl (false)
2850 	  && ! MEM_OFFSET_KNOWN_P (y)))
2851     return 0;
2852 
2853   /* If the field reference test failed, look at the DECLs involved.  */
2854   moffsetx_known_p = MEM_OFFSET_KNOWN_P (x);
2855   if (moffsetx_known_p)
2856     moffsetx = MEM_OFFSET (x);
2857   if (TREE_CODE (exprx) == COMPONENT_REF)
2858     {
2859       tree t = decl_for_component_ref (exprx);
2860       if (! t)
2861 	return 0;
2862       adjust_offset_for_component_ref (exprx, &moffsetx_known_p, &moffsetx);
2863       exprx = t;
2864     }
2865 
2866   moffsety_known_p = MEM_OFFSET_KNOWN_P (y);
2867   if (moffsety_known_p)
2868     moffsety = MEM_OFFSET (y);
2869   if (TREE_CODE (expry) == COMPONENT_REF)
2870     {
2871       tree t = decl_for_component_ref (expry);
2872       if (! t)
2873 	return 0;
2874       adjust_offset_for_component_ref (expry, &moffsety_known_p, &moffsety);
2875       expry = t;
2876     }
2877 
2878   if (! DECL_P (exprx) || ! DECL_P (expry))
2879     return 0;
2880 
2881   /* If we refer to different gimple registers, or one gimple register
2882      and one non-gimple-register, we know they can't overlap.  First,
2883      gimple registers don't have their addresses taken.  Now, there
2884      could be more than one stack slot for (different versions of) the
2885      same gimple register, but we can presumably tell they don't
2886      overlap based on offsets from stack base addresses elsewhere.
2887      It's important that we don't proceed to DECL_RTL, because gimple
2888      registers may not pass DECL_RTL_SET_P, and make_decl_rtl won't be
2889      able to do anything about them since no SSA information will have
2890      remained to guide it.  */
2891   if (is_gimple_reg (exprx) || is_gimple_reg (expry))
2892     return exprx != expry
2893       || (moffsetx_known_p && moffsety_known_p
2894 	  && MEM_SIZE_KNOWN_P (x) && MEM_SIZE_KNOWN_P (y)
2895 	  && !offset_overlap_p (moffsety - moffsetx,
2896 				MEM_SIZE (x), MEM_SIZE (y)));
2897 
2898   /* With invalid code we can end up storing into the constant pool.
2899      Bail out to avoid ICEing when creating RTL for this.
2900      See gfortran.dg/lto/20091028-2_0.f90.  */
2901   if (TREE_CODE (exprx) == CONST_DECL
2902       || TREE_CODE (expry) == CONST_DECL)
2903     return 1;
2904 
2905   /* If one decl is known to be a function or label in a function and
2906      the other is some kind of data, they can't overlap.  */
2907   if ((TREE_CODE (exprx) == FUNCTION_DECL
2908        || TREE_CODE (exprx) == LABEL_DECL)
2909       != (TREE_CODE (expry) == FUNCTION_DECL
2910 	  || TREE_CODE (expry) == LABEL_DECL))
2911     return 1;
2912 
2913   /* If either of the decls doesn't have DECL_RTL set (e.g. marked as
2914      living in multiple places), we can't tell anything.  Exception
2915      are FUNCTION_DECLs for which we can create DECL_RTL on demand.  */
2916   if ((!DECL_RTL_SET_P (exprx) && TREE_CODE (exprx) != FUNCTION_DECL)
2917       || (!DECL_RTL_SET_P (expry) && TREE_CODE (expry) != FUNCTION_DECL))
2918     return 0;
2919 
2920   rtlx = DECL_RTL (exprx);
2921   rtly = DECL_RTL (expry);
2922 
2923   /* If either RTL is not a MEM, it must be a REG or CONCAT, meaning they
2924      can't overlap unless they are the same because we never reuse that part
2925      of the stack frame used for locals for spilled pseudos.  */
2926   if ((!MEM_P (rtlx) || !MEM_P (rtly))
2927       && ! rtx_equal_p (rtlx, rtly))
2928     return 1;
2929 
2930   /* If we have MEMs referring to different address spaces (which can
2931      potentially overlap), we cannot easily tell from the addresses
2932      whether the references overlap.  */
2933   if (MEM_P (rtlx) && MEM_P (rtly)
2934       && MEM_ADDR_SPACE (rtlx) != MEM_ADDR_SPACE (rtly))
2935     return 0;
2936 
2937   /* Get the base and offsets of both decls.  If either is a register, we
2938      know both are and are the same, so use that as the base.  The only way
2939      we can avoid an overlap is if we can deduce that they are nonoverlapping
2940      pieces of that decl, which is very rare.  */
2941   basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx;
2942   basex = strip_offset_and_add (basex, &offsetx);
2943 
2944   basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly;
2945   basey = strip_offset_and_add (basey, &offsety);
2946 
2947   /* If the bases are different, we know they do not overlap if both
2948      are constants or if one is a constant and the other a pointer into the
2949      stack frame.  Otherwise a different base means we can't tell if they
2950      overlap or not.  */
2951   if (compare_base_decls (exprx, expry) == 0)
2952     return ((CONSTANT_P (basex) && CONSTANT_P (basey))
2953 	    || (CONSTANT_P (basex) && REG_P (basey)
2954 		&& REGNO_PTR_FRAME_P (REGNO (basey)))
2955 	    || (CONSTANT_P (basey) && REG_P (basex)
2956 		&& REGNO_PTR_FRAME_P (REGNO (basex))));
2957 
2958   /* Offset-based disambiguation is not appropriate for the loop-invariant case.  */
2959   if (loop_invariant)
2960     return 0;
2961 
2962   /* Offset-based disambiguation is OK even if we do not know that the
2963      declarations are necessarily different
2964      (i.e. compare_base_decls (exprx, expry) == -1).  */
2965 
2966   sizex = (!MEM_P (rtlx) ? poly_int64 (GET_MODE_SIZE (GET_MODE (rtlx)))
2967 	   : MEM_SIZE_KNOWN_P (rtlx) ? MEM_SIZE (rtlx)
2968 	   : -1);
2969   sizey = (!MEM_P (rtly) ? poly_int64 (GET_MODE_SIZE (GET_MODE (rtly)))
2970 	   : MEM_SIZE_KNOWN_P (rtly) ? MEM_SIZE (rtly)
2971 	   : -1);
2972 
2973   /* If we have an offset for either memref, it can update the values computed
2974      above.  */
2975   if (moffsetx_known_p)
2976     offsetx += moffsetx, sizex -= moffsetx;
2977   if (moffsety_known_p)
2978     offsety += moffsety, sizey -= moffsety;
2979 
2980   /* If a memref has both a size and an offset, we can use the smaller size.
2981      We can't do this if the offset isn't known because we must view this
2982      memref as being anywhere inside the DECL's MEM.  */
2983   if (MEM_SIZE_KNOWN_P (x) && moffsetx_known_p)
2984     sizex = MEM_SIZE (x);
2985   if (MEM_SIZE_KNOWN_P (y) && moffsety_known_p)
2986     sizey = MEM_SIZE (y);
2987 
2988   return !ranges_maybe_overlap_p (offsetx, sizex, offsety, sizey);
2989 }
2990 
2991 /* Helper for true_dependence and canon_true_dependence.
2992    Checks for true dependence: X is read after store in MEM takes place.
2993 
2994    If MEM_CANONICALIZED is FALSE, then X_ADDR and MEM_ADDR should be
2995    NULL_RTX, and the canonical addresses of MEM and X are both computed
2996    here.  If MEM_CANONICALIZED, then MEM must be already canonicalized.
2997 
2998    If X_ADDR is non-NULL, it is used in preference of XEXP (x, 0).
2999 
3000    Returns 1 if there is a true dependence, 0 otherwise.  */
3001 
3002 static int
3003 true_dependence_1 (const_rtx mem, machine_mode mem_mode, rtx mem_addr,
3004 		   const_rtx x, rtx x_addr, bool mem_canonicalized)
3005 {
3006   rtx true_mem_addr;
3007   rtx base;
3008   int ret;
3009 
3010   gcc_checking_assert (mem_canonicalized ? (mem_addr != NULL_RTX)
3011 		       : (mem_addr == NULL_RTX && x_addr == NULL_RTX));
3012 
3013   if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
3014     return 1;
3015 
3016   /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
3017      This is used in epilogue deallocation functions, and in cselib.  */
3018   if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
3019     return 1;
3020   if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
3021     return 1;
3022   if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
3023       || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
3024     return 1;
3025 
3026   if (! x_addr)
3027     x_addr = XEXP (x, 0);
3028   x_addr = get_addr (x_addr);
3029 
3030   if (! mem_addr)
3031     {
3032       mem_addr = XEXP (mem, 0);
3033       if (mem_mode == VOIDmode)
3034 	mem_mode = GET_MODE (mem);
3035     }
3036   true_mem_addr = get_addr (mem_addr);
3037 
3038   /* Read-only memory is by definition never modified, and therefore can't
3039      conflict with anything.  However, don't assume anything when AND
3040      addresses are involved and leave it to the code below to determine
3041      dependence.  We don't expect to find the read-only flag set on MEM, but
3042      stupid user tricks can produce them, so don't die.  */
3043   if (MEM_READONLY_P (x)
3044       && GET_CODE (x_addr) != AND
3045       && GET_CODE (true_mem_addr) != AND)
3046     return 0;
3047 
3048   /* If we have MEMs referring to different address spaces (which can
3049      potentially overlap), we cannot easily tell from the addresses
3050      whether the references overlap.  */
3051   if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
3052     return 1;
3053 
3054   base = find_base_term (x_addr);
3055   if (base && (GET_CODE (base) == LABEL_REF
3056 	       || (GET_CODE (base) == SYMBOL_REF
3057 		   && CONSTANT_POOL_ADDRESS_P (base))))
3058     return 0;
3059 
3060   rtx mem_base = find_base_term (true_mem_addr);
3061   if (! base_alias_check (x_addr, base, true_mem_addr, mem_base,
3062 			  GET_MODE (x), mem_mode))
3063     return 0;
3064 
3065   x_addr = canon_rtx (x_addr);
3066   if (!mem_canonicalized)
3067     mem_addr = canon_rtx (true_mem_addr);
3068 
3069   if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
3070 				 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
3071     return ret;
3072 
3073   if (mems_in_disjoint_alias_sets_p (x, mem))
3074     return 0;
3075 
3076   if (nonoverlapping_memrefs_p (mem, x, false))
3077     return 0;
3078 
3079   return rtx_refs_may_alias_p (x, mem, true);
3080 }
3081 
3082 /* True dependence: X is read after store in MEM takes place.  */
3083 
3084 int
3085 true_dependence (const_rtx mem, machine_mode mem_mode, const_rtx x)
3086 {
3087   return true_dependence_1 (mem, mem_mode, NULL_RTX,
3088 			    x, NULL_RTX, /*mem_canonicalized=*/false);
3089 }
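/* Usage sketch (added; simplified): for a store MEM to
   (mem:SI (symbol_ref ("a"))) and a later load X from
   (mem:SI (symbol_ref ("b"))), true_dependence (mem, SImode, x) returns
   0 because the base symbols differ, so the load may be moved above the
   store; with the same symbol and overlapping offsets it returns 1.  */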
3090 
3091 /* Canonical true dependence: X is read after store in MEM takes place.
3092    Variant of true_dependence which assumes MEM has already been
3093    canonicalized (hence we no longer do that here).
3094    The mem_addr argument has been added, since true_dependence_1 computed
3095    this value prior to canonicalizing.  */
3096 
3097 int
3098 canon_true_dependence (const_rtx mem, machine_mode mem_mode, rtx mem_addr,
3099 		       const_rtx x, rtx x_addr)
3100 {
3101   return true_dependence_1 (mem, mem_mode, mem_addr,
3102 			    x, x_addr, /*mem_canonicalized=*/true);
3103 }
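
/* A caller that tests one store against many loads can therefore
   canonicalize MEM and compute MEM_ADDR once and reuse them for every
   canon_true_dependence query, instead of letting true_dependence redo
   that work per load.  */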
3104 
3105 /* Returns nonzero if a write to X might alias a previous read from
3106    (or, if WRITEP is true, a write to) MEM.
3107    If X_CANONICALIZED is true, then X_ADDR is the canonicalized address of X,
3108    and X_MODE the mode for that access.
3109    If MEM_CANONICALIZED is true, MEM is canonicalized.  */
3110 
3111 static int
3112 write_dependence_p (const_rtx mem,
3113 		    const_rtx x, machine_mode x_mode, rtx x_addr,
3114 		    bool mem_canonicalized, bool x_canonicalized, bool writep)
3115 {
3116   rtx mem_addr;
3117   rtx true_mem_addr, true_x_addr;
3118   rtx base;
3119   int ret;
3120 
3121   gcc_checking_assert (x_canonicalized
3122 		       ? (x_addr != NULL_RTX
3123 			  && (x_mode != VOIDmode || GET_MODE (x) == VOIDmode))
3124 		       : (x_addr == NULL_RTX && x_mode == VOIDmode));
3125 
3126   if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
3127     return 1;
3128 
3129   /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
3130      This is used in epilogue deallocation functions.  */
3131   if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
3132     return 1;
3133   if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
3134     return 1;
3135   if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
3136       || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
3137     return 1;
3138 
3139   if (!x_addr)
3140     x_addr = XEXP (x, 0);
3141   true_x_addr = get_addr (x_addr);
3142 
3143   mem_addr = XEXP (mem, 0);
3144   true_mem_addr = get_addr (mem_addr);
3145 
3146   /* A read from read-only memory can't conflict with read-write memory.
3147      Don't assume anything when AND addresses are involved; leave it to
3148      the code below to determine dependence.  */
3149   if (!writep
3150       && MEM_READONLY_P (mem)
3151       && GET_CODE (true_x_addr) != AND
3152       && GET_CODE (true_mem_addr) != AND)
3153     return 0;
3154 
3155   /* If we have MEMs referring to different address spaces (which can
3156      potentially overlap), we cannot easily tell from the addresses
3157      whether the references overlap.  */
3158   if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
3159     return 1;
3160 
3161   base = find_base_term (true_mem_addr);
3162   if (! writep
3163       && base
3164       && (GET_CODE (base) == LABEL_REF
3165 	  || (GET_CODE (base) == SYMBOL_REF
3166 	      && CONSTANT_POOL_ADDRESS_P (base))))
3167     return 0;
3168 
3169   rtx x_base = find_base_term (true_x_addr);
3170   if (! base_alias_check (true_x_addr, x_base, true_mem_addr, base,
3171 			  GET_MODE (x), GET_MODE (mem)))
3172     return 0;
3173 
3174   if (!x_canonicalized)
3175     {
3176       x_addr = canon_rtx (true_x_addr);
3177       x_mode = GET_MODE (x);
3178     }
3179   if (!mem_canonicalized)
3180     mem_addr = canon_rtx (true_mem_addr);
3181 
3182   if ((ret = memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
3183 				 GET_MODE_SIZE (x_mode), x_addr, 0)) != -1)
3184     return ret;
3185 
3186   if (nonoverlapping_memrefs_p (x, mem, false))
3187     return 0;
3188 
3189   return rtx_refs_may_alias_p (x, mem, false);
3190 }
3191 
3192 /* Anti dependence: X is written after read in MEM takes place.  */
3193 
3194 int
3195 anti_dependence (const_rtx mem, const_rtx x)
3196 {
3197   return write_dependence_p (mem, x, VOIDmode, NULL_RTX,
3198 			     /*mem_canonicalized=*/false,
3199 			     /*x_canonicalized*/false, /*writep=*/false);
3200 }
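
/* Illustration (P and Q hypothetical):

	x = *q;		the earlier read described by MEM
	*p = 1;		the later write described by X

   anti_dependence returns nonzero when P and Q may overlap, so the write
   must not be moved above the read it could clobber.  */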
3201 
3202 /* Likewise, but we already have a canonicalized MEM, and X_ADDR for X.
3203    Also, consider X in X_MODE (which might be from an enclosing
3204    STRICT_LOW_PART / ZERO_EXTRACT).
3205    If MEM_CANONICALIZED is true, MEM is canonicalized.  */
3206 
3207 int
3208 canon_anti_dependence (const_rtx mem, bool mem_canonicalized,
3209 		       const_rtx x, machine_mode x_mode, rtx x_addr)
3210 {
3211   return write_dependence_p (mem, x, x_mode, x_addr,
3212 			     mem_canonicalized, /*x_canonicalized=*/true,
3213 			     /*writep=*/false);
3214 }
3215 
3216 /* Output dependence: X is written after store in MEM takes place.  */
3217 
3218 int
3219 output_dependence (const_rtx mem, const_rtx x)
3220 {
3221   return write_dependence_p (mem, x, VOIDmode, NULL_RTX,
3222 			     /*mem_canonicalized=*/false,
3223 			     /*x_canonicalized*/false, /*writep=*/true);
3224 }
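
/* Illustration (P and Q hypothetical):

	*q = 1;		the earlier store described by MEM
	*p = 2;		the later store described by X

   output_dependence returns nonzero when the two stores may overlap, so
   their order must be kept to preserve the final memory contents.  */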
3225 
3226 /* Likewise, but we already have a canonicalized MEM, and X_ADDR for X.
3227    Also, consider X in X_MODE (which might be from an enclosing
3228    STRICT_LOW_PART / ZERO_EXTRACT).
3229    If MEM_CANONICALIZED is true, MEM is canonicalized.  */
3230 
3231 int
3232 canon_output_dependence (const_rtx mem, bool mem_canonicalized,
3233 			 const_rtx x, machine_mode x_mode, rtx x_addr)
3234 {
3235   return write_dependence_p (mem, x, x_mode, x_addr,
3236 			     mem_canonicalized, /*x_canonicalized=*/true,
3237 			     /*writep=*/true);
3238 }
3239 
3240 
3241 
3242 /* Check whether X may be aliased with MEM.  Don't do offset-based
3243    memory disambiguation or TBAA.  */
3244 int
3245 may_alias_p (const_rtx mem, const_rtx x)
3246 {
3247   rtx x_addr, mem_addr;
3248 
3249   if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
3250     return 1;
3251 
3252   /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
3253      This is used in epilogue deallocation functions.  */
3254   if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
3255     return 1;
3256   if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
3257     return 1;
3258   if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
3259       || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
3260     return 1;
3261 
3262   x_addr = XEXP (x, 0);
3263   x_addr = get_addr (x_addr);
3264 
3265   mem_addr = XEXP (mem, 0);
3266   mem_addr = get_addr (mem_addr);
3267 
3268   /* Read-only memory is by definition never modified, and therefore can't
3269      conflict with anything.  However, don't assume anything when AND
3270      addresses are involved; leave it to the code below to determine
3271      dependence.  We don't expect to find the read-only flag set on MEM,
3272      but stupid user tricks can produce such MEMs, so don't die.  */
3273   if (MEM_READONLY_P (x)
3274       && GET_CODE (x_addr) != AND
3275       && GET_CODE (mem_addr) != AND)
3276     return 0;
3277 
3278   /* If we have MEMs referring to different address spaces (which can
3279      potentially overlap), we cannot easily tell from the addresses
3280      whether the references overlap.  */
3281   if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
3282     return 1;
3283 
3284   rtx x_base = find_base_term (x_addr);
3285   rtx mem_base = find_base_term (mem_addr);
3286   if (! base_alias_check (x_addr, x_base, mem_addr, mem_base,
3287 			  GET_MODE (x), GET_MODE (mem_addr)))
3288     return 0;
3289 
3290   if (nonoverlapping_memrefs_p (mem, x, true))
3291     return 0;
3292 
3293   /* TBAA is not valid for loop_invariant.  */
3294   return rtx_refs_may_alias_p (x, mem, false);
3295 }
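
/* Compared with the dependence tests above, this query omits the
   offset-based memrefs_conflict_p check entirely and, like
   write_dependence_p but unlike true_dependence_1, calls
   rtx_refs_may_alias_p with tbaa_p false, so its answer does not rely on
   relative offsets or on type-based aliasing rules.  */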
3296 
3297 void
3298 init_alias_target (void)
3299 {
3300   int i;
3301 
3302   if (!arg_base_value)
3303     arg_base_value = gen_rtx_ADDRESS (VOIDmode, 0);
3304 
3305   memset (static_reg_base_value, 0, sizeof static_reg_base_value);
3306 
3307   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3308     /* Check whether this register can hold an incoming pointer
3309        argument.  FUNCTION_ARG_REGNO_P tests outgoing register
3310        numbers, so translate if necessary due to register windows.  */
3311     if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i))
3312 	&& targetm.hard_regno_mode_ok (i, Pmode))
3313       static_reg_base_value[i] = arg_base_value;
3314 
3315   /* RTL code is required to be consistent about whether it uses the
3316      stack pointer, the frame pointer or the argument pointer to
3317      access a given area of the frame.  We can therefore use the
3318      base address to distinguish between the different areas.  */
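  /* For example, (plus (reg sp) (const_int 8)) and
     (plus (reg fp) (const_int -4)) end up with different base values, so
     base_alias_check can treat the two accesses as disjoint without
     knowing the actual frame layout (illustrative addresses only).  */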
3319   static_reg_base_value[STACK_POINTER_REGNUM]
3320     = unique_base_value (UNIQUE_BASE_VALUE_SP);
3321   static_reg_base_value[ARG_POINTER_REGNUM]
3322     = unique_base_value (UNIQUE_BASE_VALUE_ARGP);
3323   static_reg_base_value[FRAME_POINTER_REGNUM]
3324     = unique_base_value (UNIQUE_BASE_VALUE_FP);
3325 
3326   /* The above rules extend post-reload, with eliminations applying
3327      consistently to each of the three pointers.  Cope with cases in
3328      which the frame pointer is eliminated to the hard frame pointer
3329      rather than the stack pointer.  */
3330   if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
3331     static_reg_base_value[HARD_FRAME_POINTER_REGNUM]
3332       = unique_base_value (UNIQUE_BASE_VALUE_HFP);
3333 }
3334 
3335 /* Set MEMORY_MODIFIED when X modifies DATA (which is assumed
3336    to be a memory reference).  */
3337 static bool memory_modified;
3338 static void
3339 memory_modified_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
3340 {
3341   if (MEM_P (x))
3342     {
3343       if (anti_dependence (x, (const_rtx)data) || output_dependence (x, (const_rtx)data))
3344 	memory_modified = true;
3345     }
3346 }
3347 
3348 
3349 /* Return true when INSN possibly modifies the memory contents of MEM
3350    (i.e. the memory at that address may be written).  */
3351 bool
3352 memory_modified_in_insn_p (const_rtx mem, const_rtx insn)
3353 {
3354   if (!INSN_P (insn))
3355     return false;
3356   /* Conservatively assume all non-readonly MEMs might be modified in
3357      calls.  */
3358   if (CALL_P (insn))
3359     return true;
3360   memory_modified = false;
3361   note_stores (as_a<const rtx_insn *> (insn), memory_modified_1,
3362 	       CONST_CAST_RTX(mem));
3363   return memory_modified;
3364 }
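
/* A typical use is to decide whether a value read from MEM before INSN
   is still valid after it: if memory_modified_in_insn_p (mem, insn)
   returns true, some store in INSN may have clobbered MEM and the old
   value must not be reused.  */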
3365 
3366 /* Initialize the aliasing machinery.  Initialize the REG_KNOWN_VALUE
3367    array.  */
3368 
3369 void
3370 init_alias_analysis (void)
3371 {
3372   const bool frame_pointer_eliminated
3373     = reload_completed
3374       && !frame_pointer_needed
3375       && targetm.can_eliminate (FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM);
3376   unsigned int maxreg = max_reg_num ();
3377   int changed, pass;
3378   int i;
3379   unsigned int ui;
3380   rtx_insn *insn;
3381   rtx val;
3382   int rpo_cnt;
3383   int *rpo;
3384 
3385   timevar_push (TV_ALIAS_ANALYSIS);
3386 
3387   vec_safe_grow_cleared (reg_known_value, maxreg - FIRST_PSEUDO_REGISTER,
3388 			 true);
3389   reg_known_equiv_p = sbitmap_alloc (maxreg - FIRST_PSEUDO_REGISTER);
3390   bitmap_clear (reg_known_equiv_p);
3391 
3392   /* If we have memory allocated from the previous run, use it.  */
3393   if (old_reg_base_value)
3394     reg_base_value = old_reg_base_value;
3395 
3396   if (reg_base_value)
3397     reg_base_value->truncate (0);
3398 
3399   vec_safe_grow_cleared (reg_base_value, maxreg, true);
3400 
3401   new_reg_base_value = XNEWVEC (rtx, maxreg);
3402   reg_seen = sbitmap_alloc (maxreg);
3403 
3404   /* The basic idea is that each pass through this loop will use the
3405      "constant" information from the previous pass to propagate alias
3406      information through another level of assignments.
3407 
3408      The propagation is done on the CFG in reverse post-order, to propagate
3409      things forward as far as possible in each iteration.
3410 
3411      This could get expensive if the assignment chains are long.  Maybe
3412      we should throttle the number of iterations, possibly based on
3413      the optimization level or flag_expensive_optimizations.
3414 
3415      We could propagate more information in the first pass by making use
3416      of DF_REG_DEF_COUNT to determine immediately that the alias information
3417      for a pseudo is "constant".
3418 
3419      A program with an uninitialized variable can cause an infinite loop
3420      here.  Instead of doing a full dataflow analysis to detect such problems
3421      we just cap the number of iterations for the loop.
3422 
3423      The state of the arrays for the set chain in question does not matter
3424      since the program has undefined behavior.  */
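  /* As a sketch of that propagation, for a chain such as

	 r100 = &a
	 r101 = r100 + 8
	 r102 = r101

     the base value recorded for r100 is copied into r101 and then r102
     as their sets are visited; because the walk is in reverse post-order,
     a straight-line chain usually resolves in a single pass, and the
     outer loop repeats only while some reg_base_value entry changed
     (capped at MAX_ALIAS_LOOP_PASSES).  Hypothetical pseudo registers,
     for illustration only.  */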
3425 
3426   rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
3427   rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3428 
3429   pass = 0;
3430   do
3431     {
3432       /* Assume nothing will change this iteration of the loop.  */
3433       changed = 0;
3434 
3435       /* We want to assign the same IDs each iteration of this loop, so
3436 	 start counting from one each iteration of the loop.  */
3437       unique_id = 1;
3438 
3439       /* We're at the start of the function each iteration through the
3440 	 loop, so we're copying arguments.  */
3441       copying_arguments = true;
3442 
3443       /* Wipe the potential alias information clean for this pass.  */
3444       memset (new_reg_base_value, 0, maxreg * sizeof (rtx));
3445 
3446       /* Wipe the reg_seen array clean.  */
3447       bitmap_clear (reg_seen);
3448 
3449       /* Initialize the alias information for this pass.  */
3450       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3451 	if (static_reg_base_value[i]
3452 	    /* Don't treat the hard frame pointer as special if we
3453 	       eliminated the frame pointer to the stack pointer.  */
3454 	    && !(i == HARD_FRAME_POINTER_REGNUM && frame_pointer_eliminated))
3455 	  {
3456 	    new_reg_base_value[i] = static_reg_base_value[i];
3457 	    bitmap_set_bit (reg_seen, i);
3458 	  }
3459 
3460       /* Walk the insns adding values to the new_reg_base_value array.  */
3461       for (i = 0; i < rpo_cnt; i++)
3462 	{
3463 	  basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
3464 	  FOR_BB_INSNS (bb, insn)
3465 	    {
3466 	      if (NONDEBUG_INSN_P (insn))
3467 		{
3468 		  rtx note, set;
3469 
3470 		  /* Treat the hard frame pointer as special unless we
3471 		     eliminated the frame pointer to the stack pointer.  */
3472 		  if (!frame_pointer_eliminated
3473 		      && modified_in_p (hard_frame_pointer_rtx, insn))
3474 		    continue;
3475 
3476 		  /* If this insn has a noalias note, process it.  Otherwise,
3477 		     scan for sets.  A simple set will have no side effects
3478 		     which could change the base value of any other register.  */
3479 		  if (GET_CODE (PATTERN (insn)) == SET
3480 		      && REG_NOTES (insn) != 0
3481 		      && find_reg_note (insn, REG_NOALIAS, NULL_RTX))
3482 		    record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL);
3483 		  else
3484 		    note_stores (insn, record_set, NULL);
3485 
3486 		  set = single_set (insn);
3487 
3488 		  if (set != 0
3489 		      && REG_P (SET_DEST (set))
3490 		      && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3491 		    {
3492 		      unsigned int regno = REGNO (SET_DEST (set));
3493 		      rtx src = SET_SRC (set);
3494 		      rtx t;
3495 
3496 		      note = find_reg_equal_equiv_note (insn);
3497 		      if (note && REG_NOTE_KIND (note) == REG_EQUAL
3498 			  && DF_REG_DEF_COUNT (regno) != 1)
3499 			note = NULL_RTX;
3500 
3501 		      poly_int64 offset;
3502 		      if (note != NULL_RTX
3503 			  && GET_CODE (XEXP (note, 0)) != EXPR_LIST
3504 			  && ! rtx_varies_p (XEXP (note, 0), 1)
3505 			  && ! reg_overlap_mentioned_p (SET_DEST (set),
3506 							XEXP (note, 0)))
3507 			{
3508 			  set_reg_known_value (regno, XEXP (note, 0));
3509 			  set_reg_known_equiv_p (regno,
3510 						 REG_NOTE_KIND (note) == REG_EQUIV);
3511 			}
3512 		      else if (DF_REG_DEF_COUNT (regno) == 1
3513 			       && GET_CODE (src) == PLUS
3514 			       && REG_P (XEXP (src, 0))
3515 			       && (t = get_reg_known_value (REGNO (XEXP (src, 0))))
3516 			       && poly_int_rtx_p (XEXP (src, 1), &offset))
3517 			{
3518 			  t = plus_constant (GET_MODE (src), t, offset);
3519 			  set_reg_known_value (regno, t);
3520 			  set_reg_known_equiv_p (regno, false);
3521 			}
3522 		      else if (DF_REG_DEF_COUNT (regno) == 1
3523 			       && ! rtx_varies_p (src, 1))
3524 			{
3525 			  set_reg_known_value (regno, src);
3526 			  set_reg_known_equiv_p (regno, false);
3527 			}
3528 		    }
3529 		}
3530 	      else if (NOTE_P (insn)
3531 		       && NOTE_KIND (insn) == NOTE_INSN_FUNCTION_BEG)
3532 		copying_arguments = false;
3533 	    }
3534 	}
3535 
3536       /* Now propagate values from new_reg_base_value to reg_base_value.  */
3537       gcc_assert (maxreg == (unsigned int) max_reg_num ());
3538 
3539       for (ui = 0; ui < maxreg; ui++)
3540 	{
3541 	  if (new_reg_base_value[ui]
3542 	      && new_reg_base_value[ui] != (*reg_base_value)[ui]
3543 	      && ! rtx_equal_p (new_reg_base_value[ui], (*reg_base_value)[ui]))
3544 	    {
3545 	      (*reg_base_value)[ui] = new_reg_base_value[ui];
3546 	      changed = 1;
3547 	    }
3548 	}
3549     }
3550   while (changed && ++pass < MAX_ALIAS_LOOP_PASSES);
3551   XDELETEVEC (rpo);
3552 
3553   /* Fill in the remaining entries.  */
3554   FOR_EACH_VEC_ELT (*reg_known_value, i, val)
3555     {
3556       int regno = i + FIRST_PSEUDO_REGISTER;
3557       if (! val)
3558 	set_reg_known_value (regno, regno_reg_rtx[regno]);
3559     }
3560 
3561   /* Clean up.  */
3562   free (new_reg_base_value);
3563   new_reg_base_value = 0;
3564   sbitmap_free (reg_seen);
3565   reg_seen = 0;
3566   timevar_pop (TV_ALIAS_ANALYSIS);
3567 }
3568 
3569 /* Equate REG_BASE_VALUE (reg1) to REG_BASE_VALUE (reg2).
3570    Special API for var-tracking pass purposes.  */
3571 
3572 void
3573 vt_equate_reg_base_value (const_rtx reg1, const_rtx reg2)
3574 {
3575   (*reg_base_value)[REGNO (reg1)] = REG_BASE_VALUE (reg2);
3576 }
3577 
3578 void
3579 end_alias_analysis (void)
3580 {
3581   old_reg_base_value = reg_base_value;
3582   vec_free (reg_known_value);
3583   sbitmap_free (reg_known_equiv_p);
3584 }
3585 
3586 void
3587 dump_alias_stats_in_alias_c (FILE *s)
3588 {
3589   fprintf (s, "  TBAA oracle: %llu disambiguations %llu queries\n"
3590 	      "               %llu are in alias set 0\n"
3591 	      "               %llu queries asked about the same object\n"
3592 	      "               %llu queries asked about the same alias set\n"
3593 	      "               %llu access volatile\n"
3594 	      "               %llu are dependent in the DAG\n"
3595 	      "               %llu are aritificially in conflict with void *\n",
3596 	   alias_stats.num_disambiguated,
3597 	   alias_stats.num_alias_zero + alias_stats.num_same_alias_set
3598 	   + alias_stats.num_same_objects + alias_stats.num_volatile
3599 	   + alias_stats.num_dag + alias_stats.num_disambiguated
3600 	   + alias_stats.num_universal,
3601 	   alias_stats.num_alias_zero, alias_stats.num_same_alias_set,
3602 	   alias_stats.num_same_objects, alias_stats.num_volatile,
3603 	   alias_stats.num_dag, alias_stats.num_universal);
3604 }
3605 #include "gt-alias.h"
3606