xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/alias.c (revision d909946ca08dceb44d7d0f22ec9488679695d976)
1 /* Alias analysis for GNU C
2    Copyright (C) 1997-2013 Free Software Foundation, Inc.
3    Contributed by John Carr (jfc@mit.edu).
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tm_p.h"
28 #include "function.h"
29 #include "alias.h"
30 #include "emit-rtl.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "basic-block.h"
34 #include "flags.h"
35 #include "diagnostic-core.h"
36 #include "cselib.h"
37 #include "splay-tree.h"
38 #include "ggc.h"
39 #include "langhooks.h"
40 #include "timevar.h"
41 #include "dumpfile.h"
42 #include "target.h"
43 #include "cgraph.h"
44 #include "df.h"
45 #include "tree-ssa-alias.h"
46 #include "pointer-set.h"
47 #include "tree-flow.h"
48 
49 /* The aliasing API provided here solves related but different problems:
50 
51    Say there exists (in C)
52 
53    struct X {
54      struct Y y1;
55      struct Z z2;
56    } x1, *px1, *px2;
57 
58    struct Y y2, *py;
59    struct Z z2, *pz;
60 
61 
62    py = &x1.y1;
63    px2 = &x1;
64 
65    Consider the four questions:
66 
67    Can a store to x1 interfere with px2->y1?
68    Can a store to x1 interfere with px2->z2?
69    Can a store to x1 change the value pointed to by py?
70    Can a store to x1 change the value pointed to by pz?
71 
72    The answer to these questions can be yes, yes, yes, and maybe.
73 
74    The first two questions can be answered with a simple examination
75    of the type system.  If structure X contains a field of type Y then
76    a store through a pointer to an X can overwrite any field that is
77    contained (recursively) in an X (unless we know that px1 != px2).
78 
79    The last two questions can be solved in the same way as the first
80    two questions but this is too conservative.  The observation is
81    that in some cases we can know which (if any) fields are addressed
82    and if those addresses are used in bad ways.  This analysis may be
83    language specific.  In C, arbitrary operations may be applied to
84    pointers.  However, there is some indication that this may be too
85    conservative for some C++ types.
86 
87    The pass ipa-type-escape does this analysis for the types whose
88    instances do not escape across the compilation boundary.
89 
90    Historically in GCC, these two problems were combined, and a single
91    data structure was used to represent the solution to these
92    problems.  We now have two similar but different data structures.
93    The data structure to solve the last two questions is similar to
94    the first, but does not contain the fields whose addresses are never
95    taken.  For types that do escape the compilation unit, the data
96    structures will have identical information.
97 */
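
/* For instance, after the assignments above, an assignment to all of
   x1 must be assumed to change `px2->y1' and `px2->z2' (px2 may equal
   &x1) as well as `*py' (py does point into x1), while `*pz' is
   affected only if pz was somehow made to point into x1, which the
   type system alone cannot rule out in C.  */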
98 
99 /* The alias sets assigned to MEMs assist the back-end in determining
100    which MEMs can alias which other MEMs.  In general, two MEMs in
101    different alias sets cannot alias each other, with one important
102    exception.  Consider something like:
103 
104      struct S { int i; double d; };
105 
106    a store to an `S' can alias something of either type `int' or type
107    `double'.  (However, a store to an `int' cannot alias a `double'
108    and vice versa.)  We indicate this via a tree structure that looks
109    like:
110 	   struct S
111 	    /   \
112 	   /     \
113 	 |/_     _\|
114 	 int    double
115 
116    (The arrows are directed and point downwards.)
117    In this situation we say the alias set for `struct S' is the
118    `superset' and that those for `int' and `double' are `subsets'.
119 
120    To see whether two alias sets can point to the same memory, we must
121    see if either alias set is a subset of the other. We need not trace
122    past immediate descendants, however, since we propagate all
123    grandchildren up one level.
124 
125    Alias set zero is implicitly a superset of all other alias sets.
126    However, there is no actual entry for alias set zero.  It is an
127    error to attempt to explicitly construct a subset of zero.  */
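
/* As a sketch (using functions defined later in this file), the
   `struct S' picture above corresponds to recording:

     alias_set_type s_set   = new_alias_set ();
     alias_set_type int_set = new_alias_set ();
     alias_set_type dbl_set = new_alias_set ();
     record_alias_subset (s_set, int_set);
     record_alias_subset (s_set, dbl_set);

   after which alias_sets_conflict_p (s_set, int_set) and
   alias_sets_conflict_p (s_set, dbl_set) are nonzero, while
   alias_sets_conflict_p (int_set, dbl_set) remains zero.
   record_component_aliases below performs exactly this kind of
   recording for real types.  */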
128 
129 struct GTY(()) alias_set_entry_d {
130   /* The alias set number, as stored in MEM_ALIAS_SET.  */
131   alias_set_type alias_set;
132 
133   /* Nonzero if it would have a child of zero: this effectively makes this
134      alias set the same as alias set zero.  */
135   int has_zero_child;
136 
137   /* The children of the alias set.  These are not just the immediate
138      children, but, in fact, all descendants.  So, if we have:
139 
140        struct T { struct S s; float f; }
141 
142      continuing our example above, the children here will be all of
143      `int', `double', `float', and `struct S'.  */
144   splay_tree GTY((param1_is (int), param2_is (int))) children;
145 };
146 typedef struct alias_set_entry_d *alias_set_entry;
147 
148 static int rtx_equal_for_memref_p (const_rtx, const_rtx);
149 static int memrefs_conflict_p (int, rtx, int, rtx, HOST_WIDE_INT);
150 static void record_set (rtx, const_rtx, void *);
151 static int base_alias_check (rtx, rtx, rtx, rtx, enum machine_mode,
152 			     enum machine_mode);
153 static rtx find_base_value (rtx);
154 static int mems_in_disjoint_alias_sets_p (const_rtx, const_rtx);
155 static int insert_subset_children (splay_tree_node, void*);
156 static alias_set_entry get_alias_set_entry (alias_set_type);
157 static bool nonoverlapping_component_refs_p (const_rtx, const_rtx);
158 static tree decl_for_component_ref (tree);
159 static int write_dependence_p (const_rtx,
160 			       const_rtx, enum machine_mode, rtx,
161 			       bool, bool, bool);
162 
163 static void memory_modified_1 (rtx, const_rtx, void *);
164 
165 /* Set up all info needed to perform alias analysis on memory references.  */
166 
167 /* Returns the size in bytes of the mode of X.  */
168 #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
169 
170 /* Cap the number of passes we make over the insns propagating alias
171    information through set chains.
172    ??? 10 is a completely arbitrary choice.  This should be based on the
173    maximum loop depth in the CFG, but we do not have this information
174    available (even if current_loops _is_ available).  */
175 #define MAX_ALIAS_LOOP_PASSES 10
176 
177 /* reg_base_value[N] gives an address to which register N is related.
178    If all sets after the first add or subtract to the current value
179    or otherwise modify it so it does not point to a different top level
180    object, reg_base_value[N] is equal to the address part of the source
181    of the first set.
182 
183    A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF.  ADDRESS
184    expressions represent three types of base:
185 
186      1. incoming arguments.  There is just one ADDRESS to represent all
187 	arguments, since we do not know at this level whether accesses
188 	based on different arguments can alias.  The ADDRESS has id 0.
189 
190      2. stack_pointer_rtx, frame_pointer_rtx, hard_frame_pointer_rtx
191 	(if distinct from frame_pointer_rtx) and arg_pointer_rtx.
192 	Each of these rtxes has a separate ADDRESS associated with it,
193 	each with a negative id.
194 
195 	GCC is (and is required to be) precise in which register it
196 	chooses to access a particular region of the stack.  We can therefore
197 	assume that accesses based on one of these rtxes do not alias
198 	accesses based on another of these rtxes.
199 
200      3. bases that are derived from malloc()ed memory (REG_NOALIAS).
201 	Each such piece of memory has a separate ADDRESS associated
202 	with it, each with an id greater than 0.
203 
204    Accesses based on one ADDRESS do not alias accesses based on other
205    ADDRESSes.  Accesses based on ADDRESSes in groups (2) and (3) do not
206    alias globals either; the ADDRESSes have Pmode to indicate this.
207    The ADDRESS in group (1) _may_ alias globals; it has VOIDmode to
208    indicate this.  */
209 
210 static GTY(()) vec<rtx, va_gc> *reg_base_value;
211 static rtx *new_reg_base_value;
212 
213 /* The single VOIDmode ADDRESS that represents all argument bases.
214    It has id 0.  */
215 static GTY(()) rtx arg_base_value;
216 
217 /* Used to allocate unique ids to each REG_NOALIAS ADDRESS.  */
218 static int unique_id;
219 
220 /* We preserve a copy of the old array around to reduce the amount of
221    garbage produced.  About 8% of the garbage produced was attributed
222    to this array.  */
223 static GTY((deletable)) vec<rtx, va_gc> *old_reg_base_value;
224 
225 /* Values of XINT (address, 0) of Pmode ADDRESS rtxes for special
226    registers.  */
227 #define UNIQUE_BASE_VALUE_SP	-1
228 #define UNIQUE_BASE_VALUE_ARGP	-2
229 #define UNIQUE_BASE_VALUE_FP	-3
230 #define UNIQUE_BASE_VALUE_HFP	-4
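
/* These ids are wrapped into Pmode ADDRESS rtxes by unique_base_value
   below; e.g. the ADDRESS standing for all stack-pointer-based
   accesses is unique_base_value (UNIQUE_BASE_VALUE_SP).  */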
231 
232 #define static_reg_base_value \
233   (this_target_rtl->x_static_reg_base_value)
234 
235 #define REG_BASE_VALUE(X)					\
236   (REGNO (X) < vec_safe_length (reg_base_value)			\
237    ? (*reg_base_value)[REGNO (X)] : 0)
238 
239 /* Vector indexed by N giving the initial (unchanging) value known for
240    pseudo-register N.  This vector is initialized in init_alias_analysis,
241    and does not change until end_alias_analysis is called.  */
242 static GTY(()) vec<rtx, va_gc> *reg_known_value;
243 
244 /* Vector recording for each reg_known_value whether it is due to a
245    REG_EQUIV note.  Future passes (viz., reload) may replace the
246    pseudo with the equivalent expression and so we account for the
247    dependences that would be introduced if that happens.
248 
249    The REG_EQUIV notes created in assign_parms may mention the arg
250    pointer, and there are explicit insns in the RTL that modify the
251    arg pointer.  Thus we must ensure that such insns don't get
252    scheduled across each other because that would invalidate the
253    REG_EQUIV notes.  One could argue that the REG_EQUIV notes are
254    wrong, but solving the problem in the scheduler will likely give
255    better code, so we do it here.  */
256 static sbitmap reg_known_equiv_p;
257 
258 /* True when scanning insns from the start of the rtl to the
259    NOTE_INSN_FUNCTION_BEG note.  */
260 static bool copying_arguments;
261 
262 
263 /* The splay-tree used to store the various alias set entries.  */
264 static GTY (()) vec<alias_set_entry, va_gc> *alias_sets;
265 
266 /* Build a decomposed reference object for querying the alias-oracle
267    from the MEM rtx and store it in *REF.
268    Returns false if MEM is not suitable for the alias-oracle.  */
269 
270 static bool
271 ao_ref_from_mem (ao_ref *ref, const_rtx mem)
272 {
273   tree expr = MEM_EXPR (mem);
274   tree base;
275 
276   if (!expr)
277     return false;
278 
279   ao_ref_init (ref, expr);
280 
281   /* Get the base of the reference and see if we have to reject or
282      adjust it.  */
283   base = ao_ref_base (ref);
284   if (base == NULL_TREE)
285     return false;
286 
287   /* The tree oracle doesn't like bases that are neither decls
288      nor indirect references of SSA names.  */
289   if (!(DECL_P (base)
290 	|| (TREE_CODE (base) == MEM_REF
291 	    && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
292 	|| (TREE_CODE (base) == TARGET_MEM_REF
293 	    && TREE_CODE (TMR_BASE (base)) == SSA_NAME)))
294     return false;
295 
296   /* If this is a reference based on a partitioned decl, replace the
297      base with a MEM_REF of the pointer representative we
298      created during stack slot partitioning.  */
299   if (TREE_CODE (base) == VAR_DECL
300       && ! is_global_var (base)
301       && cfun->gimple_df->decls_to_pointers != NULL)
302     {
303       void *namep;
304       namep = pointer_map_contains (cfun->gimple_df->decls_to_pointers, base);
305       if (namep)
306 	ref->base = build_simple_mem_ref (*(tree *)namep);
307     }
308 
309   ref->ref_alias_set = MEM_ALIAS_SET (mem);
310 
311   /* If MEM_OFFSET or MEM_SIZE is unknown, what we got from MEM_EXPR
312      is conservative, so trust it.  */
313   if (!MEM_OFFSET_KNOWN_P (mem)
314       || !MEM_SIZE_KNOWN_P (mem))
315     return true;
316 
317   /* If the base decl is a parameter we can have negative MEM_OFFSET in
318      case of promoted subregs on bigendian targets.  Trust the MEM_EXPR
319      here.  */
320   if (MEM_OFFSET (mem) < 0
321       && (MEM_SIZE (mem) + MEM_OFFSET (mem)) * BITS_PER_UNIT == ref->size)
322     return true;
323 
324   /* Otherwise continue and refine size and offset we got from analyzing
325      MEM_EXPR by using MEM_SIZE and MEM_OFFSET.  */
326 
327   ref->offset += MEM_OFFSET (mem) * BITS_PER_UNIT;
328   ref->size = MEM_SIZE (mem) * BITS_PER_UNIT;
329 
330   /* The MEM may extend into adjacent fields, so adjust max_size if
331      necessary.  */
332   if (ref->max_size != -1
333       && ref->size > ref->max_size)
334     ref->max_size = ref->size;
335 
336   /* If MEM_OFFSET and MEM_SIZE get us outside of the base object of
337      the MEM_EXPR, punt.  This happens a lot for STRICT_ALIGNMENT targets.  */
338   if (MEM_EXPR (mem) != get_spill_slot_decl (false)
339       && (ref->offset < 0
340 	  || (DECL_P (ref->base)
341 	      && (!host_integerp (DECL_SIZE (ref->base), 1)
342 		  || (TREE_INT_CST_LOW (DECL_SIZE ((ref->base)))
343 		      < (unsigned HOST_WIDE_INT)(ref->offset + ref->size))))))
344     return false;
345 
346   return true;
347 }
348 
349 /* Query the alias-oracle on whether the two memory rtx X and MEM may
350    alias.  If TBAA_P is set also apply TBAA.  Returns true if the
351    two rtxen may alias, false otherwise.  */
352 
353 static bool
354 rtx_refs_may_alias_p (const_rtx x, const_rtx mem, bool tbaa_p)
355 {
356   ao_ref ref1, ref2;
357 
358   if (!ao_ref_from_mem (&ref1, x)
359       || !ao_ref_from_mem (&ref2, mem))
360     return true;
361 
362   return refs_may_alias_p_1 (&ref1, &ref2,
363 			     tbaa_p
364 			     && MEM_ALIAS_SET (x) != 0
365 			     && MEM_ALIAS_SET (mem) != 0);
366 }
367 
368 /* Returns a pointer to the alias set entry for ALIAS_SET, if there is
369    such an entry, or NULL otherwise.  */
370 
371 static inline alias_set_entry
372 get_alias_set_entry (alias_set_type alias_set)
373 {
374   return (*alias_sets)[alias_set];
375 }
376 
377 /* Returns nonzero if the alias sets for MEM1 and MEM2 are such that
378    the two MEMs cannot alias each other.  */
379 
380 static inline int
381 mems_in_disjoint_alias_sets_p (const_rtx mem1, const_rtx mem2)
382 {
383   return (flag_strict_aliasing
384 	  && ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1),
385 				      MEM_ALIAS_SET (mem2)));
386 }
387 
388 /* Insert the NODE into the splay tree given by DATA.  Used by
389    record_alias_subset via splay_tree_foreach.  */
390 
391 static int
392 insert_subset_children (splay_tree_node node, void *data)
393 {
394   splay_tree_insert ((splay_tree) data, node->key, node->value);
395 
396   return 0;
397 }
398 
399 /* Return true if the first alias set is a subset of the second.  */
400 
401 bool
402 alias_set_subset_of (alias_set_type set1, alias_set_type set2)
403 {
404   alias_set_entry ase;
405 
406   /* Everything is a subset of the "aliases everything" set.  */
407   if (set2 == 0)
408     return true;
409 
410   /* Otherwise, check if set1 is a subset of set2.  */
411   ase = get_alias_set_entry (set2);
412   if (ase != 0
413       && (ase->has_zero_child
414 	  || splay_tree_lookup (ase->children,
415 			        (splay_tree_key) set1)))
416     return true;
417   return false;
418 }
419 
420 /* Return 1 if the two specified alias sets may conflict.  */
421 
422 int
423 alias_sets_conflict_p (alias_set_type set1, alias_set_type set2)
424 {
425   alias_set_entry ase;
426 
427   /* The easy case.  */
428   if (alias_sets_must_conflict_p (set1, set2))
429     return 1;
430 
431   /* See if the first alias set is a subset of the second.  */
432   ase = get_alias_set_entry (set1);
433   if (ase != 0
434       && (ase->has_zero_child
435 	  || splay_tree_lookup (ase->children,
436 				(splay_tree_key) set2)))
437     return 1;
438 
439   /* Now do the same, but with the alias sets reversed.  */
440   ase = get_alias_set_entry (set2);
441   if (ase != 0
442       && (ase->has_zero_child
443 	  || splay_tree_lookup (ase->children,
444 				(splay_tree_key) set1)))
445     return 1;
446 
447   /* The two alias sets are distinct and neither one is the
448      child of the other.  Therefore, they cannot conflict.  */
449   return 0;
450 }
451 
452 /* Return 1 if the two specified alias sets will always conflict.  */
453 
454 int
455 alias_sets_must_conflict_p (alias_set_type set1, alias_set_type set2)
456 {
457   if (set1 == 0 || set2 == 0 || set1 == set2)
458     return 1;
459 
460   return 0;
461 }
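
/* For example, alias_sets_must_conflict_p (0, N) and
   alias_sets_must_conflict_p (N, N) return 1 for any set N, while two
   distinct nonzero sets never "must" conflict, even when one has been
   recorded as a subset of the other; that is only a "may" conflict,
   handled by alias_sets_conflict_p above.  */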
462 
463 /* Return 1 if any MEM object of type T1 will always conflict (using the
464    dependency routines in this file) with any MEM object of type T2.
465    This is used when allocating temporary storage.  If T1 and/or T2 are
466    NULL_TREE, it means we know nothing about the storage.  */
467 
468 int
469 objects_must_conflict_p (tree t1, tree t2)
470 {
471   alias_set_type set1, set2;
472 
473   /* If neither has a type specified, we don't know if they'll conflict
474      because we may be using them to store objects of various types, for
475      example the argument and local variable areas of inlined functions.  */
476   if (t1 == 0 && t2 == 0)
477     return 0;
478 
479   /* If they are the same type, they must conflict.  */
480   if (t1 == t2
481       /* Likewise if both are volatile.  */
482       || (t1 != 0 && TYPE_VOLATILE (t1) && t2 != 0 && TYPE_VOLATILE (t2)))
483     return 1;
484 
485   set1 = t1 ? get_alias_set (t1) : 0;
486   set2 = t2 ? get_alias_set (t2) : 0;
487 
488   /* We can't use alias_sets_conflict_p because we must make sure
489      that every subtype of t1 will conflict with every subtype of
490      t2 for which a pair of subobjects of these respective subtypes
491      overlaps on the stack.  */
492   return alias_sets_must_conflict_p (set1, set2);
493 }
494 
495 /* Return true if all nested component references handled by
496    get_inner_reference in T are such that we should use the alias set
497    provided by the object at the heart of T.
498 
499    This is true for non-addressable components (which don't have their
500    own alias set), as well as components of objects in alias set zero.
501    This latter point is a special case wherein we wish to override the
502    alias set used by the component, but we don't have per-FIELD_DECL
503    assignable alias sets.  */
504 
505 bool
506 component_uses_parent_alias_set (const_tree t)
507 {
508   while (1)
509     {
510       /* If we're at the end, it vacuously uses its own alias set.  */
511       if (!handled_component_p (t))
512 	return false;
513 
514       switch (TREE_CODE (t))
515 	{
516 	case COMPONENT_REF:
517 	  if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1)))
518 	    return true;
519 	  break;
520 
521 	case ARRAY_REF:
522 	case ARRAY_RANGE_REF:
523 	  if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0))))
524 	    return true;
525 	  break;
526 
527 	case REALPART_EXPR:
528 	case IMAGPART_EXPR:
529 	  break;
530 
531 	default:
532 	  /* Bitfields and casts are never addressable.  */
533 	  return true;
534 	}
535 
536       t = TREE_OPERAND (t, 0);
537       if (get_alias_set (TREE_TYPE (t)) == 0)
538 	return true;
539     }
540 }
541 
542 /* Return the alias set for the memory pointed to by T, which may be
543    either a type or an expression.  Return -1 if there is nothing
544    special about dereferencing T.  */
545 
546 static alias_set_type
547 get_deref_alias_set_1 (tree t)
548 {
549   /* If we're not doing any alias analysis, just assume everything
550      aliases everything else.  */
551   if (!flag_strict_aliasing)
552     return 0;
553 
554   /* All we care about is the type.  */
555   if (! TYPE_P (t))
556     t = TREE_TYPE (t);
557 
558   /* If we have an INDIRECT_REF via a void pointer, we don't
559      know anything about what that might alias.  Likewise if the
560      pointer is marked that way.  */
561   if (TREE_CODE (TREE_TYPE (t)) == VOID_TYPE
562       || TYPE_REF_CAN_ALIAS_ALL (t))
563     return 0;
564 
565   return -1;
566 }
567 
568 /* Return the alias set for the memory pointed to by T, which may be
569    either a type or an expression.  */
570 
571 alias_set_type
572 get_deref_alias_set (tree t)
573 {
574   alias_set_type set = get_deref_alias_set_1 (t);
575 
576   /* Fall back to the alias-set of the pointed-to type.  */
577   if (set == -1)
578     {
579       if (! TYPE_P (t))
580 	t = TREE_TYPE (t);
581       set = get_alias_set (TREE_TYPE (t));
582     }
583 
584   return set;
585 }
586 
587 /* Return the alias set for T, which may be either a type or an
588    expression.  Call language-specific routine for help, if needed.  */
589 
590 alias_set_type
591 get_alias_set (tree t)
592 {
593   alias_set_type set;
594 
595   /* If we're not doing any alias analysis, just assume everything
596      aliases everything else.  Also return 0 if this or its type is
597      an error.  */
598   if (! flag_strict_aliasing || t == error_mark_node
599       || (! TYPE_P (t)
600 	  && (TREE_TYPE (t) == 0 || TREE_TYPE (t) == error_mark_node)))
601     return 0;
602 
603   /* We can be passed either an expression or a type.  This and the
604      language-specific routine may make mutually-recursive calls to each other
605      to figure out what to do.  At each juncture, we see if this is a tree
606      that the language may need to handle specially.  First handle things that
607      aren't types.  */
608   if (! TYPE_P (t))
609     {
610       tree inner;
611 
612       /* Give the language a chance to do something with this tree
613 	 before we look at it.  */
614       STRIP_NOPS (t);
615       set = lang_hooks.get_alias_set (t);
616       if (set != -1)
617 	return set;
618 
619       /* Get the base object of the reference.  */
620       inner = t;
621       while (handled_component_p (inner))
622 	{
623 	  /* If there is a VIEW_CONVERT_EXPR in the chain we cannot use
624 	     the type of any component references that wrap it to
625 	     determine the alias-set.  */
626 	  if (TREE_CODE (inner) == VIEW_CONVERT_EXPR)
627 	    t = TREE_OPERAND (inner, 0);
628 	  inner = TREE_OPERAND (inner, 0);
629 	}
630 
631       /* Handle pointer dereferences here, they can override the
632 	 alias-set.  */
633       if (INDIRECT_REF_P (inner))
634 	{
635 	  set = get_deref_alias_set_1 (TREE_OPERAND (inner, 0));
636 	  if (set != -1)
637 	    return set;
638 	}
639       else if (TREE_CODE (inner) == TARGET_MEM_REF)
640 	return get_deref_alias_set (TMR_OFFSET (inner));
641       else if (TREE_CODE (inner) == MEM_REF)
642 	{
643 	  set = get_deref_alias_set_1 (TREE_OPERAND (inner, 1));
644 	  if (set != -1)
645 	    return set;
646 	}
647 
648       /* If the innermost reference is a MEM_REF that has a
649 	 conversion embedded, treat it like a VIEW_CONVERT_EXPR above,
650 	 using the memory access type for determining the alias-set.  */
651       if (TREE_CODE (inner) == MEM_REF
652 	  && TYPE_MAIN_VARIANT (TREE_TYPE (inner))
653 	     != TYPE_MAIN_VARIANT
654 		(TREE_TYPE (TREE_TYPE (TREE_OPERAND (inner, 1)))))
655 	return get_deref_alias_set (TREE_OPERAND (inner, 1));
656 
657       /* Otherwise, pick up the outermost object that we could have a pointer
658 	 to, processing conversions as above.  */
659       while (component_uses_parent_alias_set (t))
660 	{
661 	  t = TREE_OPERAND (t, 0);
662 	  STRIP_NOPS (t);
663 	}
664 
665       /* If we've already determined the alias set for a decl, just return
666 	 it.  This is necessary for C++ anonymous unions, whose component
667 	 variables don't look like union members (boo!).  */
668       if (TREE_CODE (t) == VAR_DECL
669 	  && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t)))
670 	return MEM_ALIAS_SET (DECL_RTL (t));
671 
672       /* Now all we care about is the type.  */
673       t = TREE_TYPE (t);
674     }
675 
676   /* Variant qualifiers don't affect the alias set, so get the main
677      variant.  */
678   t = TYPE_MAIN_VARIANT (t);
679 
680   /* Always use the canonical type as well.  If this is a type that
681      requires structural comparisons to identify compatible types
682      use alias set zero.  */
683   if (TYPE_STRUCTURAL_EQUALITY_P (t))
684     {
685       /* Allow the language to specify another alias set for this
686 	 type.  */
687       set = lang_hooks.get_alias_set (t);
688       if (set != -1)
689 	return set;
690       return 0;
691     }
692 
693   t = TYPE_CANONICAL (t);
694 
695   /* The canonical type should not require structural equality checks.  */
696   gcc_checking_assert (!TYPE_STRUCTURAL_EQUALITY_P (t));
697 
698   /* If this is a type with a known alias set, return it.  */
699   if (TYPE_ALIAS_SET_KNOWN_P (t))
700     return TYPE_ALIAS_SET (t);
701 
702   /* We don't want to set TYPE_ALIAS_SET for incomplete types.  */
703   if (!COMPLETE_TYPE_P (t))
704     {
705       /* For arrays with unknown size the conservative answer is the
706 	 alias set of the element type.  */
707       if (TREE_CODE (t) == ARRAY_TYPE)
708 	return get_alias_set (TREE_TYPE (t));
709 
710       /* But return zero as a conservative answer for incomplete types.  */
711       return 0;
712     }
713 
714   /* See if the language has special handling for this type.  */
715   set = lang_hooks.get_alias_set (t);
716   if (set != -1)
717     return set;
718 
719   /* There are no objects of FUNCTION_TYPE, so there's no point in
720      using up an alias set for them.  (There are, of course, pointers
721      and references to functions, but that's different.)  */
722   else if (TREE_CODE (t) == FUNCTION_TYPE || TREE_CODE (t) == METHOD_TYPE)
723     set = 0;
724 
725   /* Unless the language specifies otherwise, let vector types alias
726      their components.  This avoids some nasty type punning issues in
727      normal usage, and indeed lets vectors be treated more like an
728      array slice.  */
729   else if (TREE_CODE (t) == VECTOR_TYPE)
730     set = get_alias_set (TREE_TYPE (t));
731 
732   /* Unless the language specifies otherwise, treat array types the
733      same as their components.  This avoids the asymmetry we get
734      through recording the components.  Consider accessing a
735      character(kind=1) through a reference to a character(kind=1)[1:1].
736      Or consider if we want to assign integer(kind=4)[0:D.1387] and
737      integer(kind=4)[4] the same alias set or not.
738      Just be pragmatic here and make sure the array and its element
739      type get the same alias set assigned.  */
740   else if (TREE_CODE (t) == ARRAY_TYPE && !TYPE_NONALIASED_COMPONENT (t))
741     set = get_alias_set (TREE_TYPE (t));
742 
743   /* From the former common C and C++ langhook implementation:
744 
745      Unfortunately, there is no canonical form of a pointer type.
746      In particular, if we have `typedef int I', then `int *', and
747      `I *' are different types.  So, we have to pick a canonical
748      representative.  We do this below.
749 
750      Technically, this approach is actually more conservative than
751      it needs to be.  In particular, `const int *' and `int *'
752      should be in different alias sets, according to the C and C++
753      standard, since their types are not the same, and so,
754      technically, an `int **' and `const int **' cannot point at
755      the same thing.
756 
757      But, the standard is wrong.  In particular, this code is
758      legal C++:
759 
760      int *ip;
761      int **ipp = &ip;
762      const int* const* cipp = ipp;
763      And, it doesn't make sense for that to be legal unless you
764      can dereference IPP and CIPP.  So, we ignore cv-qualifiers on
765      the pointed-to types.  This issue has been reported to the
766      C++ committee.
767 
768      In addition to the above canonicalization issue, with LTO
769      we should also canonicalize `T (*)[]' to `T *' avoiding
770      alias issues with pointer-to element types and pointer-to
771      array types.
772 
773      Likewise we need to deal with the situation of incomplete
774      pointed-to types and make `*(struct X **)&a' and
775      `*(struct X {} **)&a' alias.  Otherwise we will have to
776      guarantee that all pointer-to incomplete type variants
777      will be replaced by pointer-to complete type variants if
778      they are available.
779 
780      With LTO the convenient situation of using `void *' to
781      access and store any pointer type will also become
782      more apparent (and `void *' is just another pointer-to
783      incomplete type).  Assigning alias-set zero to `void *'
784      and all pointer-to incomplete types is a not appealing
785      solution.  Assigning an effective alias-set zero only
786      affecting pointers might be - by recording proper subset
787      relationships of all pointer alias-sets.
788 
789      Pointer-to function types are another grey area which
790      needs caution.  Globbing them all into one alias-set
791      or the above effective zero set would work.
792 
793      For now just assign the same alias-set to all pointers.
794      That's simple and avoids all the above problems.  */
795   else if (POINTER_TYPE_P (t)
796 	   && t != ptr_type_node)
797     set = get_alias_set (ptr_type_node);
798 
799   /* Otherwise make a new alias set for this type.  */
800   else
801     {
802       /* Each canonical type gets its own alias set, so canonical types
803 	 shouldn't form a tree.  It doesn't really matter for types
804 	 we handle specially above, so only check it where it possibly
805 	 would result in a bogus alias set.  */
806       gcc_checking_assert (TYPE_CANONICAL (t) == t);
807 
808       set = new_alias_set ();
809     }
810 
811   TYPE_ALIAS_SET (t) = set;
812 
813   /* If this is an aggregate type or a complex type, we must record any
814      component aliasing information.  */
815   if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE)
816     record_component_aliases (t);
817 
818   return set;
819 }
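
/* A usage sketch (S_TYPE is a hypothetical tree for `struct S' from
   the comment near the top of this file; integer_type_node is the
   global tree for `int'):

     alias_set_type s = get_alias_set (s_type);
     alias_set_type i = get_alias_set (integer_type_node);

   record_component_aliases has made the set of `int' a child of the
   set of `struct S', so alias_sets_conflict_p (s, i) is nonzero while
   alias_sets_must_conflict_p (s, i) is zero.  */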
820 
821 /* Return a brand-new alias set.  */
822 
823 alias_set_type
824 new_alias_set (void)
825 {
826   if (flag_strict_aliasing)
827     {
828       if (alias_sets == 0)
829 	vec_safe_push (alias_sets, (alias_set_entry) 0);
830       vec_safe_push (alias_sets, (alias_set_entry) 0);
831       return alias_sets->length () - 1;
832     }
833   else
834     return 0;
835 }
836 
837 /* Indicate that things in SUBSET can alias things in SUPERSET, but that
838    not everything that aliases SUPERSET also aliases SUBSET.  For example,
839    in C, a store to an `int' can alias a load of a structure containing an
840    `int', and vice versa.  But it can't alias a load of a 'double' member
841    of the same structure.  Here, the structure would be the SUPERSET and
842    `int' the SUBSET.  This relationship is also described in the comment at
843    the beginning of this file.
844 
845    This function should be called only once per SUPERSET/SUBSET pair.
846 
847    It is illegal for SUPERSET to be zero; everything is implicitly a
848    subset of alias set zero.  */
849 
850 void
851 record_alias_subset (alias_set_type superset, alias_set_type subset)
852 {
853   alias_set_entry superset_entry;
854   alias_set_entry subset_entry;
855 
856   /* It is possible in complex type situations for both sets to be the same,
857      in which case we can ignore this operation.  */
858   if (superset == subset)
859     return;
860 
861   gcc_assert (superset);
862 
863   superset_entry = get_alias_set_entry (superset);
864   if (superset_entry == 0)
865     {
866       /* Create an entry for the SUPERSET, so that we have a place to
867 	 attach the SUBSET.  */
868       superset_entry = ggc_alloc_cleared_alias_set_entry_d ();
869       superset_entry->alias_set = superset;
870       superset_entry->children
871 	= splay_tree_new_ggc (splay_tree_compare_ints,
872 			      ggc_alloc_splay_tree_scalar_scalar_splay_tree_s,
873 			      ggc_alloc_splay_tree_scalar_scalar_splay_tree_node_s);
874       superset_entry->has_zero_child = 0;
875       (*alias_sets)[superset] = superset_entry;
876     }
877 
878   if (subset == 0)
879     superset_entry->has_zero_child = 1;
880   else
881     {
882       subset_entry = get_alias_set_entry (subset);
883       /* If there is an entry for the subset, enter all of its children
884 	 (if they are not already present) as children of the SUPERSET.  */
885       if (subset_entry)
886 	{
887 	  if (subset_entry->has_zero_child)
888 	    superset_entry->has_zero_child = 1;
889 
890 	  splay_tree_foreach (subset_entry->children, insert_subset_children,
891 			      superset_entry->children);
892 	}
893 
894       /* Enter the SUBSET itself as a child of the SUPERSET.  */
895       splay_tree_insert (superset_entry->children,
896 			 (splay_tree_key) subset, 0);
897     }
898 }
899 
900 /* Record that component types of TYPE, if any, are part of that type for
901    aliasing purposes.  For record types, we only record component types
902    for fields that are not marked non-addressable.  For array types, we
903    only record the component type if it is not marked non-aliased.  */
904 
905 void
906 record_component_aliases (tree type)
907 {
908   alias_set_type superset = get_alias_set (type);
909   tree field;
910 
911   if (superset == 0)
912     return;
913 
914   switch (TREE_CODE (type))
915     {
916     case RECORD_TYPE:
917     case UNION_TYPE:
918     case QUAL_UNION_TYPE:
919       /* Recursively record aliases for the base classes, if there are any.  */
920       if (TYPE_BINFO (type))
921 	{
922 	  int i;
923 	  tree binfo, base_binfo;
924 
925 	  for (binfo = TYPE_BINFO (type), i = 0;
926 	       BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
927 	    record_alias_subset (superset,
928 				 get_alias_set (BINFO_TYPE (base_binfo)));
929 	}
930       for (field = TYPE_FIELDS (type); field != 0; field = DECL_CHAIN (field))
931 	if (TREE_CODE (field) == FIELD_DECL && !DECL_NONADDRESSABLE_P (field))
932 	  record_alias_subset (superset, get_alias_set (TREE_TYPE (field)));
933       break;
934 
935     case COMPLEX_TYPE:
936       record_alias_subset (superset, get_alias_set (TREE_TYPE (type)));
937       break;
938 
939     /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
940        element type.  */
941 
942     default:
943       break;
944     }
945 }
946 
947 /* Allocate an alias set for use in storing and reading from the varargs
948    spill area.  */
949 
950 static GTY(()) alias_set_type varargs_set = -1;
951 
952 alias_set_type
953 get_varargs_alias_set (void)
954 {
955 #if 1
956   /* We now lower VA_ARG_EXPR, and there's currently no way to attach the
957      varargs alias set to an INDIRECT_REF (FIXME!), so we can't
958      consistently use the varargs alias set for loads from the varargs
959      area.  So don't use it anywhere.  */
960   return 0;
961 #else
962   if (varargs_set == -1)
963     varargs_set = new_alias_set ();
964 
965   return varargs_set;
966 #endif
967 }
968 
969 /* Likewise, but used for the fixed portions of the frame, e.g., register
970    save areas.  */
971 
972 static GTY(()) alias_set_type frame_set = -1;
973 
974 alias_set_type
975 get_frame_alias_set (void)
976 {
977   if (frame_set == -1)
978     frame_set = new_alias_set ();
979 
980   return frame_set;
981 }
982 
983 /* Create a new, unique base with id ID.  */
984 
985 static rtx
986 unique_base_value (HOST_WIDE_INT id)
987 {
988   return gen_rtx_ADDRESS (Pmode, id);
989 }
990 
991 /* Return true if accesses based on any other base value cannot alias
992    those based on X.  */
993 
994 static bool
995 unique_base_value_p (rtx x)
996 {
997   return GET_CODE (x) == ADDRESS && GET_MODE (x) == Pmode;
998 }
999 
1000 /* Return true if X is known to be a base value.  */
1001 
1002 static bool
1003 known_base_value_p (rtx x)
1004 {
1005   switch (GET_CODE (x))
1006     {
1007     case LABEL_REF:
1008     case SYMBOL_REF:
1009       return true;
1010 
1011     case ADDRESS:
1012       /* Arguments may or may not be bases; we don't know for sure.  */
1013       return GET_MODE (x) != VOIDmode;
1014 
1015     default:
1016       return false;
1017     }
1018 }
1019 
1020 /* Inside SRC, the source of a SET, find a base address.  */
1021 
1022 static rtx
1023 find_base_value (rtx src)
1024 {
1025   unsigned int regno;
1026 
1027 #if defined (FIND_BASE_TERM)
1028   /* Try machine-dependent ways to find the base term.  */
1029   src = FIND_BASE_TERM (src);
1030 #endif
1031 
1032   switch (GET_CODE (src))
1033     {
1034     case SYMBOL_REF:
1035     case LABEL_REF:
1036       return src;
1037 
1038     case REG:
1039       regno = REGNO (src);
1040       /* At the start of a function, argument registers have known base
1041 	 values which may be lost later.  Returning an ADDRESS
1042 	 expression here allows optimization based on argument values
1043 	 even when the argument registers are used for other purposes.  */
1044       if (regno < FIRST_PSEUDO_REGISTER && copying_arguments)
1045 	return new_reg_base_value[regno];
1046 
1047       /* If a pseudo has a known base value, return it.  Do not do this
1048 	 for non-fixed hard regs since it can result in a circular
1049 	 dependency chain for registers which have values at function entry.
1050 
1051 	 The test above is not sufficient because the scheduler may move
1052 	 a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN.  */
1053       if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno])
1054 	  && regno < vec_safe_length (reg_base_value))
1055 	{
1056 	  /* If we're inside init_alias_analysis, use new_reg_base_value
1057 	     to reduce the number of relaxation iterations.  */
1058 	  if (new_reg_base_value && new_reg_base_value[regno]
1059 	      && DF_REG_DEF_COUNT (regno) == 1)
1060 	    return new_reg_base_value[regno];
1061 
1062 	  if ((*reg_base_value)[regno])
1063 	    return (*reg_base_value)[regno];
1064 	}
1065 
1066       return 0;
1067 
1068     case MEM:
1069       /* Check for an argument passed in memory.  Only record in the
1070 	 copying-arguments block; it is too hard to track changes
1071 	 otherwise.  */
1072       if (copying_arguments
1073 	  && (XEXP (src, 0) == arg_pointer_rtx
1074 	      || (GET_CODE (XEXP (src, 0)) == PLUS
1075 		  && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx)))
1076 	return arg_base_value;
1077       return 0;
1078 
1079     case CONST:
1080       src = XEXP (src, 0);
1081       if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS)
1082 	break;
1083 
1084       /* ... fall through ...  */
1085 
1086     case PLUS:
1087     case MINUS:
1088       {
1089 	rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1);
1090 
1091 	/* If either operand is a REG that is a known pointer, then it
1092 	   is the base.  */
1093 	if (REG_P (src_0) && REG_POINTER (src_0))
1094 	  return find_base_value (src_0);
1095 	if (REG_P (src_1) && REG_POINTER (src_1))
1096 	  return find_base_value (src_1);
1097 
1098 	/* If either operand is a REG, then see if we already have
1099 	   a known value for it.  */
1100 	if (REG_P (src_0))
1101 	  {
1102 	    temp = find_base_value (src_0);
1103 	    if (temp != 0)
1104 	      src_0 = temp;
1105 	  }
1106 
1107 	if (REG_P (src_1))
1108 	  {
1109 	    temp = find_base_value (src_1);
1110 	    if (temp != 0)
1111 	      src_1 = temp;
1112 	  }
1113 
1114 	/* If either base is a named object or a special address
1115 	   (like an argument or stack reference), then use it for the
1116 	   base term.  */
1117 	if (src_0 != 0 && known_base_value_p (src_0))
1118 	  return src_0;
1119 
1120 	if (src_1 != 0 && known_base_value_p (src_1))
1121 	  return src_1;
1122 
1123 	/* Guess which operand is the base address:
1124 	   If either operand is a symbol, then it is the base.  If
1125 	   either operand is a CONST_INT, then the other is the base.  */
1126 	if (CONST_INT_P (src_1) || CONSTANT_P (src_0))
1127 	  return find_base_value (src_0);
1128 	else if (CONST_INT_P (src_0) || CONSTANT_P (src_1))
1129 	  return find_base_value (src_1);
1130 
1131 	return 0;
1132       }
1133 
1134     case LO_SUM:
1135       /* The standard form is (lo_sum reg sym) so look only at the
1136 	 second operand.  */
1137       return find_base_value (XEXP (src, 1));
1138 
1139     case AND:
1140       /* If the second operand is constant, set the base
1141 	 address to the first operand.  */
1142       if (CONST_INT_P (XEXP (src, 1)) && INTVAL (XEXP (src, 1)) != 0)
1143 	return find_base_value (XEXP (src, 0));
1144       return 0;
1145 
1146     case TRUNCATE:
1147       /* As we do not know which address space the pointer is referring to, we can
1148 	 handle this only if the target does not support different pointer or
1149 	 address modes depending on the address space.  */
1150       if (!target_default_pointer_address_modes_p ())
1151 	break;
1152       if (GET_MODE_SIZE (GET_MODE (src)) < GET_MODE_SIZE (Pmode))
1153 	break;
1154       /* Fall through.  */
1155     case HIGH:
1156     case PRE_INC:
1157     case PRE_DEC:
1158     case POST_INC:
1159     case POST_DEC:
1160     case PRE_MODIFY:
1161     case POST_MODIFY:
1162       return find_base_value (XEXP (src, 0));
1163 
1164     case ZERO_EXTEND:
1165     case SIGN_EXTEND:	/* used for NT/Alpha pointers */
1166       /* As we do not know which address space the pointer is referring to, we can
1167 	 handle this only if the target does not support different pointer or
1168 	 address modes depending on the address space.  */
1169       if (!target_default_pointer_address_modes_p ())
1170 	break;
1171 
1172       {
1173 	rtx temp = find_base_value (XEXP (src, 0));
1174 
1175 	if (temp != 0 && CONSTANT_P (temp))
1176 	  temp = convert_memory_address (Pmode, temp);
1177 
1178 	return temp;
1179       }
1180 
1181     default:
1182       break;
1183     }
1184 
1185   return 0;
1186 }
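
/* For example (a sketch of the case analysis above): for a SET source
   (plus:SI (reg:SI 100) (const_int 4)) the base value is whatever is
   known for register 100, since a CONST_INT cannot be the base; for
   (symbol_ref:SI ("x")) the SYMBOL_REF itself is returned; and for
   (lo_sum (reg) (symbol_ref "x")) only the second operand is
   examined.  */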
1187 
1188 /* Called from init_alias_analysis indirectly through note_stores,
1189    or directly if DEST is a register with a REG_NOALIAS note attached.
1190    SET is null in the latter case.  */
1191 
1192 /* While scanning insns to find base values, reg_seen[N] is nonzero if
1193    register N has been set in this function.  */
1194 static sbitmap reg_seen;
1195 
1196 static void
1197 record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
1198 {
1199   unsigned regno;
1200   rtx src;
1201   int n;
1202 
1203   if (!REG_P (dest))
1204     return;
1205 
1206   regno = REGNO (dest);
1207 
1208   gcc_checking_assert (regno < reg_base_value->length ());
1209 
1210   /* If this spans multiple hard registers, then we must indicate that every
1211      register has an unusable value.  */
1212   if (regno < FIRST_PSEUDO_REGISTER)
1213     n = hard_regno_nregs[regno][GET_MODE (dest)];
1214   else
1215     n = 1;
1216   if (n != 1)
1217     {
1218       while (--n >= 0)
1219 	{
1220 	  bitmap_set_bit (reg_seen, regno + n);
1221 	  new_reg_base_value[regno + n] = 0;
1222 	}
1223       return;
1224     }
1225 
1226   if (set)
1227     {
1228       /* A CLOBBER wipes out any old value but does not prevent a previously
1229 	 unset register from acquiring a base address (i.e. reg_seen is not
1230 	 set).  */
1231       if (GET_CODE (set) == CLOBBER)
1232 	{
1233 	  new_reg_base_value[regno] = 0;
1234 	  return;
1235 	}
1236       src = SET_SRC (set);
1237     }
1238   else
1239     {
1240       /* There's a REG_NOALIAS note against DEST.  */
1241       if (bitmap_bit_p (reg_seen, regno))
1242 	{
1243 	  new_reg_base_value[regno] = 0;
1244 	  return;
1245 	}
1246       bitmap_set_bit (reg_seen, regno);
1247       new_reg_base_value[regno] = unique_base_value (unique_id++);
1248       return;
1249     }
1250 
1251   /* If this is not the first set of REGNO, see whether the new value
1252      is related to the old one.  There are two cases of interest:
1253 
1254 	(1) The register might be assigned an entirely new value
1255 	    that has the same base term as the original set.
1256 
1257 	(2) The set might be a simple self-modification that
1258 	    cannot change REGNO's base value.
1259 
1260      If neither case holds, reject the original base value as invalid.
1261      Note that the following situation is not detected:
1262 
1263 	 extern int x, y;  int *p = &x; p += (&y-&x);
1264 
1265      ANSI C does not allow computing the difference of addresses
1266      of distinct top level objects.  */
1267   if (new_reg_base_value[regno] != 0
1268       && find_base_value (src) != new_reg_base_value[regno])
1269     switch (GET_CODE (src))
1270       {
1271       case LO_SUM:
1272       case MINUS:
1273 	if (XEXP (src, 0) != dest && XEXP (src, 1) != dest)
1274 	  new_reg_base_value[regno] = 0;
1275 	break;
1276       case PLUS:
1277 	/* If the value we add in the PLUS is also a valid base value,
1278 	   this might be the actual base value, and the original value
1279 	   an index.  */
1280 	{
1281 	  rtx other = NULL_RTX;
1282 
1283 	  if (XEXP (src, 0) == dest)
1284 	    other = XEXP (src, 1);
1285 	  else if (XEXP (src, 1) == dest)
1286 	    other = XEXP (src, 0);
1287 
1288 	  if (! other || find_base_value (other))
1289 	    new_reg_base_value[regno] = 0;
1290 	  break;
1291 	}
1292       case AND:
1293 	if (XEXP (src, 0) != dest || !CONST_INT_P (XEXP (src, 1)))
1294 	  new_reg_base_value[regno] = 0;
1295 	break;
1296       default:
1297 	new_reg_base_value[regno] = 0;
1298 	break;
1299       }
1300   /* If this is the first set of a register, record the value.  */
1301   else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
1302 	   && ! bitmap_bit_p (reg_seen, regno) && new_reg_base_value[regno] == 0)
1303     new_reg_base_value[regno] = find_base_value (src);
1304 
1305   bitmap_set_bit (reg_seen, regno);
1306 }
1307 
1308 /* Return REG_BASE_VALUE for REGNO.  The selective scheduler uses this to
1309    avoid using hard registers with non-null REG_BASE_VALUE for renaming.  */
1310 rtx
1311 get_reg_base_value (unsigned int regno)
1312 {
1313   return (*reg_base_value)[regno];
1314 }
1315 
1316 /* If a value is known for REGNO, return it.  */
1317 
1318 rtx
1319 get_reg_known_value (unsigned int regno)
1320 {
1321   if (regno >= FIRST_PSEUDO_REGISTER)
1322     {
1323       regno -= FIRST_PSEUDO_REGISTER;
1324       if (regno < vec_safe_length (reg_known_value))
1325 	return (*reg_known_value)[regno];
1326     }
1327   return NULL;
1328 }
1329 
1330 /* Set the value known for pseudo-register REGNO to VAL.  */
1331 
1332 static void
1333 set_reg_known_value (unsigned int regno, rtx val)
1334 {
1335   if (regno >= FIRST_PSEUDO_REGISTER)
1336     {
1337       regno -= FIRST_PSEUDO_REGISTER;
1338       if (regno < vec_safe_length (reg_known_value))
1339 	(*reg_known_value)[regno] = val;
1340     }
1341 }
1342 
1343 /* Similarly for reg_known_equiv_p.  */
1344 
1345 bool
1346 get_reg_known_equiv_p (unsigned int regno)
1347 {
1348   if (regno >= FIRST_PSEUDO_REGISTER)
1349     {
1350       regno -= FIRST_PSEUDO_REGISTER;
1351       if (regno < vec_safe_length (reg_known_value))
1352 	return bitmap_bit_p (reg_known_equiv_p, regno);
1353     }
1354   return false;
1355 }
1356 
1357 static void
1358 set_reg_known_equiv_p (unsigned int regno, bool val)
1359 {
1360   if (regno >= FIRST_PSEUDO_REGISTER)
1361     {
1362       regno -= FIRST_PSEUDO_REGISTER;
1363       if (regno < vec_safe_length (reg_known_value))
1364 	{
1365 	  if (val)
1366 	    bitmap_set_bit (reg_known_equiv_p, regno);
1367 	  else
1368 	    bitmap_clear_bit (reg_known_equiv_p, regno);
1369 	}
1370     }
1371 }
1372 
1373 
1374 /* Returns a canonical version of X, from the point of view of alias
1375    analysis.  (For example, if X is a MEM whose address is a register,
1376    and the register has a known value (say a SYMBOL_REF), then a MEM
1377    whose address is the SYMBOL_REF is returned.)  */
1378 
1379 rtx
1380 canon_rtx (rtx x)
1381 {
1382   /* Recursively look for equivalences.  */
1383   if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1384     {
1385       rtx t = get_reg_known_value (REGNO (x));
1386       if (t == x)
1387 	return x;
1388       if (t)
1389 	return canon_rtx (t);
1390     }
1391 
1392   if (GET_CODE (x) == PLUS)
1393     {
1394       rtx x0 = canon_rtx (XEXP (x, 0));
1395       rtx x1 = canon_rtx (XEXP (x, 1));
1396 
1397       if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
1398 	{
1399 	  if (CONST_INT_P (x0))
1400 	    return plus_constant (GET_MODE (x), x1, INTVAL (x0));
1401 	  else if (CONST_INT_P (x1))
1402 	    return plus_constant (GET_MODE (x), x0, INTVAL (x1));
1403 	  return gen_rtx_PLUS (GET_MODE (x), x0, x1);
1404 	}
1405     }
1406 
1407   /* This gives us much better alias analysis when called from
1408      the loop optimizer.  Note we want to leave the original
1409      MEM alone, but need to return the canonicalized MEM with
1410      all of its flags left at their original values.  */
1411   else if (MEM_P (x))
1412     x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0)));
1413 
1414   return x;
1415 }
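
/* For example, if pseudo 100 has the known value (symbol_ref "x"),
   canon_rtx maps (plus:SI (reg:SI 100) (const_int 8)) to
   (const:SI (plus:SI (symbol_ref "x") (const_int 8))) via
   plus_constant, and rewrites a MEM whose address is (reg:SI 100)
   into an equivalent MEM based on the SYMBOL_REF, leaving the
   original MEM untouched.  */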
1416 
1417 /* Return 1 if X and Y are identical-looking rtx's.
1418    Expect that X and Y have already been canonicalized.
1419 
1420    We use the data in reg_known_value above to see if two registers with
1421    different numbers are, in fact, equivalent.  */
1422 
1423 static int
1424 rtx_equal_for_memref_p (const_rtx x, const_rtx y)
1425 {
1426   int i;
1427   int j;
1428   enum rtx_code code;
1429   const char *fmt;
1430 
1431   if (x == 0 && y == 0)
1432     return 1;
1433   if (x == 0 || y == 0)
1434     return 0;
1435 
1436   if (x == y)
1437     return 1;
1438 
1439   code = GET_CODE (x);
1440   /* Rtx's of different codes cannot be equal.  */
1441   if (code != GET_CODE (y))
1442     return 0;
1443 
1444   /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1445      (REG:SI x) and (REG:HI x) are NOT equivalent.  */
1446 
1447   if (GET_MODE (x) != GET_MODE (y))
1448     return 0;
1449 
1450   /* Some RTL can be compared without a recursive examination.  */
1451   switch (code)
1452     {
1453     case REG:
1454       return REGNO (x) == REGNO (y);
1455 
1456     case LABEL_REF:
1457       return XEXP (x, 0) == XEXP (y, 0);
1458 
1459     case SYMBOL_REF:
1460       return XSTR (x, 0) == XSTR (y, 0);
1461 
1462     case ENTRY_VALUE:
1463       /* This is magic, don't go through canonicalization et al.  */
1464       return rtx_equal_p (ENTRY_VALUE_EXP (x), ENTRY_VALUE_EXP (y));
1465 
1466     case VALUE:
1467     CASE_CONST_UNIQUE:
1468       /* There's no need to compare the contents of CONST_DOUBLEs or
1469 	 CONST_INTs because pointer equality is a good enough
1470 	 comparison for these nodes.  */
1471       return 0;
1472 
1473     default:
1474       break;
1475     }
1476 
1477   /* canon_rtx knows how to handle plus.  No need to canonicalize.  */
1478   if (code == PLUS)
1479     return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
1480 	     && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
1481 	    || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
1482 		&& rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
1483   /* For commutative operations, the RTXs match if the operands match in
1484      either order.  Also handle the simple binary and unary cases without a loop.  */
1485   if (COMMUTATIVE_P (x))
1486     {
1487       rtx xop0 = canon_rtx (XEXP (x, 0));
1488       rtx yop0 = canon_rtx (XEXP (y, 0));
1489       rtx yop1 = canon_rtx (XEXP (y, 1));
1490 
1491       return ((rtx_equal_for_memref_p (xop0, yop0)
1492 	       && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop1))
1493 	      || (rtx_equal_for_memref_p (xop0, yop1)
1494 		  && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop0)));
1495     }
1496   else if (NON_COMMUTATIVE_P (x))
1497     {
1498       return (rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
1499 				      canon_rtx (XEXP (y, 0)))
1500 	      && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)),
1501 					 canon_rtx (XEXP (y, 1))));
1502     }
1503   else if (UNARY_P (x))
1504     return rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
1505 				   canon_rtx (XEXP (y, 0)));
1506 
1507   /* Compare the elements.  If any pair of corresponding elements
1508      fails to match, return 0 for the whole thing.
1509 
1510      Limit cases to types which actually appear in addresses.  */
1511 
1512   fmt = GET_RTX_FORMAT (code);
1513   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1514     {
1515       switch (fmt[i])
1516 	{
1517 	case 'i':
1518 	  if (XINT (x, i) != XINT (y, i))
1519 	    return 0;
1520 	  break;
1521 
1522 	case 'E':
1523 	  /* Two vectors must have the same length.  */
1524 	  if (XVECLEN (x, i) != XVECLEN (y, i))
1525 	    return 0;
1526 
1527 	  /* And the corresponding elements must match.  */
1528 	  for (j = 0; j < XVECLEN (x, i); j++)
1529 	    if (rtx_equal_for_memref_p (canon_rtx (XVECEXP (x, i, j)),
1530 					canon_rtx (XVECEXP (y, i, j))) == 0)
1531 	      return 0;
1532 	  break;
1533 
1534 	case 'e':
1535 	  if (rtx_equal_for_memref_p (canon_rtx (XEXP (x, i)),
1536 				      canon_rtx (XEXP (y, i))) == 0)
1537 	    return 0;
1538 	  break;
1539 
1540 	  /* This can happen for asm operands.  */
1541 	case 's':
1542 	  if (strcmp (XSTR (x, i), XSTR (y, i)))
1543 	    return 0;
1544 	  break;
1545 
1546 	/* This can happen for an asm which clobbers memory.  */
1547 	case '0':
1548 	  break;
1549 
1550 	  /* It is believed that rtx's at this level will never
1551 	     contain anything but integers and other rtx's,
1552 	     except within LABEL_REFs and SYMBOL_REFs.  */
1553 	default:
1554 	  gcc_unreachable ();
1555 	}
1556     }
1557   return 1;
1558 }
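
/* For example, (plus:SI (reg:SI 1) (reg:SI 2)) and
   (plus:SI (reg:SI 2) (reg:SI 1)) compare equal above, because PLUS
   operands are matched in either order, whereas (reg:SI 1) and
   (reg:HI 1) do not, because their modes differ.  */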
1559 
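/* Return the base term of address X: the base value of the object X
   addresses, looking through machine-dependent forms, arithmetic and
   cselib VALUEs.  Return 0 if no base term can be determined.  */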
1560 static rtx
1561 find_base_term (rtx x)
1562 {
1563   cselib_val *val;
1564   struct elt_loc_list *l, *f;
1565   rtx ret;
1566 
1567 #if defined (FIND_BASE_TERM)
1568   /* Try machine-dependent ways to find the base term.  */
1569   x = FIND_BASE_TERM (x);
1570 #endif
1571 
1572   switch (GET_CODE (x))
1573     {
1574     case REG:
1575       return REG_BASE_VALUE (x);
1576 
1577     case TRUNCATE:
1578       /* As we do not know which address space the pointer is referring to, we can
1579 	 handle this only if the target does not support different pointer or
1580 	 address modes depending on the address space.  */
1581       if (!target_default_pointer_address_modes_p ())
1582 	return 0;
1583       if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (Pmode))
1584 	return 0;
1585       /* Fall through.  */
1586     case HIGH:
1587     case PRE_INC:
1588     case PRE_DEC:
1589     case POST_INC:
1590     case POST_DEC:
1591     case PRE_MODIFY:
1592     case POST_MODIFY:
1593       return find_base_term (XEXP (x, 0));
1594 
1595     case ZERO_EXTEND:
1596     case SIGN_EXTEND:	/* Used for Alpha/NT pointers */
1597       /* As we do not know which address space the pointer is referring to, we can
1598 	 handle this only if the target does not support different pointer or
1599 	 address modes depending on the address space.  */
1600       if (!target_default_pointer_address_modes_p ())
1601 	return 0;
1602 
1603       {
1604 	rtx temp = find_base_term (XEXP (x, 0));
1605 
1606 	if (temp != 0 && CONSTANT_P (temp))
1607 	  temp = convert_memory_address (Pmode, temp);
1608 
1609 	return temp;
1610       }
1611 
1612     case VALUE:
1613       val = CSELIB_VAL_PTR (x);
1614       ret = NULL_RTX;
1615 
1616       if (!val)
1617 	return ret;
1618 
1619       if (cselib_sp_based_value_p (val))
1620 	return static_reg_base_value[STACK_POINTER_REGNUM];
1621 
1622       f = val->locs;
1623       /* Temporarily reset val->locs to avoid infinite recursion.  */
1624       val->locs = NULL;
1625 
1626       for (l = f; l; l = l->next)
1627 	if (GET_CODE (l->loc) == VALUE
1628 	    && CSELIB_VAL_PTR (l->loc)->locs
1629 	    && !CSELIB_VAL_PTR (l->loc)->locs->next
1630 	    && CSELIB_VAL_PTR (l->loc)->locs->loc == x)
1631 	  continue;
1632 	else if ((ret = find_base_term (l->loc)) != 0)
1633 	  break;
1634 
1635       val->locs = f;
1636       return ret;
1637 
1638     case LO_SUM:
1639       /* The standard form is (lo_sum reg sym) so look only at the
1640          second operand.  */
1641       return find_base_term (XEXP (x, 1));
1642 
1643     case CONST:
1644       x = XEXP (x, 0);
1645       if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS)
1646 	return 0;
1647       /* Fall through.  */
1648     case PLUS:
1649     case MINUS:
1650       {
1651 	rtx tmp1 = XEXP (x, 0);
1652 	rtx tmp2 = XEXP (x, 1);
1653 
1654 	/* This is a little bit tricky since we have to determine which of
1655 	   the two operands represents the real base address.  Otherwise this
1656 	   routine may return the index register instead of the base register.
1657 
1658 	   That may cause us to believe no aliasing was possible, when in
1659 	   fact aliasing is possible.
1660 
1661 	   We use a few simple tests to guess the base register.  Additional
1662 	   tests can certainly be added.  For example, if one of the operands
1663 	   is a shift or multiply, then it must be the index register and the
1664 	   other operand is the base register.  */
1665 
1666 	if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2))
1667 	  return find_base_term (tmp2);
1668 
1669 	/* If either operand is known to be a pointer, then prefer it
1670 	   to determine the base term.  */
1671 	if (REG_P (tmp1) && REG_POINTER (tmp1))
1672 	  ;
1673 	else if (REG_P (tmp2) && REG_POINTER (tmp2))
1674 	  {
1675 	    rtx tem = tmp1;
1676 	    tmp1 = tmp2;
1677 	    tmp2 = tem;
1678 	  }
1679 
1680 	/* Go ahead and find the base term for both operands.  If either base
1681 	   term is from a pointer or is a named object or a special address
1682 	   (like an argument or stack reference), then use it for the
1683 	   base term.  */
1684 	tmp1 = find_base_term (tmp1);
1685 	if (tmp1 != NULL_RTX
1686 	    && ((REG_P (tmp1) && REG_POINTER (tmp1))
1687 		 || known_base_value_p (tmp1)))
1688 	  return tmp1;
1689 	tmp2 = find_base_term (tmp2);
1690 	if (tmp2 != NULL_RTX
1691 	    && ((REG_P (tmp2) && REG_POINTER (tmp2))
1692 		 || known_base_value_p (tmp2)))
1693 	  return tmp2;
1694 
1695 	/* We could not determine which of the two operands was the
1696 	   base register and which was the index.  So we can determine
1697 	   nothing from the base alias check.  */
1698 	return 0;
1699       }
1700 
1701     case AND:
1702       if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) != 0)
1703 	return find_base_term (XEXP (x, 0));
1704       return 0;
1705 
1706     case SYMBOL_REF:
1707     case LABEL_REF:
1708       return x;
1709 
1710     default:
1711       return 0;
1712     }
1713 }
1714 
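/* Illustration of find_base_term (register number hypothetical): for the
   address (plus (reg 54) (const_int 8)), the PLUS case recurses into both
   operands; if (reg 54) was loaded from &x, its recorded REG_BASE_VALUE is
   the (symbol_ref "x"), which known_base_value_p accepts, so that
   SYMBOL_REF is returned and the constant index operand is ignored.  */
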
1715 /* Return true if accesses to address X may alias accesses based
1716    on the stack pointer.  */
1717 
1718 bool
1719 may_be_sp_based_p (rtx x)
1720 {
1721   rtx base = find_base_term (x);
1722   return !base || base == static_reg_base_value[STACK_POINTER_REGNUM];
1723 }
1724 
1725 /* Return 0 if the addresses X and Y are known to point to different
1726    objects, 1 if they might be pointers to the same object.  */
1727 
1728 static int
1729 base_alias_check (rtx x, rtx x_base, rtx y, rtx y_base,
1730 		  enum machine_mode x_mode, enum machine_mode y_mode)
1731 {
1732   /* If the address itself has no known base, see if a known equivalent
1733      value has one.  If either address still has no known base, nothing
1734      is known about aliasing.  */
1735   if (x_base == 0)
1736     {
1737       rtx x_c;
1738 
1739       if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x)
1740 	return 1;
1741 
1742       x_base = find_base_term (x_c);
1743       if (x_base == 0)
1744 	return 1;
1745     }
1746 
1747   if (y_base == 0)
1748     {
1749       rtx y_c;
1750       if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y)
1751 	return 1;
1752 
1753       y_base = find_base_term (y_c);
1754       if (y_base == 0)
1755 	return 1;
1756     }
1757 
1758   /* If the base addresses are equal nothing is known about aliasing.  */
1759   if (rtx_equal_p (x_base, y_base))
1760     return 1;
1761 
1762   /* The base addresses are different expressions.  If they are not accessed
1763      via AND, there is no conflict.  We can bring knowledge of object
1764      alignment into play here.  For example, on alpha, "char a, b;" can
1765      alias one another, though "char a; long b;" cannot.  AND addresses may
1766      implicitly alias surrounding objects; i.e. unaligned access in DImode
1767      via AND address can alias all surrounding object types except those
1768      with alignment 8 or higher.  */
1769   if (GET_CODE (x) == AND && GET_CODE (y) == AND)
1770     return 1;
1771   if (GET_CODE (x) == AND
1772       && (!CONST_INT_P (XEXP (x, 1))
1773 	  || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
1774     return 1;
1775   if (GET_CODE (y) == AND
1776       && (!CONST_INT_P (XEXP (y, 1))
1777 	  || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
1778     return 1;
1779 
1780   /* Differing symbols not accessed via AND never alias.  */
1781   if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
1782     return 0;
1783 
1784   if (unique_base_value_p (x_base) || unique_base_value_p (y_base))
1785     return 0;
1786 
1787   return 1;
1788 }
1789 
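/* A concrete instance of the AND reasoning in base_alias_check
   (Alpha-style, sizes illustrative): an unaligned DImode access through
   (and (reg 54) (const_int -8)) touches eight aligned bytes, so it may
   overlap any neighbouring object with alignment below 8; only when the
   other reference's mode has a unit size of at least 8 can the later
   checks conclude that no conflict exists.  */
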
1790 /* Callback for for_each_rtx; return 1 upon encountering a VALUE
1791    whose UID is greater than the int that D points to.  */
1792 
1793 static int
1794 refs_newer_value_cb (rtx *x, void *d)
1795 {
1796   if (GET_CODE (*x) == VALUE && CSELIB_VAL_PTR (*x)->uid > *(int *)d)
1797     return 1;
1798 
1799   return 0;
1800 }
1801 
1802 /* Return TRUE if EXPR refers to a VALUE whose uid is greater than
1803    that of V.  */
1804 
1805 static bool
1806 refs_newer_value_p (rtx expr, rtx v)
1807 {
1808   int minuid = CSELIB_VAL_PTR (v)->uid;
1809 
1810   return for_each_rtx (&expr, refs_newer_value_cb, &minuid);
1811 }
1812 
1813 /* Convert the address X into something we can use.  This is done by returning
1814    it unchanged unless it is a value; in the latter case we call cselib to get
1815    a more useful rtx.  */
1816 
1817 rtx
1818 get_addr (rtx x)
1819 {
1820   cselib_val *v;
1821   struct elt_loc_list *l;
1822 
1823   if (GET_CODE (x) != VALUE)
1824     return x;
1825   v = CSELIB_VAL_PTR (x);
1826   if (v)
1827     {
1828       bool have_equivs = cselib_have_permanent_equivalences ();
1829       if (have_equivs)
1830 	v = canonical_cselib_val (v);
1831       for (l = v->locs; l; l = l->next)
1832 	if (CONSTANT_P (l->loc))
1833 	  return l->loc;
1834       for (l = v->locs; l; l = l->next)
1835 	if (!REG_P (l->loc) && !MEM_P (l->loc)
1836 	    /* Avoid infinite recursion when potentially dealing with
1837 	       var-tracking artificial equivalences, by skipping the
1838 	       equivalences themselves, and not choosing expressions
1839 	       that refer to newer VALUEs.  */
1840 	    && (!have_equivs
1841 		|| (GET_CODE (l->loc) != VALUE
1842 		    && !refs_newer_value_p (l->loc, x))))
1843 	  return l->loc;
1844       if (have_equivs)
1845 	{
1846 	  for (l = v->locs; l; l = l->next)
1847 	    if (REG_P (l->loc)
1848 		|| (GET_CODE (l->loc) != VALUE
1849 		    && !refs_newer_value_p (l->loc, x)))
1850 	      return l->loc;
1851 	  /* Return the canonical value.  */
1852 	  return v->val_rtx;
1853 	}
1854       if (v->locs)
1855 	return v->locs->loc;
1856     }
1857   return x;
1858 }
1859 
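/* Illustration of get_addr: if X is a VALUE whose cselib locations are
   { (reg 54), (symbol_ref "x") }, the first loop above returns the
   constant (symbol_ref "x"); absent a constant location, the first
   suitable non-REG, non-MEM location is preferred instead.  (Register
   number hypothetical.)  */
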
1860 /* Return the address of the (N_REFS + 1)th memory reference to ADDR
1861    where SIZE is the size in bytes of the memory reference.  If ADDR
1862    is not modified by the memory reference, then ADDR is returned.  */
1863 
1864 static rtx
1865 addr_side_effect_eval (rtx addr, int size, int n_refs)
1866 {
1867   int offset = 0;
1868 
1869   switch (GET_CODE (addr))
1870     {
1871     case PRE_INC:
1872       offset = (n_refs + 1) * size;
1873       break;
1874     case PRE_DEC:
1875       offset = -(n_refs + 1) * size;
1876       break;
1877     case POST_INC:
1878       offset = n_refs * size;
1879       break;
1880     case POST_DEC:
1881       offset = -n_refs * size;
1882       break;
1883 
1884     default:
1885       return addr;
1886     }
1887 
1888   if (offset)
1889     addr = gen_rtx_PLUS (GET_MODE (addr), XEXP (addr, 0),
1890 			 GEN_INT (offset));
1891   else
1892     addr = XEXP (addr, 0);
1893   addr = canon_rtx (addr);
1894 
1895   return addr;
1896 }
1897 
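/* Worked example for addr_side_effect_eval (register number
   hypothetical): for the address (post_inc (reg 54)) with SIZE == 4, the
   first reference (N_REFS == 0) evaluates to (reg 54) itself, while the
   second (N_REFS == 1) becomes the canonicalized
   (plus (reg 54) (const_int 4)); a pre_inc yields offsets 4 and 8
   instead.  */
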
1898 /* Return TRUE if an object X sized at XSIZE bytes and another object
1899    Y sized at YSIZE bytes, starting C bytes after X, may overlap.  If
1900    any of the sizes is zero, assume an overlap, otherwise use the
1901    absolute value of the sizes as the actual sizes.  */
1902 
1903 static inline bool
1904 offset_overlap_p (HOST_WIDE_INT c, int xsize, int ysize)
1905 {
1906   return (xsize == 0 || ysize == 0
1907 	  || (c >= 0
1908 	      ? (abs (xsize) > c)
1909 	      : (abs (ysize) > -c)));
1910 }
1911 
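/* Worked example for offset_overlap_p: X is 4 bytes and Y starts C bytes
   after it.  With C == 2 the test abs (4) > 2 holds, so an overlap is
   reported; with C == 4 the references are exactly adjacent, 4 > 4 fails,
   and no overlap is reported.  A zero size (an unknown-extent BLKmode
   reference) always reports a potential overlap.  */
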
1912 /* Return one if X and Y (memory addresses) reference the
1913    same location in memory or if the references overlap.
1914    Return zero if they do not overlap; otherwise return
1915    minus one, in which case they might still reference the same location.
1916 
1917    C is an offset accumulator.  When
1918    C is nonzero, we are testing aliases between X and Y + C.
1919    XSIZE is the size in bytes of the X reference,
1920    similarly YSIZE is the size in bytes for Y.
1921    Expect that canon_rtx has been already called for X and Y.
1922 
1923    If XSIZE or YSIZE is zero, we do not know the amount of memory being
1924    referenced (the reference was BLKmode), so make the most pessimistic
1925    assumptions.
1926 
1927    If XSIZE or YSIZE is negative, we may access memory outside the object
1928    being referenced as a side effect.  This can happen when using AND to
1929    align memory references, as is done on the Alpha.
1930 
1931    It would be nice to notice that varying addresses cannot conflict with
1932    fp if no local variables had their addresses taken, but that's too hard now.
1933 
1934    ???  Contrary to the tree alias oracle this does not return
1935    one for X + non-constant and Y + non-constant when X and Y are equal.
1936    If that is fixed the TBAA hack for union type-punning can be removed.  */
1937 
1938 static int
1939 memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
1940 {
1941   if (GET_CODE (x) == VALUE)
1942     {
1943       if (REG_P (y))
1944 	{
1945 	  struct elt_loc_list *l = NULL;
1946 	  if (CSELIB_VAL_PTR (x))
1947 	    for (l = canonical_cselib_val (CSELIB_VAL_PTR (x))->locs;
1948 		 l; l = l->next)
1949 	      if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, y))
1950 		break;
1951 	  if (l)
1952 	    x = y;
1953 	  else
1954 	    x = get_addr (x);
1955 	}
1956       /* Don't call get_addr if y is the same VALUE.  */
1957       else if (x != y)
1958 	x = get_addr (x);
1959     }
1960   if (GET_CODE (y) == VALUE)
1961     {
1962       if (REG_P (x))
1963 	{
1964 	  struct elt_loc_list *l = NULL;
1965 	  if (CSELIB_VAL_PTR (y))
1966 	    for (l = canonical_cselib_val (CSELIB_VAL_PTR (y))->locs;
1967 		 l; l = l->next)
1968 	      if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, x))
1969 		break;
1970 	  if (l)
1971 	    y = x;
1972 	  else
1973 	    y = get_addr (y);
1974 	}
1975       /* Don't call get_addr if x is the same VALUE.  */
1976       else if (y != x)
1977 	y = get_addr (y);
1978     }
1979   if (GET_CODE (x) == HIGH)
1980     x = XEXP (x, 0);
1981   else if (GET_CODE (x) == LO_SUM)
1982     x = XEXP (x, 1);
1983   else
1984     x = addr_side_effect_eval (x, abs (xsize), 0);
1985   if (GET_CODE (y) == HIGH)
1986     y = XEXP (y, 0);
1987   else if (GET_CODE (y) == LO_SUM)
1988     y = XEXP (y, 1);
1989   else
1990     y = addr_side_effect_eval (y, abs (ysize), 0);
1991 
1992   if (rtx_equal_for_memref_p (x, y))
1993     {
1994       return offset_overlap_p (c, xsize, ysize);
1995     }
1996 
1997   /* This code used to check for conflicts involving stack references and
1998      globals but the base address alias code now handles these cases.  */
1999 
2000   if (GET_CODE (x) == PLUS)
2001     {
2002       /* The fact that X is canonicalized means that this
2003 	 PLUS rtx is canonicalized.  */
2004       rtx x0 = XEXP (x, 0);
2005       rtx x1 = XEXP (x, 1);
2006 
2007       if (GET_CODE (y) == PLUS)
2008 	{
2009 	  /* The fact that Y is canonicalized means that this
2010 	     PLUS rtx is canonicalized.  */
2011 	  rtx y0 = XEXP (y, 0);
2012 	  rtx y1 = XEXP (y, 1);
2013 
2014 	  if (rtx_equal_for_memref_p (x1, y1))
2015 	    return memrefs_conflict_p (xsize, x0, ysize, y0, c);
2016 	  if (rtx_equal_for_memref_p (x0, y0))
2017 	    return memrefs_conflict_p (xsize, x1, ysize, y1, c);
2018 	  if (CONST_INT_P (x1))
2019 	    {
2020 	      if (CONST_INT_P (y1))
2021 		return memrefs_conflict_p (xsize, x0, ysize, y0,
2022 					   c - INTVAL (x1) + INTVAL (y1));
2023 	      else
2024 		return memrefs_conflict_p (xsize, x0, ysize, y,
2025 					   c - INTVAL (x1));
2026 	    }
2027 	  else if (CONST_INT_P (y1))
2028 	    return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
2029 
2030 	  return -1;
2031 	}
2032       else if (CONST_INT_P (x1))
2033 	return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
2034     }
2035   else if (GET_CODE (y) == PLUS)
2036     {
2037       /* The fact that Y is canonicalized means that this
2038 	 PLUS rtx is canonicalized.  */
2039       rtx y0 = XEXP (y, 0);
2040       rtx y1 = XEXP (y, 1);
2041 
2042       if (CONST_INT_P (y1))
2043 	return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
2044       else
2045 	return -1;
2046     }
2047 
2048   if (GET_CODE (x) == GET_CODE (y))
2049     switch (GET_CODE (x))
2050       {
2051       case MULT:
2052 	{
2053 	  /* Handle cases where we expect the second operands to be the
2054 	     same, and check only whether the first operand would conflict
2055 	     or not.  */
2056 	  rtx x0, y0;
2057 	  rtx x1 = canon_rtx (XEXP (x, 1));
2058 	  rtx y1 = canon_rtx (XEXP (y, 1));
2059 	  if (! rtx_equal_for_memref_p (x1, y1))
2060 	    return -1;
2061 	  x0 = canon_rtx (XEXP (x, 0));
2062 	  y0 = canon_rtx (XEXP (y, 0));
2063 	  if (rtx_equal_for_memref_p (x0, y0))
2064 	    return offset_overlap_p (c, xsize, ysize);
2065 
2066 	  /* Can't properly adjust our sizes.  */
2067 	  if (!CONST_INT_P (x1))
2068 	    return -1;
2069 	  xsize /= INTVAL (x1);
2070 	  ysize /= INTVAL (x1);
2071 	  c /= INTVAL (x1);
2072 	  return memrefs_conflict_p (xsize, x0, ysize, y0, c);
2073 	}
2074 
2075       default:
2076 	break;
2077       }
2078 
2079   /* Deal with alignment ANDs by adjusting offset and size so as to
2080      cover the maximum range, without taking any previously known
2081      alignment into account.  Make a size negative after such an
2082      adjustment, so that, if we end up with e.g. two SYMBOL_REFs, we
2083      assume a potential overlap, because they may end up in contiguous
2084      memory locations and the stricter-alignment access may span over
2085      part of both.  */
2086   if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1)))
2087     {
2088       HOST_WIDE_INT sc = INTVAL (XEXP (x, 1));
2089       unsigned HOST_WIDE_INT uc = sc;
2090       if (sc < 0 && -uc == (uc & -uc))
2091 	{
2092 	  if (xsize > 0)
2093 	    xsize = -xsize;
2094 	  if (xsize)
2095 	    xsize += sc + 1;
2096 	  c -= sc + 1;
2097 	  return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2098 				     ysize, y, c);
2099 	}
2100     }
2101   if (GET_CODE (y) == AND && CONST_INT_P (XEXP (y, 1)))
2102     {
2103       HOST_WIDE_INT sc = INTVAL (XEXP (y, 1));
2104       unsigned HOST_WIDE_INT uc = sc;
2105       if (sc < 0 && -uc == (uc & -uc))
2106 	{
2107 	  if (ysize > 0)
2108 	    ysize = -ysize;
2109 	  if (ysize)
2110 	    ysize += sc + 1;
2111 	  c += sc + 1;
2112 	  return memrefs_conflict_p (xsize, x,
2113 				     ysize, canon_rtx (XEXP (y, 0)), c);
2114 	}
2115     }
2116 
2117   if (CONSTANT_P (x))
2118     {
2119       if (CONST_INT_P (x) && CONST_INT_P (y))
2120 	{
2121 	  c += (INTVAL (y) - INTVAL (x));
2122 	  return offset_overlap_p (c, xsize, ysize);
2123 	}
2124 
2125       if (GET_CODE (x) == CONST)
2126 	{
2127 	  if (GET_CODE (y) == CONST)
2128 	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2129 				       ysize, canon_rtx (XEXP (y, 0)), c);
2130 	  else
2131 	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2132 				       ysize, y, c);
2133 	}
2134       if (GET_CODE (y) == CONST)
2135 	return memrefs_conflict_p (xsize, x, ysize,
2136 				   canon_rtx (XEXP (y, 0)), c);
2137 
2138       /* Assume a potential overlap for symbolic addresses that went
2139 	 through alignment adjustments (i.e., that have negative
2140 	 sizes), because we can't know how far they are from each
2141 	 other.  */
2142       if (CONSTANT_P (y))
2143 	return (xsize < 0 || ysize < 0 || offset_overlap_p (c, xsize, ysize));
2144 
2145       return -1;
2146     }
2147 
2148   return -1;
2149 }
2150 
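/* An illustrative call of memrefs_conflict_p (register number
   hypothetical): comparing a 4-byte reference at
   (plus (reg 54) (const_int 8)) against a 4-byte reference at (reg 54)
   peels the constant off the PLUS and recurses with C reduced by 8; the
   remaining addresses compare equal, and offset_overlap_p (-8, 4, 4)
   finds 4 > 8 false, so 0 (no conflict) is returned.  */
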
2151 /* Functions to compute memory dependencies.
2152 
2153    Since we process the insns in execution order, we can build tables
2154    to keep track of what registers are fixed (and not aliased), what registers
2155    are varying in known ways, and what registers are varying in unknown
2156    ways.
2157 
2158    If both memory references are volatile, then there must always be a
2159    dependence between the two references, since their order cannot be
2160    changed.  A volatile and non-volatile reference can be interchanged
2161    though.
2162 
2163    We also must allow AND addresses, because they may generate accesses
2164    outside the object being referenced.  This is used to generate aligned
2165    addresses from unaligned addresses, for instance, the alpha
2166    storeqi_unaligned pattern.  */
2167 
2168 /* Read dependence: X is read after read in MEM takes place.  There can
2169    only be a dependence here if both reads are volatile, or if either is
2170    an explicit barrier.  */
2171 
2172 int
2173 read_dependence (const_rtx mem, const_rtx x)
2174 {
2175   if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2176     return true;
2177   if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2178       || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2179     return true;
2180   return false;
2181 }
2182 
2183 /* Return true if we can determine that the fields referenced cannot
2184    overlap for any pair of objects.  */
2185 
2186 static bool
2187 nonoverlapping_component_refs_p (const_rtx rtlx, const_rtx rtly)
2188 {
2189   const_tree x = MEM_EXPR (rtlx), y = MEM_EXPR (rtly);
2190   const_tree fieldx, fieldy, typex, typey, orig_y;
2191 
2192   if (!flag_strict_aliasing
2193       || !x || !y
2194       || TREE_CODE (x) != COMPONENT_REF
2195       || TREE_CODE (y) != COMPONENT_REF)
2196     return false;
2197 
2198   do
2199     {
2200       /* The comparison has to be done at a common type, since we don't
2201 	 know how the inheritance hierarchy works.  */
2202       orig_y = y;
2203       do
2204 	{
2205 	  fieldx = TREE_OPERAND (x, 1);
2206 	  typex = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldx));
2207 
2208 	  y = orig_y;
2209 	  do
2210 	    {
2211 	      fieldy = TREE_OPERAND (y, 1);
2212 	      typey = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldy));
2213 
2214 	      if (typex == typey)
2215 		goto found;
2216 
2217 	      y = TREE_OPERAND (y, 0);
2218 	    }
2219 	  while (y && TREE_CODE (y) == COMPONENT_REF);
2220 
2221 	  x = TREE_OPERAND (x, 0);
2222 	}
2223       while (x && TREE_CODE (x) == COMPONENT_REF);
2224       /* Never found a common type.  */
2225       return false;
2226 
2227     found:
2228       /* If we're left with accessing different fields of a structure, then
2229 	 no overlap is possible, unless they are both bitfields.  */
2230       if (TREE_CODE (typex) == RECORD_TYPE && fieldx != fieldy)
2231 	return !(DECL_BIT_FIELD (fieldx) && DECL_BIT_FIELD (fieldy));
2232 
2233       /* The comparison on the current field failed.  If we're accessing
2234 	 a deeply nested structure, look at the next outer level.  */
2235       x = TREE_OPERAND (x, 0);
2236       y = TREE_OPERAND (y, 0);
2237     }
2238   while (x && y
2239 	 && TREE_CODE (x) == COMPONENT_REF
2240 	 && TREE_CODE (y) == COMPONENT_REF);
2241 
2242   return false;
2243 }
2244 
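/* For example, given

       struct S { int a; int b; } *p, *q;

   the accesses p->a and q->b are COMPONENT_REFs whose FIELD_DECLs share
   one RECORD_TYPE; the fields differ and neither is a bit-field, so
   nonoverlapping_component_refs_p reports that the references cannot
   overlap, even though p and q themselves were never disambiguated
   (this path requires flag_strict_aliasing).  */
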
2245 /* Look at the bottom of the COMPONENT_REF list for a DECL, and return it.  */
2246 
2247 static tree
2248 decl_for_component_ref (tree x)
2249 {
2250   do
2251     {
2252       x = TREE_OPERAND (x, 0);
2253     }
2254   while (x && TREE_CODE (x) == COMPONENT_REF);
2255 
2256   return x && DECL_P (x) ? x : NULL_TREE;
2257 }
2258 
2259 /* Walk up the COMPONENT_REF list in X and adjust *OFFSET to compensate
2260    for the offset of the field reference.  *KNOWN_P says whether the
2261    offset is known.  */
2262 
2263 static void
2264 adjust_offset_for_component_ref (tree x, bool *known_p,
2265 				 HOST_WIDE_INT *offset)
2266 {
2267   if (!*known_p)
2268     return;
2269   do
2270     {
2271       tree xoffset = component_ref_field_offset (x);
2272       tree field = TREE_OPERAND (x, 1);
2273 
2274       if (! host_integerp (xoffset, 1))
2275 	{
2276 	  *known_p = false;
2277 	  return;
2278 	}
2279       *offset += (tree_low_cst (xoffset, 1)
2280 		  + (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
2281 		     / BITS_PER_UNIT));
2282 
2283       x = TREE_OPERAND (x, 0);
2284     }
2285   while (x && TREE_CODE (x) == COMPONENT_REF);
2286 }
2287 
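/* Worked example for adjust_offset_for_component_ref (layout
   hypothetical): for an access x.f.g where field f sits at byte offset 8
   of its record and g at byte offset 4 within f, the loop above adds 4
   for the inner COMPONENT_REF and then 8 for the outer one, leaving
   *OFFSET displaced by 12 from the start of x; a variable field offset
   clears *KNOWN_P instead.  */
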
2288 /* Return nonzero if we can determine the exprs corresponding to memrefs
2289    X and Y and they do not overlap.
2290    If LOOP_INVARIANT is set, skip offset-based disambiguation.  */
2291 
2292 int
2293 nonoverlapping_memrefs_p (const_rtx x, const_rtx y, bool loop_invariant)
2294 {
2295   tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y);
2296   rtx rtlx, rtly;
2297   rtx basex, basey;
2298   bool moffsetx_known_p, moffsety_known_p;
2299   HOST_WIDE_INT moffsetx = 0, moffsety = 0;
2300   HOST_WIDE_INT offsetx = 0, offsety = 0, sizex, sizey, tem;
2301 
2302   /* Unless both have exprs, we can't tell anything.  */
2303   if (exprx == 0 || expry == 0)
2304     return 0;
2305 
2306   /* For spill-slot accesses, make sure we have valid offsets.  */
2307   if ((exprx == get_spill_slot_decl (false)
2308        && ! MEM_OFFSET_KNOWN_P (x))
2309       || (expry == get_spill_slot_decl (false)
2310 	  && ! MEM_OFFSET_KNOWN_P (y)))
2311     return 0;
2312 
2313   /* If the field reference test failed, look at the DECLs involved.  */
2314   moffsetx_known_p = MEM_OFFSET_KNOWN_P (x);
2315   if (moffsetx_known_p)
2316     moffsetx = MEM_OFFSET (x);
2317   if (TREE_CODE (exprx) == COMPONENT_REF)
2318     {
2319       tree t = decl_for_component_ref (exprx);
2320       if (! t)
2321 	return 0;
2322       adjust_offset_for_component_ref (exprx, &moffsetx_known_p, &moffsetx);
2323       exprx = t;
2324     }
2325 
2326   moffsety_known_p = MEM_OFFSET_KNOWN_P (y);
2327   if (moffsety_known_p)
2328     moffsety = MEM_OFFSET (y);
2329   if (TREE_CODE (expry) == COMPONENT_REF)
2330     {
2331       tree t = decl_for_component_ref (expry);
2332       if (! t)
2333 	return 0;
2334       adjust_offset_for_component_ref (expry, &moffsety_known_p, &moffsety);
2335       expry = t;
2336     }
2337 
2338   if (! DECL_P (exprx) || ! DECL_P (expry))
2339     return 0;
2340 
2341   /* With invalid code we can end up storing into the constant pool.
2342      Bail out to avoid ICEing when creating RTL for this.
2343      See gfortran.dg/lto/20091028-2_0.f90.  */
2344   if (TREE_CODE (exprx) == CONST_DECL
2345       || TREE_CODE (expry) == CONST_DECL)
2346     return 1;
2347 
2348   rtlx = DECL_RTL (exprx);
2349   rtly = DECL_RTL (expry);
2350 
2351   /* If either RTL is not a MEM, it must be a REG or CONCAT, meaning they
2352      can't overlap unless they are the same, because we never reuse the part
2353      of the stack frame used for locals to hold spilled pseudos.  */
2354   if ((!MEM_P (rtlx) || !MEM_P (rtly))
2355       && ! rtx_equal_p (rtlx, rtly))
2356     return 1;
2357 
2358   /* If we have MEMs referring to different address spaces (which can
2359      potentially overlap), we cannot easily tell from the addresses
2360      whether the references overlap.  */
2361   if (MEM_P (rtlx) && MEM_P (rtly)
2362       && MEM_ADDR_SPACE (rtlx) != MEM_ADDR_SPACE (rtly))
2363     return 0;
2364 
2365   /* Get the base and offsets of both decls.  If either is a register, we
2366      know both are and are the same, so use that as the base.  The only
2367      way we can avoid overlap is if we can deduce that they are nonoverlapping
2368      pieces of that decl, which is very rare.  */
2369   basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx;
2370   if (GET_CODE (basex) == PLUS && CONST_INT_P (XEXP (basex, 1)))
2371     offsetx = INTVAL (XEXP (basex, 1)), basex = XEXP (basex, 0);
2372 
2373   basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly;
2374   if (GET_CODE (basey) == PLUS && CONST_INT_P (XEXP (basey, 1)))
2375     offsety = INTVAL (XEXP (basey, 1)), basey = XEXP (basey, 0);
2376 
2377   /* If the bases are different, we know they do not overlap if both
2378      are constants or if one is a constant and the other a pointer into the
2379      stack frame.  Otherwise a different base means we can't tell if they
2380      overlap or not.  */
2381   if (! rtx_equal_p (basex, basey))
2382     return ((CONSTANT_P (basex) && CONSTANT_P (basey))
2383 	    || (CONSTANT_P (basex) && REG_P (basey)
2384 		&& REGNO_PTR_FRAME_P (REGNO (basey)))
2385 	    || (CONSTANT_P (basey) && REG_P (basex)
2386 		&& REGNO_PTR_FRAME_P (REGNO (basex))));
2387 
2388   /* Offset-based disambiguation is not appropriate for the loop-invariant case.  */
2389   if (loop_invariant)
2390     return 0;
2391 
2392   sizex = (!MEM_P (rtlx) ? (int) GET_MODE_SIZE (GET_MODE (rtlx))
2393 	   : MEM_SIZE_KNOWN_P (rtlx) ? MEM_SIZE (rtlx)
2394 	   : -1);
2395   sizey = (!MEM_P (rtly) ? (int) GET_MODE_SIZE (GET_MODE (rtly))
2396 	   : MEM_SIZE_KNOWN_P (rtly) ? MEM_SIZE (rtly)
2397 	   : -1);
2398 
2399   /* If we have an offset for either memref, it can update the values computed
2400      above.  */
2401   if (moffsetx_known_p)
2402     offsetx += moffsetx, sizex -= moffsetx;
2403   if (moffsety_known_p)
2404     offsety += moffsety, sizey -= moffsety;
2405 
2406   /* If a memref has both a size and an offset, we can use the smaller size.
2407      We can't do this if the offset isn't known because we must view this
2408      memref as being anywhere inside the DECL's MEM.  */
2409   if (MEM_SIZE_KNOWN_P (x) && moffsetx_known_p)
2410     sizex = MEM_SIZE (x);
2411   if (MEM_SIZE_KNOWN_P (y) && moffsety_known_p)
2412     sizey = MEM_SIZE (y);
2413 
2414   /* Put the values of the memref with the lower offset in X's values.  */
2415   if (offsetx > offsety)
2416     {
2417       tem = offsetx, offsetx = offsety, offsety = tem;
2418       tem = sizex, sizex = sizey, sizey = tem;
2419     }
2420 
2421   /* If we don't know the size of the lower-offset value, we can't tell
2422      if they conflict.  Otherwise, we do the test.  */
2423   return sizex >= 0 && offsety >= offsetx + sizex;
2424 }
2425 
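/* Sketch of the final test in nonoverlapping_memrefs_p: once X holds the
   lower offset, the references are independent exactly when
   OFFSETY >= OFFSETX + SIZEX, e.g. 4 bytes at offset 0 against 4 bytes
   at offset 8 of the same DECL.  An unknown SIZEX (-1) fails the test
   and conservatively reports a possible overlap.  */
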
2426 /* Helper for true_dependence and canon_true_dependence.
2427    Checks for true dependence: X is read after store in MEM takes place.
2428 
2429    If MEM_CANONICALIZED is FALSE, then X_ADDR and MEM_ADDR should be
2430    NULL_RTX, and the canonical addresses of MEM and X are both computed
2431    here.  If MEM_CANONICALIZED, then MEM must be already canonicalized.
2432 
2433    If X_ADDR is non-NULL, it is used in preference to XEXP (x, 0).
2434 
2435    Returns 1 if there is a true dependence, 0 otherwise.  */
2436 
2437 static int
2438 true_dependence_1 (const_rtx mem, enum machine_mode mem_mode, rtx mem_addr,
2439 		   const_rtx x, rtx x_addr, bool mem_canonicalized)
2440 {
2441   rtx true_mem_addr;
2442   rtx base;
2443   int ret;
2444 
2445   gcc_checking_assert (mem_canonicalized ? (mem_addr != NULL_RTX)
2446 		       : (mem_addr == NULL_RTX && x_addr == NULL_RTX));
2447 
2448   if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2449     return 1;
2450 
2451   /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2452      This is used in epilogue deallocation functions, and in cselib.  */
2453   if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2454     return 1;
2455   if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2456     return 1;
2457   if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2458       || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2459     return 1;
2460 
2461   if (! x_addr)
2462     x_addr = XEXP (x, 0);
2463   x_addr = get_addr (x_addr);
2464 
2465   if (! mem_addr)
2466     {
2467       mem_addr = XEXP (mem, 0);
2468       if (mem_mode == VOIDmode)
2469 	mem_mode = GET_MODE (mem);
2470     }
2471   true_mem_addr = get_addr (mem_addr);
2472 
2473   /* Read-only memory is by definition never modified, and therefore can't
2474      conflict with anything.  However, don't assume anything when AND
2475      addresses are involved and leave it to the code below to determine
2476      dependence.  We don't expect to find the read-only flag set on MEM, but
2477      stupid user tricks can produce them, so don't die.  */
2478   if (MEM_READONLY_P (x)
2479       && GET_CODE (x_addr) != AND
2480       && GET_CODE (true_mem_addr) != AND)
2481     return 0;
2482 
2483   /* If we have MEMs referring to different address spaces (which can
2484      potentially overlap), we cannot easily tell from the addresses
2485      whether the references overlap.  */
2486   if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2487     return 1;
2488 
2489   base = find_base_term (x_addr);
2490   if (base && (GET_CODE (base) == LABEL_REF
2491 	       || (GET_CODE (base) == SYMBOL_REF
2492 		   && CONSTANT_POOL_ADDRESS_P (base))))
2493     return 0;
2494 
2495   rtx mem_base = find_base_term (true_mem_addr);
2496   if (! base_alias_check (x_addr, base, true_mem_addr, mem_base,
2497 			  GET_MODE (x), mem_mode))
2498     return 0;
2499 
2500   x_addr = canon_rtx (x_addr);
2501   if (!mem_canonicalized)
2502     mem_addr = canon_rtx (true_mem_addr);
2503 
2504   if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
2505 				 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
2506     return ret;
2507 
2508   if (mems_in_disjoint_alias_sets_p (x, mem))
2509     return 0;
2510 
2511   if (nonoverlapping_memrefs_p (mem, x, false))
2512     return 0;
2513 
2514   if (nonoverlapping_component_refs_p (mem, x))
2515     return 0;
2516 
2517   return rtx_refs_may_alias_p (x, mem, true);
2518 }
2519 
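/* A minimal walk through the filters in true_dependence_1 (register
   number hypothetical): for a store to (mem:SI (symbol_ref "x"))
   followed by a load from (mem:SI (reg 54)), the base terms are the
   SYMBOL_REF and REG_BASE_VALUE (reg 54); if base_alias_check proves
   them distinct, 0 is returned without consulting the tree oracle.  */
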
2520 /* True dependence: X is read after store in MEM takes place.  */
2521 
2522 int
2523 true_dependence (const_rtx mem, enum machine_mode mem_mode, const_rtx x)
2524 {
2525   return true_dependence_1 (mem, mem_mode, NULL_RTX,
2526 			    x, NULL_RTX, /*mem_canonicalized=*/false);
2527 }
2528 
2529 /* Canonical true dependence: X is read after store in MEM takes place.
2530    Variant of true_dependence which assumes MEM has already been
2531    canonicalized (hence we no longer do that here).
2532    The mem_addr argument has been added, since true_dependence_1 computed
2533    this value prior to canonicalizing.  */
2534 
2535 int
2536 canon_true_dependence (const_rtx mem, enum machine_mode mem_mode, rtx mem_addr,
2537 		       const_rtx x, rtx x_addr)
2538 {
2539   return true_dependence_1 (mem, mem_mode, mem_addr,
2540 			    x, x_addr, /*mem_canonicalized=*/true);
2541 }
2542 
2543 /* Returns nonzero if a write to X might alias a previous read from
2544    (or, if WRITEP is true, a write to) MEM.
2545    If X_CANONICALIZED is true, then X_ADDR is the canonicalized address of X,
2546    and X_MODE the mode for that access.
2547    If MEM_CANONICALIZED is true, MEM is canonicalized.  */
2548 
2549 static int
2550 write_dependence_p (const_rtx mem,
2551 		    const_rtx x, enum machine_mode x_mode, rtx x_addr,
2552 		    bool mem_canonicalized, bool x_canonicalized, bool writep)
2553 {
2554   rtx mem_addr;
2555   rtx true_mem_addr, true_x_addr;
2556   rtx base;
2557   int ret;
2558 
2559   gcc_checking_assert (x_canonicalized
2560 		       ? (x_addr != NULL_RTX && x_mode != VOIDmode)
2561 		       : (x_addr == NULL_RTX && x_mode == VOIDmode));
2562 
2563   if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2564     return 1;
2565 
2566   /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2567      This is used in epilogue deallocation functions.  */
2568   if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2569     return 1;
2570   if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2571     return 1;
2572   if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2573       || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2574     return 1;
2575 
2576   if (!x_addr)
2577     x_addr = XEXP (x, 0);
2578   true_x_addr = get_addr (x_addr);
2579 
2580   mem_addr = XEXP (mem, 0);
2581   true_mem_addr = get_addr (mem_addr);
2582 
2583   /* A read from read-only memory can't conflict with read-write memory.
2584      Don't assume anything when AND addresses are involved and leave it
2585      to the code below to determine dependence.  */
2586   if (!writep
2587       && MEM_READONLY_P (mem)
2588       && GET_CODE (true_x_addr) != AND
2589       && GET_CODE (true_mem_addr) != AND)
2590     return 0;
2591 
2592   /* If we have MEMs referring to different address spaces (which can
2593      potentially overlap), we cannot easily tell from the addresses
2594      whether the references overlap.  */
2595   if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2596     return 1;
2597 
2598   base = find_base_term (true_mem_addr);
2599   if (! writep
2600       && base
2601       && (GET_CODE (base) == LABEL_REF
2602 	  || (GET_CODE (base) == SYMBOL_REF
2603 	      && CONSTANT_POOL_ADDRESS_P (base))))
2604     return 0;
2605 
2606   rtx x_base = find_base_term (true_x_addr);
2607   if (! base_alias_check (true_x_addr, x_base, true_mem_addr, base,
2608 			  GET_MODE (x), GET_MODE (mem)))
2609     return 0;
2610 
2611   if (!x_canonicalized)
2612     {
2613       x_addr = canon_rtx (true_x_addr);
2614       x_mode = GET_MODE (x);
2615     }
2616   if (!mem_canonicalized)
2617     mem_addr = canon_rtx (true_mem_addr);
2618 
2619   if ((ret = memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
2620 				 GET_MODE_SIZE (x_mode), x_addr, 0)) != -1)
2621     return ret;
2622 
2623   if (nonoverlapping_memrefs_p (x, mem, false))
2624     return 0;
2625 
2626   return rtx_refs_may_alias_p (x, mem, false);
2627 }
2628 
2629 /* Anti dependence: X is written after read in MEM takes place.  */
2630 
2631 int
2632 anti_dependence (const_rtx mem, const_rtx x)
2633 {
2634   return write_dependence_p (mem, x, VOIDmode, NULL_RTX,
2635 			     /*mem_canonicalized=*/false,
2636 			     /*x_canonicalized=*/false, /*writep=*/false);
2637 }
2638 
2639 /* Likewise, but we already have a canonicalized MEM, and X_ADDR for X.
2640    Also, consider X in X_MODE (which might be from an enclosing
2641    STRICT_LOW_PART / ZERO_EXTRACT).
2642    If MEM_CANONICALIZED is true, MEM is canonicalized.  */
2643 
2644 int
2645 canon_anti_dependence (const_rtx mem, bool mem_canonicalized,
2646 		       const_rtx x, enum machine_mode x_mode, rtx x_addr)
2647 {
2648   return write_dependence_p (mem, x, x_mode, x_addr,
2649 			     mem_canonicalized, /*x_canonicalized=*/true,
2650 			     /*writep=*/false);
2651 }
2652 
2653 /* Output dependence: X is written after store in MEM takes place.  */
2654 
2655 int
2656 output_dependence (const_rtx mem, const_rtx x)
2657 {
2658   return write_dependence_p (mem, x, VOIDmode, NULL_RTX,
2659 			     /*mem_canonicalized=*/false,
2660 			     /*x_canonicalized=*/false, /*writep=*/true);
2661 }
2662 
2663 
2664 
2665 /* Check whether X may be aliased with MEM.  Don't do offset-based
2666    memory disambiguation or TBAA.  */
2667 int
2668 may_alias_p (const_rtx mem, const_rtx x)
2669 {
2670   rtx x_addr, mem_addr;
2671 
2672   if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2673     return 1;
2674 
2675   /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2676      This is used in epilogue deallocation functions.  */
2677   if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2678     return 1;
2679   if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2680     return 1;
2681   if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2682       || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2683     return 1;
2684 
2685   x_addr = XEXP (x, 0);
2686   x_addr = get_addr (x_addr);
2687 
2688   mem_addr = XEXP (mem, 0);
2689   mem_addr = get_addr (mem_addr);
2690 
2691   /* Read-only memory is by definition never modified, and therefore can't
2692      conflict with anything.  However, don't assume anything when AND
2693      addresses are involved and leave it to the code below to determine
2694      dependence.  We don't expect to find the read-only flag set on MEM, but
2695      stupid user tricks can produce them, so don't die.  */
2696   if (MEM_READONLY_P (x)
2697       && GET_CODE (x_addr) != AND
2698       && GET_CODE (mem_addr) != AND)
2699     return 0;
2700 
2701   /* If we have MEMs referring to different address spaces (which can
2702      potentially overlap), we cannot easily tell from the addresses
2703      whether the references overlap.  */
2704   if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2705     return 1;
2706 
2707   rtx x_base = find_base_term (x_addr);
2708   rtx mem_base = find_base_term (mem_addr);
2709   if (! base_alias_check (x_addr, x_base, mem_addr, mem_base,
2710 			  GET_MODE (x), GET_MODE (mem_addr)))
2711     return 0;
2712 
2713   if (nonoverlapping_memrefs_p (mem, x, true))
2714     return 0;
2715 
2716   /* TBAA is not valid for the loop-invariant case.  */
2717   return rtx_refs_may_alias_p (x, mem, false);
2718 }
2719 
2720 void
2721 init_alias_target (void)
2722 {
2723   int i;
2724 
2725   if (!arg_base_value)
2726     arg_base_value = gen_rtx_ADDRESS (VOIDmode, 0);
2727 
2728   memset (static_reg_base_value, 0, sizeof static_reg_base_value);
2729 
2730   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2731     /* Check whether this register can hold an incoming pointer
2732        argument.  FUNCTION_ARG_REGNO_P tests outgoing register
2733        numbers, so translate if necessary due to register windows.  */
2734     if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i))
2735 	&& HARD_REGNO_MODE_OK (i, Pmode))
2736       static_reg_base_value[i] = arg_base_value;
2737 
2738   static_reg_base_value[STACK_POINTER_REGNUM]
2739     = unique_base_value (UNIQUE_BASE_VALUE_SP);
2740   static_reg_base_value[ARG_POINTER_REGNUM]
2741     = unique_base_value (UNIQUE_BASE_VALUE_ARGP);
2742   static_reg_base_value[FRAME_POINTER_REGNUM]
2743     = unique_base_value (UNIQUE_BASE_VALUE_FP);
2744 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
2745   static_reg_base_value[HARD_FRAME_POINTER_REGNUM]
2746     = unique_base_value (UNIQUE_BASE_VALUE_HFP);
2747 #endif
2748 }
2749 
2750 /* Set MEMORY_MODIFIED when X modifies DATA (which is assumed
2751    to be a memory reference).  */
2752 static bool memory_modified;
2753 static void
2754 memory_modified_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
2755 {
2756   if (MEM_P (x))
2757     {
2758       if (anti_dependence (x, (const_rtx)data) || output_dependence (x, (const_rtx)data))
2759 	memory_modified = true;
2760     }
2761 }
2762 
2763 
2764 /* Return true when INSN may modify the memory contents of MEM
2765    (i.e. the memory at its address can be modified).  */
2766 bool
2767 memory_modified_in_insn_p (const_rtx mem, const_rtx insn)
2768 {
2769   if (!INSN_P (insn))
2770     return false;
2771   memory_modified = false;
2772   note_stores (PATTERN (insn), memory_modified_1, CONST_CAST_RTX(mem));
2773   return memory_modified;
2774 }
2775 
2776 /* Return TRUE if the destination of a set is rtx identical to
2777    ITEM.  */
2778 static inline bool
2779 set_dest_equal_p (const_rtx set, const_rtx item)
2780 {
2781   rtx dest = SET_DEST (set);
2782   return rtx_equal_p (dest, item);
2783 }
2784 
2785 /* Like memory_modified_in_insn_p, but return TRUE if INSN will
2786    *DEFINITELY* modify the memory contents of MEM.  */
2787 bool
2788 memory_must_be_modified_in_insn_p (const_rtx mem, const_rtx insn)
2789 {
2790   if (!INSN_P (insn))
2791     return false;
2792   insn = PATTERN (insn);
2793   if (GET_CODE (insn) == SET)
2794     return set_dest_equal_p (insn, mem);
2795   else if (GET_CODE (insn) == PARALLEL)
2796     {
2797       int i;
2798       for (i = 0; i < XVECLEN (insn, 0); i++)
2799 	{
2800 	  rtx sub = XVECEXP (insn, 0, i);
2801 	  if (GET_CODE (sub) == SET
2802 	      &&  set_dest_equal_p (sub, mem))
2803 	    return true;
2804 	}
2805     }
2806   return false;
2807 }
2808 
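/* For instance, an insn whose pattern is
   (set (mem:SI (reg 54)) (reg 55)) definitely modifies a MEM that is
   rtx_equal_p to (mem:SI (reg 54)), so memory_must_be_modified_in_insn_p
   returns TRUE for it; a PARALLEL is searched one SET at a time.
   (Register numbers hypothetical.)  */
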
2809 /* Initialize the aliasing machinery.  Initialize the REG_KNOWN_VALUE
2810    array.  */
2811 
2812 void
2813 init_alias_analysis (void)
2814 {
2815   unsigned int maxreg = max_reg_num ();
2816   int changed, pass;
2817   int i;
2818   unsigned int ui;
2819   rtx insn, val;
2820   int rpo_cnt;
2821   int *rpo;
2822 
2823   timevar_push (TV_ALIAS_ANALYSIS);
2824 
2825   vec_safe_grow_cleared (reg_known_value, maxreg - FIRST_PSEUDO_REGISTER);
2826   reg_known_equiv_p = sbitmap_alloc (maxreg - FIRST_PSEUDO_REGISTER);
2827   bitmap_clear (reg_known_equiv_p);
2828 
2829   /* If we have memory allocated from the previous run, use it.  */
2830   if (old_reg_base_value)
2831     reg_base_value = old_reg_base_value;
2832 
2833   if (reg_base_value)
2834     reg_base_value->truncate (0);
2835 
2836   vec_safe_grow_cleared (reg_base_value, maxreg);
2837 
2838   new_reg_base_value = XNEWVEC (rtx, maxreg);
2839   reg_seen = sbitmap_alloc (maxreg);
2840 
2841   /* The basic idea is that each pass through this loop will use the
2842      "constant" information from the previous pass to propagate alias
2843      information through another level of assignments.
2844 
2845      The propagation is done on the CFG in reverse post-order, to propagate
2846      things forward as far as possible in each iteration.
2847 
2848      This could get expensive if the assignment chains are long.  Maybe
2849      we should throttle the number of iterations, possibly based on
2850      the optimization level or flag_expensive_optimizations.
2851 
2852      We could propagate more information in the first pass by making use
2853      of DF_REG_DEF_COUNT to determine immediately that the alias information
2854      for a pseudo is "constant".
2855 
2856      A program with an uninitialized variable can cause an infinite loop
2857      here.  Instead of doing a full dataflow analysis to detect such problems
2858      we just cap the number of iterations for the loop.
2859 
2860      The state of the arrays for the set chain in question does not matter
2861      since the program has undefined behavior.  */
2862 
2863   rpo = XNEWVEC (int, n_basic_blocks);
2864   rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
2865 
2866   pass = 0;
2867   do
2868     {
2869       /* Assume nothing will change this iteration of the loop.  */
2870       changed = 0;
2871 
2872       /* We want to assign the same IDs each iteration of this loop, so
2873 	 start counting from one each time.  */
2874       unique_id = 1;
2875 
2876       /* We're at the start of the function each iteration through the
2877 	 loop, so we're copying arguments.  */
2878       copying_arguments = true;
2879 
2880       /* Wipe the potential alias information clean for this pass.  */
2881       memset (new_reg_base_value, 0, maxreg * sizeof (rtx));
2882 
2883       /* Wipe the reg_seen array clean.  */
2884       bitmap_clear (reg_seen);
2885 
2886       /* Initialize the alias information for this pass.  */
2887       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2888 	if (static_reg_base_value[i])
2889 	  {
2890 	    new_reg_base_value[i] = static_reg_base_value[i];
2891 	    bitmap_set_bit (reg_seen, i);
2892 	  }
2893 
2894       /* Walk the insns adding values to the new_reg_base_value array.  */
2895       for (i = 0; i < rpo_cnt; i++)
2896 	{
2897 	  basic_block bb = BASIC_BLOCK (rpo[i]);
2898 	  FOR_BB_INSNS (bb, insn)
2899 	    {
2900 	      if (NONDEBUG_INSN_P (insn))
2901 		{
2902 		  rtx note, set;
2903 
2904 #if defined (HAVE_prologue) || defined (HAVE_epilogue)
2905 		  /* The prologue/epilogue insns are not threaded onto the
2906 		     insn chain until after reload has completed.  Thus,
2907 		     there is no sense wasting time checking if INSN is in
2908 		     the prologue/epilogue until after reload has completed.  */
2909 		  if (reload_completed
2910 		      && prologue_epilogue_contains (insn))
2911 		    continue;
2912 #endif
2913 
2914 		  /* If this insn has a noalias note, process it.  Otherwise,
2915 		     scan for sets.  A simple set will have no side effects
2916 		     which could change the base value of any other register.  */
2917 
2918 		  if (GET_CODE (PATTERN (insn)) == SET
2919 		      && REG_NOTES (insn) != 0
2920 		      && find_reg_note (insn, REG_NOALIAS, NULL_RTX))
2921 		    record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL);
2922 		  else
2923 		    note_stores (PATTERN (insn), record_set, NULL);
2924 
2925 		  set = single_set (insn);
2926 
2927 		  if (set != 0
2928 		      && REG_P (SET_DEST (set))
2929 		      && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
2930 		    {
2931 		      unsigned int regno = REGNO (SET_DEST (set));
2932 		      rtx src = SET_SRC (set);
2933 		      rtx t;
2934 
2935 		      note = find_reg_equal_equiv_note (insn);
2936 		      if (note && REG_NOTE_KIND (note) == REG_EQUAL
2937 			  && DF_REG_DEF_COUNT (regno) != 1)
2938 			note = NULL_RTX;
2939 
2940 		      if (note != NULL_RTX
2941 			  && GET_CODE (XEXP (note, 0)) != EXPR_LIST
2942 			  && ! rtx_varies_p (XEXP (note, 0), 1)
2943 			  && ! reg_overlap_mentioned_p (SET_DEST (set),
2944 							XEXP (note, 0)))
2945 			{
2946 			  set_reg_known_value (regno, XEXP (note, 0));
2947 			  set_reg_known_equiv_p (regno,
2948 						 REG_NOTE_KIND (note) == REG_EQUIV);
2949 			}
2950 		      else if (DF_REG_DEF_COUNT (regno) == 1
2951 			       && GET_CODE (src) == PLUS
2952 			       && REG_P (XEXP (src, 0))
2953 			       && (t = get_reg_known_value (REGNO (XEXP (src, 0))))
2954 			       && CONST_INT_P (XEXP (src, 1)))
2955 			{
2956 			  t = plus_constant (GET_MODE (src), t,
2957 					     INTVAL (XEXP (src, 1)));
2958 			  set_reg_known_value (regno, t);
2959 			  set_reg_known_equiv_p (regno, false);
2960 			}
2961 		      else if (DF_REG_DEF_COUNT (regno) == 1
2962 			       && ! rtx_varies_p (src, 1))
2963 			{
2964 			  set_reg_known_value (regno, src);
2965 			  set_reg_known_equiv_p (regno, false);
2966 			}
2967 		    }
2968 		}
2969 	      else if (NOTE_P (insn)
2970 		       && NOTE_KIND (insn) == NOTE_INSN_FUNCTION_BEG)
2971 		copying_arguments = false;
2972 	    }
2973 	}
2974 
2975       /* Now propagate values from new_reg_base_value to reg_base_value.  */
2976       gcc_assert (maxreg == (unsigned int) max_reg_num ());
2977 
2978       for (ui = 0; ui < maxreg; ui++)
2979 	{
2980 	  if (new_reg_base_value[ui]
2981 	      && new_reg_base_value[ui] != (*reg_base_value)[ui]
2982 	      && ! rtx_equal_p (new_reg_base_value[ui], (*reg_base_value)[ui]))
2983 	    {
2984 	      (*reg_base_value)[ui] = new_reg_base_value[ui];
2985 	      changed = 1;
2986 	    }
2987 	}
2988     }
2989   while (changed && ++pass < MAX_ALIAS_LOOP_PASSES);
2990   XDELETEVEC (rpo);
2991 
2992   /* Fill in the remaining entries.  */
2993   FOR_EACH_VEC_ELT (*reg_known_value, i, val)
2994     {
2995       int regno = i + FIRST_PSEUDO_REGISTER;
2996       if (! val)
2997 	set_reg_known_value (regno, regno_reg_rtx[regno]);
2998     }
2999 
3000   /* Clean up.  */
3001   free (new_reg_base_value);
3002   new_reg_base_value = 0;
3003   sbitmap_free (reg_seen);
3004   reg_seen = 0;
3005   timevar_pop (TV_ALIAS_ANALYSIS);
3006 }
3007 
3008 /* Equate REG_BASE_VALUE (reg1) to REG_BASE_VALUE (reg2).
3009    Special API for var-tracking pass purposes.  */
3010 
3011 void
3012 vt_equate_reg_base_value (const_rtx reg1, const_rtx reg2)
3013 {
3014   (*reg_base_value)[REGNO (reg1)] = REG_BASE_VALUE (reg2);
3015 }
3016 
3017 void
3018 end_alias_analysis (void)
3019 {
3020   old_reg_base_value = reg_base_value;
3021   vec_free (reg_known_value);
3022   sbitmap_free (reg_known_equiv_p);
3023 }
3024 
3025 #include "gt-alias.h"
3026