1 /* Alias analysis for GNU C
2    Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
3    2007, 2008, 2009, 2010 Free Software Foundation, Inc.
4    Contributed by John Carr (jfc@mit.edu).
5 
6 This file is part of GCC.
7 
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12 
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16 for more details.
17 
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3.  If not see
20 <http://www.gnu.org/licenses/>.  */
21 
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "function.h"
30 #include "alias.h"
31 #include "emit-rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "basic-block.h"
35 #include "flags.h"
36 #include "output.h"
37 #include "toplev.h"
38 #include "cselib.h"
39 #include "splay-tree.h"
40 #include "ggc.h"
41 #include "langhooks.h"
42 #include "timevar.h"
43 #include "target.h"
44 #include "cgraph.h"
45 #include "varray.h"
46 #include "tree-pass.h"
47 #include "ipa-type-escape.h"
48 #include "df.h"
49 #include "tree-ssa-alias.h"
50 #include "pointer-set.h"
51 #include "tree-flow.h"
52 
53 /* The aliasing API provided here solves related but different problems:
54 
55    Say there exists (in C)
56 
57    struct X {
58      struct Y y1;
59      struct Z z2;
60    } x1, *px1,  *px2;
61 
62    struct Y y2, *py;
63    struct Z z2, *pz;
64 
65 
66    py = &x1.y1;
67    px2 = &x1;
68 
69    Consider the four questions:
70 
71    Can a store to x1 interfere with px2->y1?
72    Can a store to x1 interfere with px2->z2?
73    (That is, can it interfere with (*px2).z2?)
74    Can a store to x1 change the value pointed to by py?
75    Can a store to x1 change the value pointed to by pz?
76 
77    The answer to these questions can be yes, yes, yes, and maybe.
78 
79    The first two questions can be answered with a simple examination
80    of the type system.  If structure X contains a field of type Y then
81    a store through a pointer to an X can overwrite any field that is
82    contained (recursively) in an X (unless we know that px1 != px2).
83 
84    The last two questions can be solved in the same way as the
85    first two, but that approach is too conservative.  The observation
86    is that in some cases we can know which (if any) fields are
87    addressed and whether those addresses are used in bad ways.  This
88    analysis may be language specific.  In C, arbitrary operations may
89    be applied to pointers.  However, there is some indication that
90    this may be too conservative for some C++ types.
91 
92    The pass ipa-type-escape does this analysis for the types whose
93    instances do not escape across the compilation boundary.
94 
95    Historically in GCC, these two problems were combined and a single
96    data structure was used to represent the solution to these
97    problems.  We now have two similar but different data structures.
98    The data structure to solve the last two questions is similar to the
99    first, but does not contain the fields whose addresses are
100    never taken.  For types that do escape the compilation unit, the
101    data structures will have identical information.
102 */
103 
104 /* The alias sets assigned to MEMs assist the back-end in determining
105    which MEMs can alias which other MEMs.  In general, two MEMs in
106    different alias sets cannot alias each other, with one important
107    exception.  Consider something like:
108 
109      struct S { int i; double d; };
110 
111    a store to an `S' can alias something of either type `int' or type
112    `double'.  (However, a store to an `int' cannot alias a `double'
113    and vice versa.)  We indicate this via a tree structure that looks
114    like:
115 	   struct S
116 	    /   \
117 	   /     \
118 	 |/_     _\|
119 	 int    double
120 
121    (The arrows are directed and point downwards.)
122     In this situation we say the alias set for `struct S' is the
123    `superset' and that those for `int' and `double' are `subsets'.
124 
125    To see whether two alias sets can point to the same memory, we must
126    see if either alias set is a subset of the other. We need not trace
127    past immediate descendants, however, since we propagate all
128    grandchildren up one level.
129 
130    Alias set zero is implicitly a superset of all other alias sets.
131    However, there is no actual entry for alias set zero.  It is an
132    error to attempt to explicitly construct a subset of zero.  */
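
/* An illustrative sketch (S_TYPE is hypothetical; this is not code that
   runs in this file): for the `struct S' above, the machinery below
   behaves roughly as

     alias_set_type s_set = get_alias_set (s_type);
     alias_set_type i_set = get_alias_set (integer_type_node);
     alias_set_type d_set = get_alias_set (double_type_node);

   where record_component_aliases has made i_set and d_set children of
   s_set.  Afterwards alias_sets_conflict_p (s_set, i_set) is nonzero,
   while alias_sets_conflict_p (i_set, d_set) is zero.  */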
133 
134 struct GTY(()) alias_set_entry_d {
135   /* The alias set number, as stored in MEM_ALIAS_SET.  */
136   alias_set_type alias_set;
137 
138   /* Nonzero if this set would have a child of zero: that effectively
139      makes this alias set the same as alias set zero.  */
140   int has_zero_child;
141 
142   /* The children of the alias set.  These are not just the immediate
143      children, but, in fact, all descendants.  So, if we have:
144 
145        struct T { struct S s; float f; }
146 
147      continuing our example above, the children here will be all of
148      `int', `double', `float', and `struct S'.  */
149   splay_tree GTY((param1_is (int), param2_is (int))) children;
150 };
151 typedef struct alias_set_entry_d *alias_set_entry;
152 
153 static int rtx_equal_for_memref_p (const_rtx, const_rtx);
154 static int memrefs_conflict_p (int, rtx, int, rtx, HOST_WIDE_INT);
155 static void record_set (rtx, const_rtx, void *);
156 static int base_alias_check (rtx, rtx, enum machine_mode,
157 			     enum machine_mode);
158 static rtx find_base_value (rtx);
159 static int mems_in_disjoint_alias_sets_p (const_rtx, const_rtx);
160 static int insert_subset_children (splay_tree_node, void*);
161 static alias_set_entry get_alias_set_entry (alias_set_type);
162 static const_rtx fixed_scalar_and_varying_struct_p (const_rtx, const_rtx, rtx, rtx,
163 						    bool (*) (const_rtx, bool));
164 static int aliases_everything_p (const_rtx);
165 static bool nonoverlapping_component_refs_p (const_tree, const_tree);
166 static tree decl_for_component_ref (tree);
167 static rtx adjust_offset_for_component_ref (tree, rtx);
168 static int write_dependence_p (const_rtx, const_rtx, int);
169 
170 static void memory_modified_1 (rtx, const_rtx, void *);
171 
172 /* Set up all info needed to perform alias analysis on memory references.  */
173 
174 /* Returns the size in bytes of the mode of X.  */
175 #define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))
176 
177 /* Returns nonzero if MEM1 and MEM2 do not alias because they are in
178    different alias sets.  We ignore alias sets in functions making use
179    of variable arguments because the va_arg macros on some systems are
180    not legal ANSI C.  */
181 #define DIFFERENT_ALIAS_SETS_P(MEM1, MEM2)			\
182   mems_in_disjoint_alias_sets_p (MEM1, MEM2)
183 
184 /* Cap the number of passes we make over the insns propagating alias
185    information through set chains.  10 is a completely arbitrary choice.  */
186 #define MAX_ALIAS_LOOP_PASSES 10
187 
188 /* reg_base_value[N] gives an address to which register N is related.
189    If all sets after the first add or subtract to the current value
190    or otherwise modify it so it does not point to a different top level
191    object, reg_base_value[N] is equal to the address part of the source
192    of the first set.
193 
194    A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF.  ADDRESS
195    expressions represent certain special values: function arguments and
196    the stack, frame, and argument pointers.
197 
198    The contents of an ADDRESS are not normally used; the mode of the
199    ADDRESS determines whether the ADDRESS is a function argument or some
200    other special value.  Pointer equality, not rtx_equal_p, determines whether
201    two ADDRESS expressions refer to the same base address.
202 
203    The only use of the contents of an ADDRESS is for determining if the
204    current function performs nonlocal memory references for the
205    purposes of marking the function as a constant function.  */
206 
207 static GTY(()) VEC(rtx,gc) *reg_base_value;
208 static rtx *new_reg_base_value;
209 
210 /* We preserve a copy of the old array around to reduce the amount of
211    garbage produced.  About 8% of the garbage produced was attributed
212    to this array.  */
213 static GTY((deletable)) VEC(rtx,gc) *old_reg_base_value;
214 
215 /* Static hunks of RTL used by the aliasing code; these are initialized
216    once per function to avoid unnecessary RTL allocations.  */
217 static GTY (()) rtx static_reg_base_value[FIRST_PSEUDO_REGISTER];
218 
219 #define REG_BASE_VALUE(X)				\
220   (REGNO (X) < VEC_length (rtx, reg_base_value)		\
221    ? VEC_index (rtx, reg_base_value, REGNO (X)) : 0)
222 
223 /* Vector indexed by N giving the initial (unchanging) value known for
224    pseudo-register N.  This array is initialized in init_alias_analysis,
225    and does not change until end_alias_analysis is called.  */
226 static GTY((length("reg_known_value_size"))) rtx *reg_known_value;
227 
228 /* Indicates number of valid entries in reg_known_value.  */
229 static GTY(()) unsigned int reg_known_value_size;
230 
231 /* Vector recording for each reg_known_value whether it is due to a
232    REG_EQUIV note.  Future passes (viz., reload) may replace the
233    pseudo with the equivalent expression and so we account for the
234    dependences that would be introduced if that happens.
235 
236    The REG_EQUIV notes created in assign_parms may mention the arg
237    pointer, and there are explicit insns in the RTL that modify the
238    arg pointer.  Thus we must ensure that such insns don't get
239    scheduled across each other because that would invalidate the
240    REG_EQUIV notes.  One could argue that the REG_EQUIV notes are
241    wrong, but solving the problem in the scheduler will likely give
242    better code, so we do it here.  */
243 static bool *reg_known_equiv_p;
244 
245 /* True when scanning insns from the start of the rtl to the
246    NOTE_INSN_FUNCTION_BEG note.  */
247 static bool copying_arguments;
248 
249 DEF_VEC_P(alias_set_entry);
250 DEF_VEC_ALLOC_P(alias_set_entry,gc);
251 
252 /* The splay-tree used to store the various alias set entries.  */
253 static GTY (()) VEC(alias_set_entry,gc) *alias_sets;
254 
255 /* Build a decomposed reference object for querying the alias-oracle
256    from the MEM rtx and store it in *REF.
257    Returns false if MEM is not suitable for the alias-oracle.  */
258 
259 static bool
260 ao_ref_from_mem (ao_ref *ref, const_rtx mem)
261 {
262   tree expr = MEM_EXPR (mem);
263   tree base;
264 
265   if (!expr)
266     return false;
267 
268   /* If MEM_OFFSET or MEM_SIZE is NULL, punt.  */
269   if (!MEM_OFFSET (mem)
270       || !MEM_SIZE (mem))
271     return false;
272 
273   ao_ref_init (ref, expr);
274 
275   /* Get the base of the reference and see if we have to reject or
276      adjust it.  */
277   base = ao_ref_base (ref);
278   if (base == NULL_TREE)
279     return false;
280 
281   /* If this is a pointer dereference of a non-SSA_NAME, punt.
282      ???  We could replace it with a pointer to anything.  */
283   if (INDIRECT_REF_P (base)
284       && TREE_CODE (TREE_OPERAND (base, 0)) != SSA_NAME)
285     return false;
286 
287   /* The tree oracle doesn't like to have these.  */
288   if (TREE_CODE (base) == FUNCTION_DECL
289       || TREE_CODE (base) == LABEL_DECL)
290     return false;
291 
292   /* If this is a reference based on a partitioned decl, replace the
293      base with an INDIRECT_REF of the pointer representative we
294      created during stack slot partitioning.  */
295   if (TREE_CODE (base) == VAR_DECL
296       && ! TREE_STATIC (base)
297       && cfun->gimple_df->decls_to_pointers != NULL)
298     {
299       void *namep;
300       namep = pointer_map_contains (cfun->gimple_df->decls_to_pointers, base);
301       if (namep)
302 	{
303 	  ref->base_alias_set = get_alias_set (base);
304 	  ref->base = build1 (INDIRECT_REF, TREE_TYPE (base), *(tree *)namep);
305 	}
306     }
307 
308   ref->ref_alias_set = MEM_ALIAS_SET (mem);
309 
310   /* If the base decl is a parameter we can have negative MEM_OFFSET in
311      case of promoted subregs on bigendian targets.  Trust the MEM_EXPR
312      here.  */
313   if (INTVAL (MEM_OFFSET (mem)) < 0
314       && ((INTVAL (MEM_SIZE (mem)) + INTVAL (MEM_OFFSET (mem)))
315 	  * BITS_PER_UNIT) == ref->size)
316     return true;
317 
318   ref->offset += INTVAL (MEM_OFFSET (mem)) * BITS_PER_UNIT;
319   ref->size = INTVAL (MEM_SIZE (mem)) * BITS_PER_UNIT;
320 
321   /* The MEM may extend into adjacent fields, so adjust max_size if
322      necessary.  */
323   if (ref->max_size != -1
324       && ref->size > ref->max_size)
325     ref->max_size = ref->size;
326 
327   /* If MEM_OFFSET and MEM_SIZE get us outside of the base object of
328      the MEM_EXPR, punt.  This happens a lot for STRICT_ALIGNMENT targets.  */
329   if (MEM_EXPR (mem) != get_spill_slot_decl (false)
330       && (ref->offset < 0
331 	  || (DECL_P (ref->base)
332 	      && (!host_integerp (DECL_SIZE (ref->base), 1)
333 		  || (TREE_INT_CST_LOW (DECL_SIZE (ref->base))
334 		      < (unsigned HOST_WIDE_INT)(ref->offset + ref->size))))))
335     return false;
336 
337   return true;
338 }
339 
340 /* Query the alias-oracle on whether the two memory rtx X and MEM may
341    alias.  If TBAA_P is set also apply TBAA.  Returns true if the
342    two rtxen may alias, false otherwise.  */
343 
344 static bool
345 rtx_refs_may_alias_p (const_rtx x, const_rtx mem, bool tbaa_p)
346 {
347   ao_ref ref1, ref2;
348 
349   if (!ao_ref_from_mem (&ref1, x)
350       || !ao_ref_from_mem (&ref2, mem))
351     return true;
352 
353   return refs_may_alias_p_1 (&ref1, &ref2, tbaa_p);
354 }
355 
356 /* Returns a pointer to the alias set entry for ALIAS_SET, if there is
357    such an entry, or NULL otherwise.  */
358 
359 static inline alias_set_entry
360 get_alias_set_entry (alias_set_type alias_set)
361 {
362   return VEC_index (alias_set_entry, alias_sets, alias_set);
363 }
364 
365 /* Returns nonzero if the alias sets for MEM1 and MEM2 are such that
366    the two MEMs cannot alias each other.  */
367 
368 static inline int
369 mems_in_disjoint_alias_sets_p (const_rtx mem1, const_rtx mem2)
370 {
371 /* Perform a basic sanity check.  Namely, that there are no alias sets
372    if we're not using strict aliasing.  This helps to catch bugs
373    whereby someone uses PUT_CODE, but doesn't clear MEM_ALIAS_SET, or
374    where a MEM is allocated in some way other than by the use of
375    gen_rtx_MEM, and the MEM_ALIAS_SET is not cleared.  If we begin to
376    use alias sets to indicate that spilled registers cannot alias each
377    other, we might need to remove this check.  */
378   gcc_assert (flag_strict_aliasing
379 	      || (!MEM_ALIAS_SET (mem1) && !MEM_ALIAS_SET (mem2)));
380 
381   return ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1), MEM_ALIAS_SET (mem2));
382 }
383 
384 /* Insert the NODE into the splay tree given by DATA.  Used by
385    record_alias_subset via splay_tree_foreach.  */
386 
387 static int
388 insert_subset_children (splay_tree_node node, void *data)
389 {
390   splay_tree_insert ((splay_tree) data, node->key, node->value);
391 
392   return 0;
393 }
394 
395 /* Return true if the first alias set is a subset of the second.  */
396 
397 bool
398 alias_set_subset_of (alias_set_type set1, alias_set_type set2)
399 {
400   alias_set_entry ase;
401 
402   /* Everything is a subset of the "aliases everything" set.  */
403   if (set2 == 0)
404     return true;
405 
406   /* Otherwise, check if set1 is a subset of set2.  */
407   ase = get_alias_set_entry (set2);
408   if (ase != 0
409       && (ase->has_zero_child
410 	  || splay_tree_lookup (ase->children,
411 			        (splay_tree_key) set1)))
412     return true;
413   return false;
414 }
415 
416 /* Return 1 if the two specified alias sets may conflict.  */
417 
418 int
419 alias_sets_conflict_p (alias_set_type set1, alias_set_type set2)
420 {
421   alias_set_entry ase;
422 
423   /* The easy case.  */
424   if (alias_sets_must_conflict_p (set1, set2))
425     return 1;
426 
427   /* See if the first alias set is a subset of the second.  */
428   ase = get_alias_set_entry (set1);
429   if (ase != 0
430       && (ase->has_zero_child
431 	  || splay_tree_lookup (ase->children,
432 				(splay_tree_key) set2)))
433     return 1;
434 
435   /* Now do the same, but with the alias sets reversed.  */
436   ase = get_alias_set_entry (set2);
437   if (ase != 0
438       && (ase->has_zero_child
439 	  || splay_tree_lookup (ase->children,
440 				(splay_tree_key) set1)))
441     return 1;
442 
443   /* The two alias sets are distinct and neither one is the
444      child of the other.  Therefore, they cannot conflict.  */
445   return 0;
446 }
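
/* A minimal query sketch, reusing the hypothetical sets from the
   comment near the top of this file: alias_sets_conflict_p (s_set, i_set)
   and alias_sets_conflict_p (i_set, s_set) both return 1, the first via
   the forward child lookup and the second via the reversed one, while
   alias_sets_conflict_p (i_set, d_set) falls through all three tests
   and returns 0.  */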
447 
448 static int
449 walk_mems_2 (rtx *x, rtx mem)
450 {
451   if (MEM_P (*x))
452     {
453       if (alias_sets_conflict_p (MEM_ALIAS_SET(*x), MEM_ALIAS_SET(mem)))
454         return 1;
455 
456       return -1;
457     }
458   return 0;
459 }
460 
461 static int
462 walk_mems_1 (rtx *x, rtx *pat)
463 {
464   if (MEM_P (*x))
465     {
466       /* Visit all MEMs in *PAT and check independence.  */
467       if (for_each_rtx (pat, (rtx_function) walk_mems_2, *x))
468         /* Indicate that dependence was determined and stop traversal.  */
469         return 1;
470 
471       return -1;
472     }
473   return 0;
474 }
475 
476 /* Return true if the two specified instructions have MEMs with conflicting alias sets.  */
477 bool
478 insn_alias_sets_conflict_p (rtx insn1, rtx insn2)
479 {
480   /* For each pair of MEMs in INSN1 and INSN2 check their independence.  */
481   return for_each_rtx (&PATTERN (insn1), (rtx_function) walk_mems_1,
482 			 &PATTERN (insn2));
483 }
484 
485 /* Return 1 if the two specified alias sets will always conflict.  */
486 
487 int
488 alias_sets_must_conflict_p (alias_set_type set1, alias_set_type set2)
489 {
490   if (set1 == 0 || set2 == 0 || set1 == set2)
491     return 1;
492 
493   return 0;
494 }
495 
496 /* Return 1 if any MEM object of type T1 will always conflict (using the
497    dependency routines in this file) with any MEM object of type T2.
498    This is used when allocating temporary storage.  If T1 and/or T2 are
499    NULL_TREE, it means we know nothing about the storage.  */
500 
501 int
502 objects_must_conflict_p (tree t1, tree t2)
503 {
504   alias_set_type set1, set2;
505 
506   /* If neither has a type specified, we don't know if they'll conflict
507      because we may be using them to store objects of various types, for
508      example the argument and local variables areas of inlined functions.  */
509   if (t1 == 0 && t2 == 0)
510     return 0;
511 
512   /* If they are the same type, they must conflict.  */
513   if (t1 == t2
514       /* Likewise if both are volatile.  */
515       || (t1 != 0 && TYPE_VOLATILE (t1) && t2 != 0 && TYPE_VOLATILE (t2)))
516     return 1;
517 
518   set1 = t1 ? get_alias_set (t1) : 0;
519   set2 = t2 ? get_alias_set (t2) : 0;
520 
521   /* We can't use alias_sets_conflict_p because we must make sure
522      that every subtype of t1 will conflict with every subtype of
523      t2 for which a pair of subobjects of these respective subtypes
524      overlaps on the stack.  */
525   return alias_sets_must_conflict_p (set1, set2);
526 }
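
/* Sketch of the resulting behavior: objects_must_conflict_p (t, t)
   returns 1 for any nonnull type t, objects_must_conflict_p (NULL_TREE,
   NULL_TREE) returns 0, and two distinct non-volatile types must
   conflict only when one of their alias sets is zero or both sets
   coincide.  A caller allocating temporary storage may share a stack
   slot only when this returns 1.  */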
527 
528 /* Return true if all nested component references handled by
529    get_inner_reference in T are such that we should use the alias set
530    provided by the object at the heart of T.
531 
532    This is true for non-addressable components (which don't have their
533    own alias set), as well as components of objects in alias set zero.
534    This latter point is a special case wherein we wish to override the
535    alias set used by the component, but we don't have per-FIELD_DECL
536    assignable alias sets.  */
537 
538 bool
539 component_uses_parent_alias_set (const_tree t)
540 {
541   while (1)
542     {
543       /* If we're at the end, it vacuously uses its own alias set.  */
544       if (!handled_component_p (t))
545 	return false;
546 
547       switch (TREE_CODE (t))
548 	{
549 	case COMPONENT_REF:
550 	  if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1)))
551 	    return true;
552 	  break;
553 
554 	case ARRAY_REF:
555 	case ARRAY_RANGE_REF:
556 	  if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0))))
557 	    return true;
558 	  break;
559 
560 	case REALPART_EXPR:
561 	case IMAGPART_EXPR:
562 	  break;
563 
564 	default:
565 	  /* Bitfields and casts are never addressable.  */
566 	  return true;
567 	}
568 
569       t = TREE_OPERAND (t, 0);
570       if (get_alias_set (TREE_TYPE (t)) == 0)
571 	return true;
572     }
573 }
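
/* For example (a sketch): a BIT_FIELD_REF hits the default case above,
   since bitfields are never addressable, so this function returns true
   and get_alias_set below uses the alias set of the underlying object
   rather than one for the component.  */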
574 
575 /* Return the alias set for the memory pointed to by T, which may be
576    either a type or an expression.  Return -1 if there is nothing
577    special about dereferencing T.  */
578 
579 static alias_set_type
580 get_deref_alias_set_1 (tree t)
581 {
582   /* If we're not doing any alias analysis, just assume everything
583      aliases everything else.  */
584   if (!flag_strict_aliasing)
585     return 0;
586 
587   /* All we care about is the type.  */
588   if (! TYPE_P (t))
589     t = TREE_TYPE (t);
590 
591   /* If we have an INDIRECT_REF via a void pointer, we don't
592      know anything about what that might alias.  Likewise if the
593      pointer is marked that way.  */
594   if (TREE_CODE (TREE_TYPE (t)) == VOID_TYPE
595       || TYPE_REF_CAN_ALIAS_ALL (t))
596     return 0;
597 
598   return -1;
599 }
600 
601 /* Return the alias set for the memory pointed to by T, which may be
602    either a type or an expression.  */
603 
604 alias_set_type
605 get_deref_alias_set (tree t)
606 {
607   alias_set_type set = get_deref_alias_set_1 (t);
608 
609   /* Fall back to the alias-set of the pointed-to type.  */
610   if (set == -1)
611     {
612       if (! TYPE_P (t))
613 	t = TREE_TYPE (t);
614       set = get_alias_set (TREE_TYPE (t));
615     }
616 
617   return set;
618 }
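
/* Sketch: for a pointer P of type `int *', get_deref_alias_set (P)
   returns the alias set of `int'.  For `void *', or any pointer type
   marked TYPE_REF_CAN_ALIAS_ALL, get_deref_alias_set_1 already answers
   0, meaning the dereference may alias anything.  */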
619 
620 /* Return the alias set for T, which may be either a type or an
621    expression.  Call language-specific routine for help, if needed.  */
622 
623 alias_set_type
624 get_alias_set (tree t)
625 {
626   alias_set_type set;
627 
628   /* If we're not doing any alias analysis, just assume everything
629      aliases everything else.  Also return 0 if this or its type is
630      an error.  */
631   if (! flag_strict_aliasing || t == error_mark_node
632       || (! TYPE_P (t)
633 	  && (TREE_TYPE (t) == 0 || TREE_TYPE (t) == error_mark_node)))
634     return 0;
635 
636   /* We can be passed either an expression or a type.  This and the
637      language-specific routine may make mutually-recursive calls to each other
638      to figure out what to do.  At each juncture, we see if this is a tree
639      that the language may need to handle specially.  First handle things that
640      aren't types.  */
641   if (! TYPE_P (t))
642     {
643       tree inner;
644 
645       /* Remove any nops, then give the language a chance to do
646 	 something with this tree before we look at it.  */
647       STRIP_NOPS (t);
648       set = lang_hooks.get_alias_set (t);
649       if (set != -1)
650 	return set;
651 
652       /* Retrieve the original memory reference if needed.  */
653       if (TREE_CODE (t) == TARGET_MEM_REF)
654 	t = TMR_ORIGINAL (t);
655 
656       /* First see if the actual object referenced is an INDIRECT_REF from a
657 	 restrict-qualified pointer or a "void *".  */
658       inner = t;
659       while (handled_component_p (inner))
660 	{
661 	  inner = TREE_OPERAND (inner, 0);
662 	  STRIP_NOPS (inner);
663 	}
664 
665       if (INDIRECT_REF_P (inner))
666 	{
667 	  set = get_deref_alias_set_1 (TREE_OPERAND (inner, 0));
668 	  if (set != -1)
669 	    return set;
670 	}
671 
672       /* Otherwise, pick up the outermost object that we could have a pointer
673 	 to, processing conversions as above.  */
674       while (component_uses_parent_alias_set (t))
675 	{
676 	  t = TREE_OPERAND (t, 0);
677 	  STRIP_NOPS (t);
678 	}
679 
680       /* If we've already determined the alias set for a decl, just return
681 	 it.  This is necessary for C++ anonymous unions, whose component
682 	 variables don't look like union members (boo!).  */
683       if (TREE_CODE (t) == VAR_DECL
684 	  && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t)))
685 	return MEM_ALIAS_SET (DECL_RTL (t));
686 
687       /* Now all we care about is the type.  */
688       t = TREE_TYPE (t);
689     }
690 
691   /* Variant qualifiers don't affect the alias set, so get the main
692      variant.  */
693   t = TYPE_MAIN_VARIANT (t);
694 
695   /* Always use the canonical type as well.  If this is a type that
696      requires structural comparisons to identify compatible types
697      use alias set zero.  */
698   if (TYPE_STRUCTURAL_EQUALITY_P (t))
699     {
700       /* Allow the language to specify another alias set for this
701 	 type.  */
702       set = lang_hooks.get_alias_set (t);
703       if (set != -1)
704 	return set;
705       return 0;
706     }
707   t = TYPE_CANONICAL (t);
708   /* Canonical types shouldn't form a tree nor should the canonical
709      type require structural equality checks.  */
710   gcc_assert (!TYPE_STRUCTURAL_EQUALITY_P (t) && TYPE_CANONICAL (t) == t);
711 
712   /* If this is a type with a known alias set, return it.  */
713   if (TYPE_ALIAS_SET_KNOWN_P (t))
714     return TYPE_ALIAS_SET (t);
715 
716   /* We don't want to set TYPE_ALIAS_SET for incomplete types.  */
717   if (!COMPLETE_TYPE_P (t))
718     {
719       /* For arrays with unknown size the conservative answer is the
720 	 alias set of the element type.  */
721       if (TREE_CODE (t) == ARRAY_TYPE)
722 	return get_alias_set (TREE_TYPE (t));
723 
724       /* But return zero as a conservative answer for incomplete types.  */
725       return 0;
726     }
727 
728   /* See if the language has special handling for this type.  */
729   set = lang_hooks.get_alias_set (t);
730   if (set != -1)
731     return set;
732 
733   /* There are no objects of FUNCTION_TYPE, so there's no point in
734      using up an alias set for them.  (There are, of course, pointers
735      and references to functions, but that's different.)  */
736   else if (TREE_CODE (t) == FUNCTION_TYPE
737 	   || TREE_CODE (t) == METHOD_TYPE)
738     set = 0;
739 
740   /* Unless the language specifies otherwise, let vector types alias
741      their components.  This avoids some nasty type punning issues in
742      normal usage.  And indeed lets vectors be treated more like an
743      array slice.  */
744   else if (TREE_CODE (t) == VECTOR_TYPE)
745     set = get_alias_set (TREE_TYPE (t));
746 
747   /* Unless the language specifies otherwise, treat array types the
748      same as their components.  This avoids the asymmetry we get
749      through recording the components.  Consider accessing a
750      character(kind=1) through a reference to a character(kind=1)[1:1].
751      Or consider if we want to assign integer(kind=4)[0:D.1387] and
752      integer(kind=4)[4] the same alias set or not.
753      Just be pragmatic here and make sure the array and its element
754      type get the same alias set assigned.  */
755   else if (TREE_CODE (t) == ARRAY_TYPE
756 	   && !TYPE_NONALIASED_COMPONENT (t))
757     set = get_alias_set (TREE_TYPE (t));
758 
759   else
760     /* Otherwise make a new alias set for this type.  */
761     set = new_alias_set ();
762 
763   TYPE_ALIAS_SET (t) = set;
764 
765   /* If this is an aggregate type, we must record any component aliasing
766      information.  */
767   if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE)
768     record_component_aliases (t);
769 
770   return set;
771 }
772 
773 /* Return a brand-new alias set.  */
774 
775 alias_set_type
776 new_alias_set (void)
777 {
778   if (flag_strict_aliasing)
779     {
780       if (alias_sets == 0)
781 	VEC_safe_push (alias_set_entry, gc, alias_sets, 0);
782       VEC_safe_push (alias_set_entry, gc, alias_sets, 0);
783       return VEC_length (alias_set_entry, alias_sets) - 1;
784     }
785   else
786     return 0;
787 }
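
/* Note that the first call above pushes two elements, so the returned
   set numbers start at 1; element 0 of ALIAS_SETS stays empty, mirroring
   alias set zero, which aliases everything and has no entry.  */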
788 
789 /* Indicate that things in SUBSET can alias things in SUPERSET, but that
790    not everything that aliases SUPERSET also aliases SUBSET.  For example,
791    in C, a store to an `int' can alias a load of a structure containing an
792    `int', and vice versa.  But it can't alias a load of a 'double' member
793    of the same structure.  Here, the structure would be the SUPERSET and
794    `int' the SUBSET.  This relationship is also described in the comment at
795    the beginning of this file.
796 
797    This function should be called only once per SUPERSET/SUBSET pair.
798 
799    It is illegal for SUPERSET to be zero; everything is implicitly a
800    subset of alias set zero.  */
801 
802 void
803 record_alias_subset (alias_set_type superset, alias_set_type subset)
804 {
805   alias_set_entry superset_entry;
806   alias_set_entry subset_entry;
807 
808   /* It is possible in complex type situations for both sets to be the same,
809      in which case we can ignore this operation.  */
810   if (superset == subset)
811     return;
812 
813   gcc_assert (superset);
814 
815   superset_entry = get_alias_set_entry (superset);
816   if (superset_entry == 0)
817     {
818       /* Create an entry for the SUPERSET, so that we have a place to
819 	 attach the SUBSET.  */
820       superset_entry = GGC_NEW (struct alias_set_entry_d);
821       superset_entry->alias_set = superset;
822       superset_entry->children
823 	= splay_tree_new_ggc (splay_tree_compare_ints);
824       superset_entry->has_zero_child = 0;
825       VEC_replace (alias_set_entry, alias_sets, superset, superset_entry);
826     }
827 
828   if (subset == 0)
829     superset_entry->has_zero_child = 1;
830   else
831     {
832       subset_entry = get_alias_set_entry (subset);
833       /* If there is an entry for the subset, enter all of its children
834 	 (if they are not already present) as children of the SUPERSET.  */
835       if (subset_entry)
836 	{
837 	  if (subset_entry->has_zero_child)
838 	    superset_entry->has_zero_child = 1;
839 
840 	  splay_tree_foreach (subset_entry->children, insert_subset_children,
841 			      superset_entry->children);
842 	}
843 
844       /* Enter the SUBSET itself as a child of the SUPERSET.  */
845       splay_tree_insert (superset_entry->children,
846 			 (splay_tree_key) subset, 0);
847     }
848 }
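
/* Sketch, continuing the `struct T' example in alias_set_entry_d above:
   record_alias_subset (T_set, S_set) copies S_set's existing children
   (`int' and `double') into T_set's splay tree via insert_subset_children
   and then adds S_set itself.  This grandchild propagation is what lets
   alias_sets_conflict_p stop after a single-level lookup.  */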
849 
850 /* Record that component types of TYPE, if any, are part of that type for
851    aliasing purposes.  For record types, we only record component types
852    for fields that are not marked non-addressable.  For array types, we
853    only record the component type if it is not marked non-aliased.  */
854 
855 void
856 record_component_aliases (tree type)
857 {
858   alias_set_type superset = get_alias_set (type);
859   tree field;
860 
861   if (superset == 0)
862     return;
863 
864   switch (TREE_CODE (type))
865     {
866     case RECORD_TYPE:
867     case UNION_TYPE:
868     case QUAL_UNION_TYPE:
869       /* Recursively record aliases for the base classes, if there are any.  */
870       if (TYPE_BINFO (type))
871 	{
872 	  int i;
873 	  tree binfo, base_binfo;
874 
875 	  for (binfo = TYPE_BINFO (type), i = 0;
876 	       BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
877 	    record_alias_subset (superset,
878 				 get_alias_set (BINFO_TYPE (base_binfo)));
879 	}
880       for (field = TYPE_FIELDS (type); field != 0; field = TREE_CHAIN (field))
881 	if (TREE_CODE (field) == FIELD_DECL && !DECL_NONADDRESSABLE_P (field))
882 	  record_alias_subset (superset, get_alias_set (TREE_TYPE (field)));
883       break;
884 
885     case COMPLEX_TYPE:
886       record_alias_subset (superset, get_alias_set (TREE_TYPE (type)));
887       break;
888 
889     /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
890        element type.  */
891 
892     default:
893       break;
894     }
895 }
896 
897 /* Allocate an alias set for use in storing and reading from the varargs
898    spill area.  */
899 
900 static GTY(()) alias_set_type varargs_set = -1;
901 
902 alias_set_type
903 get_varargs_alias_set (void)
904 {
905 #if 1
906   /* We now lower VA_ARG_EXPR, and there's currently no way to attach the
907      varargs alias set to an INDIRECT_REF (FIXME!), so we can't
908      consistently use the varargs alias set for loads from the varargs
909      area.  So don't use it anywhere.  */
910   return 0;
911 #else
912   if (varargs_set == -1)
913     varargs_set = new_alias_set ();
914 
915   return varargs_set;
916 #endif
917 }
918 
919 /* Likewise, but used for the fixed portions of the frame, e.g., register
920    save areas.  */
921 
922 static GTY(()) alias_set_type frame_set = -1;
923 
924 alias_set_type
925 get_frame_alias_set (void)
926 {
927   if (frame_set == -1)
928     frame_set = new_alias_set ();
929 
930   return frame_set;
931 }
932 
933 /* Inside SRC, the source of a SET, find a base address.  */
934 
935 static rtx
936 find_base_value (rtx src)
937 {
938   unsigned int regno;
939 
940 #if defined (FIND_BASE_TERM)
941   /* Try machine-dependent ways to find the base term.  */
942   src = FIND_BASE_TERM (src);
943 #endif
944 
945   switch (GET_CODE (src))
946     {
947     case SYMBOL_REF:
948     case LABEL_REF:
949       return src;
950 
951     case REG:
952       regno = REGNO (src);
953       /* At the start of a function, argument registers have known base
954 	 values which may be lost later.  Returning an ADDRESS
955 	 expression here allows optimization based on argument values
956 	 even when the argument registers are used for other purposes.  */
957       if (regno < FIRST_PSEUDO_REGISTER && copying_arguments)
958 	return new_reg_base_value[regno];
959 
960       /* If a pseudo has a known base value, return it.  Do not do this
961 	 for non-fixed hard regs since it can result in a circular
962 	 dependency chain for registers which have values at function entry.
963 
964 	 The test above is not sufficient because the scheduler may move
965 	 a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN.  */
966       if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno])
967 	  && regno < VEC_length (rtx, reg_base_value))
968 	{
969 	  /* If we're inside init_alias_analysis, use new_reg_base_value
970 	     to reduce the number of relaxation iterations.  */
971 	  if (new_reg_base_value && new_reg_base_value[regno]
972 	      && DF_REG_DEF_COUNT (regno) == 1)
973 	    return new_reg_base_value[regno];
974 
975 	  if (VEC_index (rtx, reg_base_value, regno))
976 	    return VEC_index (rtx, reg_base_value, regno);
977 	}
978 
979       return 0;
980 
981     case MEM:
982       /* Check for an argument passed in memory.  Only record in the
983 	 copying-arguments block; it is too hard to track changes
984 	 otherwise.  */
985       if (copying_arguments
986 	  && (XEXP (src, 0) == arg_pointer_rtx
987 	      || (GET_CODE (XEXP (src, 0)) == PLUS
988 		  && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx)))
989 	return gen_rtx_ADDRESS (VOIDmode, src);
990       return 0;
991 
992     case CONST:
993       src = XEXP (src, 0);
994       if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS)
995 	break;
996 
997       /* ... fall through ...  */
998 
999     case PLUS:
1000     case MINUS:
1001       {
1002 	rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1);
1003 
1004 	/* If either operand is a REG that is a known pointer, then it
1005 	   is the base.  */
1006 	if (REG_P (src_0) && REG_POINTER (src_0))
1007 	  return find_base_value (src_0);
1008 	if (REG_P (src_1) && REG_POINTER (src_1))
1009 	  return find_base_value (src_1);
1010 
1011 	/* If either operand is a REG, then see if we already have
1012 	   a known value for it.  */
1013 	if (REG_P (src_0))
1014 	  {
1015 	    temp = find_base_value (src_0);
1016 	    if (temp != 0)
1017 	      src_0 = temp;
1018 	  }
1019 
1020 	if (REG_P (src_1))
1021 	  {
1022 	    temp = find_base_value (src_1);
1023 	    if (temp != 0)
1024 	      src_1 = temp;
1025 	  }
1026 
1027 	/* If either base is a named object or a special address
1028 	   (like an argument or stack reference), then use it for the
1029 	   base term.  */
1030 	if (src_0 != 0
1031 	    && (GET_CODE (src_0) == SYMBOL_REF
1032 		|| GET_CODE (src_0) == LABEL_REF
1033 		|| (GET_CODE (src_0) == ADDRESS
1034 		    && GET_MODE (src_0) != VOIDmode)))
1035 	  return src_0;
1036 
1037 	if (src_1 != 0
1038 	    && (GET_CODE (src_1) == SYMBOL_REF
1039 		|| GET_CODE (src_1) == LABEL_REF
1040 		|| (GET_CODE (src_1) == ADDRESS
1041 		    && GET_MODE (src_1) != VOIDmode)))
1042 	  return src_1;
1043 
1044 	/* Guess which operand is the base address:
1045 	   If either operand is a symbol, then it is the base.  If
1046 	   either operand is a CONST_INT, then the other is the base.  */
1047 	if (CONST_INT_P (src_1) || CONSTANT_P (src_0))
1048 	  return find_base_value (src_0);
1049 	else if (CONST_INT_P (src_0) || CONSTANT_P (src_1))
1050 	  return find_base_value (src_1);
1051 
1052 	return 0;
1053       }
1054 
1055     case LO_SUM:
1056       /* The standard form is (lo_sum reg sym) so look only at the
1057 	 second operand.  */
1058       return find_base_value (XEXP (src, 1));
1059 
1060     case AND:
1061       /* If the second operand is constant, set the base
1062 	 address to the first operand.  */
1063       if (CONST_INT_P (XEXP (src, 1)) && INTVAL (XEXP (src, 1)) != 0)
1064 	return find_base_value (XEXP (src, 0));
1065       return 0;
1066 
1067     case TRUNCATE:
1068       /* As we do not know which address space the pointer refers to, we can
1069 	 handle this only if the target does not support different pointer or
1070 	 address modes depending on the address space.  */
1071       if (!target_default_pointer_address_modes_p ())
1072 	break;
1073       if (GET_MODE_SIZE (GET_MODE (src)) < GET_MODE_SIZE (Pmode))
1074 	break;
1075       /* Fall through.  */
1076     case HIGH:
1077     case PRE_INC:
1078     case PRE_DEC:
1079     case POST_INC:
1080     case POST_DEC:
1081     case PRE_MODIFY:
1082     case POST_MODIFY:
1083       return find_base_value (XEXP (src, 0));
1084 
1085     case ZERO_EXTEND:
1086     case SIGN_EXTEND:	/* used for NT/Alpha pointers */
1087       /* As we do not know which address space the pointer refers to, we can
1088 	 handle this only if the target does not support different pointer or
1089 	 address modes depending on the address space.  */
1090       if (!target_default_pointer_address_modes_p ())
1091 	break;
1092 
1093       {
1094 	rtx temp = find_base_value (XEXP (src, 0));
1095 
1096 	if (temp != 0 && CONSTANT_P (temp))
1097 	  temp = convert_memory_address (Pmode, temp);
1098 
1099 	return temp;
1100       }
1101 
1102     default:
1103       break;
1104     }
1105 
1106   return 0;
1107 }
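
/* Example (a sketch): for SRC = (plus (reg 65) (const_int 8)) where
   pseudo 65 has a recorded base of (symbol_ref "x"), the PLUS case
   above resolves the REG operand to the SYMBOL_REF and returns it.
   A lone (const_int 8) falls through to the default case and yields 0,
   i.e. no known base.  */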
1108 
1109 /* Called from init_alias_analysis indirectly through note_stores.  */
1110 
1111 /* While scanning insns to find base values, reg_seen[N] is nonzero if
1112    register N has been set in this function.  */
1113 static char *reg_seen;
1114 
1115 /* Addresses which are known not to alias anything else are identified
1116    by a unique integer.  */
1117 static int unique_id;
1118 
1119 static void
1120 record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
1121 {
1122   unsigned regno;
1123   rtx src;
1124   int n;
1125 
1126   if (!REG_P (dest))
1127     return;
1128 
1129   regno = REGNO (dest);
1130 
1131   gcc_assert (regno < VEC_length (rtx, reg_base_value));
1132 
1133   /* If this spans multiple hard registers, then we must indicate that every
1134      register has an unusable value.  */
1135   if (regno < FIRST_PSEUDO_REGISTER)
1136     n = hard_regno_nregs[regno][GET_MODE (dest)];
1137   else
1138     n = 1;
1139   if (n != 1)
1140     {
1141       while (--n >= 0)
1142 	{
1143 	  reg_seen[regno + n] = 1;
1144 	  new_reg_base_value[regno + n] = 0;
1145 	}
1146       return;
1147     }
1148 
1149   if (set)
1150     {
1151       /* A CLOBBER wipes out any old value but does not prevent a previously
1152 	 unset register from acquiring a base address (i.e. reg_seen is not
1153 	 set).  */
1154       if (GET_CODE (set) == CLOBBER)
1155 	{
1156 	  new_reg_base_value[regno] = 0;
1157 	  return;
1158 	}
1159       src = SET_SRC (set);
1160     }
1161   else
1162     {
1163       if (reg_seen[regno])
1164 	{
1165 	  new_reg_base_value[regno] = 0;
1166 	  return;
1167 	}
1168       reg_seen[regno] = 1;
1169       new_reg_base_value[regno] = gen_rtx_ADDRESS (Pmode,
1170 						   GEN_INT (unique_id++));
1171       return;
1172     }
1173 
1174   /* If this is not the first set of REGNO, see whether the new value
1175      is related to the old one.  There are two cases of interest:
1176 
1177 	(1) The register might be assigned an entirely new value
1178 	    that has the same base term as the original set.
1179 
1180 	(2) The set might be a simple self-modification that
1181 	    cannot change REGNO's base value.
1182 
1183      If neither case holds, reject the original base value as invalid.
1184      Note that the following situation is not detected:
1185 
1186 	 extern int x, y;  int *p = &x; p += (&y-&x);
1187 
1188      ANSI C does not allow computing the difference of addresses
1189      of distinct top level objects.  */
1190   if (new_reg_base_value[regno] != 0
1191       && find_base_value (src) != new_reg_base_value[regno])
1192     switch (GET_CODE (src))
1193       {
1194       case LO_SUM:
1195       case MINUS:
1196 	if (XEXP (src, 0) != dest && XEXP (src, 1) != dest)
1197 	  new_reg_base_value[regno] = 0;
1198 	break;
1199       case PLUS:
1200 	/* If the value we add in the PLUS is also a valid base value,
1201 	   this might be the actual base value, and the original value
1202 	   an index.  */
1203 	{
1204 	  rtx other = NULL_RTX;
1205 
1206 	  if (XEXP (src, 0) == dest)
1207 	    other = XEXP (src, 1);
1208 	  else if (XEXP (src, 1) == dest)
1209 	    other = XEXP (src, 0);
1210 
1211 	  if (! other || find_base_value (other))
1212 	    new_reg_base_value[regno] = 0;
1213 	  break;
1214 	}
1215       case AND:
1216 	if (XEXP (src, 0) != dest || !CONST_INT_P (XEXP (src, 1)))
1217 	  new_reg_base_value[regno] = 0;
1218 	break;
1219       default:
1220 	new_reg_base_value[regno] = 0;
1221 	break;
1222       }
1223   /* If this is the first set of a register, record the value.  */
1224   else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
1225 	   && ! reg_seen[regno] && new_reg_base_value[regno] == 0)
1226     new_reg_base_value[regno] = find_base_value (src);
1227 
1228   reg_seen[regno] = 1;
1229 }
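
/* Sketch of the self-modification case above: a second set such as
   (set (reg 70) (plus (reg 70) (const_int 4))) leaves
   new_reg_base_value[70] intact, because the PLUS arm sees OTHER =
   (const_int 4), for which find_base_value returns 0; an increment
   cannot move the register to a different top-level object.  */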
1230 
1231 /* Return REG_BASE_VALUE for REGNO.  The selective scheduler uses this to avoid
1232    using hard registers with non-null REG_BASE_VALUE for renaming.  */
1233 rtx
1234 get_reg_base_value (unsigned int regno)
1235 {
1236   return VEC_index (rtx, reg_base_value, regno);
1237 }
1238 
1239 /* If a value is known for REGNO, return it.  */
1240 
1241 rtx
1242 get_reg_known_value (unsigned int regno)
1243 {
1244   if (regno >= FIRST_PSEUDO_REGISTER)
1245     {
1246       regno -= FIRST_PSEUDO_REGISTER;
1247       if (regno < reg_known_value_size)
1248 	return reg_known_value[regno];
1249     }
1250   return NULL;
1251 }
1252 
1253 /* Set the known value for REGNO to VAL.  */
1254 
1255 static void
1256 set_reg_known_value (unsigned int regno, rtx val)
1257 {
1258   if (regno >= FIRST_PSEUDO_REGISTER)
1259     {
1260       regno -= FIRST_PSEUDO_REGISTER;
1261       if (regno < reg_known_value_size)
1262 	reg_known_value[regno] = val;
1263     }
1264 }
1265 
1266 /* Similarly for reg_known_equiv_p.  */
1267 
1268 bool
1269 get_reg_known_equiv_p (unsigned int regno)
1270 {
1271   if (regno >= FIRST_PSEUDO_REGISTER)
1272     {
1273       regno -= FIRST_PSEUDO_REGISTER;
1274       if (regno < reg_known_value_size)
1275 	return reg_known_equiv_p[regno];
1276     }
1277   return false;
1278 }
1279 
1280 static void
1281 set_reg_known_equiv_p (unsigned int regno, bool val)
1282 {
1283   if (regno >= FIRST_PSEUDO_REGISTER)
1284     {
1285       regno -= FIRST_PSEUDO_REGISTER;
1286       if (regno < reg_known_value_size)
1287 	reg_known_equiv_p[regno] = val;
1288     }
1289 }
1290 
1291 
1292 /* Returns a canonical version of X, from the point of view of alias
1293    analysis.  (For example, if X is a MEM whose address is a register,
1294    and the register has a known value (say a SYMBOL_REF), then a MEM
1295    whose address is the SYMBOL_REF is returned.)  */
1296 
1297 rtx
1298 canon_rtx (rtx x)
1299 {
1300   /* Recursively look for equivalences.  */
1301   if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1302     {
1303       rtx t = get_reg_known_value (REGNO (x));
1304       if (t == x)
1305 	return x;
1306       if (t)
1307 	return canon_rtx (t);
1308     }
1309 
1310   if (GET_CODE (x) == PLUS)
1311     {
1312       rtx x0 = canon_rtx (XEXP (x, 0));
1313       rtx x1 = canon_rtx (XEXP (x, 1));
1314 
1315       if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
1316 	{
1317 	  if (CONST_INT_P (x0))
1318 	    return plus_constant (x1, INTVAL (x0));
1319 	  else if (CONST_INT_P (x1))
1320 	    return plus_constant (x0, INTVAL (x1));
1321 	  return gen_rtx_PLUS (GET_MODE (x), x0, x1);
1322 	}
1323     }
1324 
1325   /* This gives us much better alias analysis when called from
1326      the loop optimizer.  Note that we want to leave the original
1327      MEM alone, but need to return the canonicalized MEM with
1328      all the flags at their original values.  */
1329   else if (MEM_P (x))
1330     x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0)));
1331 
1332   return x;
1333 }
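
/* Usage sketch: if reg_known_value records that pseudo 66 equals
   (symbol_ref "a"), then canon_rtx applied to (mem (reg 66)) returns a
   MEM whose address is (symbol_ref "a").  replace_equiv_address_nv
   builds a copy that keeps the original flags, so the original MEM is
   left alone as the comment above requires.  */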
1334 
1335 /* Return 1 if X and Y are identical-looking rtx's.
1336    Expects that X and Y have already been canonicalized.
1337 
1338    We use the data in reg_known_value above to see if two registers with
1339    different numbers are, in fact, equivalent.  */
1340 
1341 static int
1342 rtx_equal_for_memref_p (const_rtx x, const_rtx y)
1343 {
1344   int i;
1345   int j;
1346   enum rtx_code code;
1347   const char *fmt;
1348 
1349   if (x == 0 && y == 0)
1350     return 1;
1351   if (x == 0 || y == 0)
1352     return 0;
1353 
1354   if (x == y)
1355     return 1;
1356 
1357   code = GET_CODE (x);
1358   /* Rtx's of different codes cannot be equal.  */
1359   if (code != GET_CODE (y))
1360     return 0;
1361 
1362   /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1363      (REG:SI x) and (REG:HI x) are NOT equivalent.  */
1364 
1365   if (GET_MODE (x) != GET_MODE (y))
1366     return 0;
1367 
1368   /* Some RTL can be compared without a recursive examination.  */
1369   switch (code)
1370     {
1371     case REG:
1372       return REGNO (x) == REGNO (y);
1373 
1374     case LABEL_REF:
1375       return XEXP (x, 0) == XEXP (y, 0);
1376 
1377     case SYMBOL_REF:
1378       return XSTR (x, 0) == XSTR (y, 0);
1379 
1380     case VALUE:
1381     case CONST_INT:
1382     case CONST_DOUBLE:
1383     case CONST_FIXED:
1384       /* There's no need to compare the contents of CONST_DOUBLEs or
1385 	 CONST_INTs because pointer equality is a good enough
1386 	 comparison for these nodes.  */
1387       return 0;
1388 
1389     default:
1390       break;
1391     }
1392 
1393   /* canon_rtx knows how to handle plus.  No need to canonicalize.  */
1394   if (code == PLUS)
1395     return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
1396 	     && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
1397 	    || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
1398 		&& rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
1399   /* For commutative operations, the RTXs match if the operands match in either
1400      order.  Also handle the simple binary and unary cases without a loop.  */
1401   if (COMMUTATIVE_P (x))
1402     {
1403       rtx xop0 = canon_rtx (XEXP (x, 0));
1404       rtx yop0 = canon_rtx (XEXP (y, 0));
1405       rtx yop1 = canon_rtx (XEXP (y, 1));
1406 
1407       return ((rtx_equal_for_memref_p (xop0, yop0)
1408 	       && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop1))
1409 	      || (rtx_equal_for_memref_p (xop0, yop1)
1410 		  && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop0)));
1411     }
1412   else if (NON_COMMUTATIVE_P (x))
1413     {
1414       return (rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
1415 				      canon_rtx (XEXP (y, 0)))
1416 	      && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)),
1417 					 canon_rtx (XEXP (y, 1))));
1418     }
1419   else if (UNARY_P (x))
1420     return rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
1421 				   canon_rtx (XEXP (y, 0)));
1422 
1423   /* Compare the elements.  If any pair of corresponding elements
1424      fails to match, return 0 for the whole thing.
1425 
1426      Limit cases to types which actually appear in addresses.  */
1427 
1428   fmt = GET_RTX_FORMAT (code);
1429   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1430     {
1431       switch (fmt[i])
1432 	{
1433 	case 'i':
1434 	  if (XINT (x, i) != XINT (y, i))
1435 	    return 0;
1436 	  break;
1437 
1438 	case 'E':
1439 	  /* Two vectors must have the same length.  */
1440 	  if (XVECLEN (x, i) != XVECLEN (y, i))
1441 	    return 0;
1442 
1443 	  /* And the corresponding elements must match.  */
1444 	  for (j = 0; j < XVECLEN (x, i); j++)
1445 	    if (rtx_equal_for_memref_p (canon_rtx (XVECEXP (x, i, j)),
1446 					canon_rtx (XVECEXP (y, i, j))) == 0)
1447 	      return 0;
1448 	  break;
1449 
1450 	case 'e':
1451 	  if (rtx_equal_for_memref_p (canon_rtx (XEXP (x, i)),
1452 				      canon_rtx (XEXP (y, i))) == 0)
1453 	    return 0;
1454 	  break;
1455 
1456 	  /* This can happen for asm operands.  */
1457 	case 's':
1458 	  if (strcmp (XSTR (x, i), XSTR (y, i)))
1459 	    return 0;
1460 	  break;
1461 
1462 	/* This can happen for an asm which clobbers memory.  */
1463 	case '0':
1464 	  break;
1465 
1466 	  /* It is believed that rtx's at this level will never
1467 	     contain anything but integers and other rtx's,
1468 	     except for within LABEL_REFs and SYMBOL_REFs.  */
1469 	default:
1470 	  gcc_unreachable ();
1471 	}
1472     }
1473   return 1;
1474 }
1475 
1476 rtx
1477 find_base_term (rtx x)
1478 {
1479   cselib_val *val;
1480   struct elt_loc_list *l;
1481 
1482 #if defined (FIND_BASE_TERM)
1483   /* Try machine-dependent ways to find the base term.  */
1484   x = FIND_BASE_TERM (x);
1485 #endif
1486 
1487   switch (GET_CODE (x))
1488     {
1489     case REG:
1490       return REG_BASE_VALUE (x);
1491 
1492     case TRUNCATE:
1493       /* As we do not know which address space the pointer refers to, we can
1494 	 handle this only if the target does not support different pointer or
1495 	 address modes depending on the address space.  */
1496       if (!target_default_pointer_address_modes_p ())
1497 	return 0;
1498       if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (Pmode))
1499 	return 0;
1500       /* Fall through.  */
1501     case HIGH:
1502     case PRE_INC:
1503     case PRE_DEC:
1504     case POST_INC:
1505     case POST_DEC:
1506     case PRE_MODIFY:
1507     case POST_MODIFY:
1508       return find_base_term (XEXP (x, 0));
1509 
1510     case ZERO_EXTEND:
1511     case SIGN_EXTEND:	/* Used for Alpha/NT pointers */
1512       /* As we do not know which address space the pointer refers to, we can
1513 	 handle this only if the target does not support different pointer or
1514 	 address modes depending on the address space.  */
1515       if (!target_default_pointer_address_modes_p ())
1516 	return 0;
1517 
1518       {
1519 	rtx temp = find_base_term (XEXP (x, 0));
1520 
1521 	if (temp != 0 && CONSTANT_P (temp))
1522 	  temp = convert_memory_address (Pmode, temp);
1523 
1524 	return temp;
1525       }
1526 
1527     case VALUE:
1528       val = CSELIB_VAL_PTR (x);
1529       if (!val)
1530 	return 0;
1531       for (l = val->locs; l; l = l->next)
1532 	if ((x = find_base_term (l->loc)) != 0)
1533 	  return x;
1534       return 0;
1535 
1536     case LO_SUM:
1537       /* The standard form is (lo_sum reg sym) so look only at the
1538          second operand.  */
1539       return find_base_term (XEXP (x, 1));
1540 
1541     case CONST:
1542       x = XEXP (x, 0);
1543       if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS)
1544 	return 0;
1545       /* Fall through.  */
1546     case PLUS:
1547     case MINUS:
1548       {
1549 	rtx tmp1 = XEXP (x, 0);
1550 	rtx tmp2 = XEXP (x, 1);
1551 
1552 	/* This is a little bit tricky since we have to determine which of
1553 	   the two operands represents the real base address.  Otherwise this
1554 	   routine may return the index register instead of the base register.
1555 
1556 	   That may cause us to believe no aliasing was possible, when in
1557 	   fact aliasing is possible.
1558 
1559 	   We use a few simple tests to guess the base register.  Additional
1560 	   tests can certainly be added.  For example, if one of the operands
1561 	   is a shift or multiply, then it must be the index register and the
1562 	   other operand is the base register.  */
1563 
1564 	if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2))
1565 	  return find_base_term (tmp2);
1566 
1567 	/* If either operand is known to be a pointer, then use it
1568 	   to determine the base term.  */
1569 	if (REG_P (tmp1) && REG_POINTER (tmp1))
1570 	  {
1571 	    rtx base = find_base_term (tmp1);
1572 	    if (base)
1573 	      return base;
1574 	  }
1575 
1576 	if (REG_P (tmp2) && REG_POINTER (tmp2))
1577 	  {
1578 	    rtx base = find_base_term (tmp2);
1579 	    if (base)
1580 	      return base;
1581 	  }
1582 
1583 	/* Neither operand was known to be a pointer.  Go ahead and find the
1584 	   base term for both operands.  */
1585 	tmp1 = find_base_term (tmp1);
1586 	tmp2 = find_base_term (tmp2);
1587 
1588 	/* If either base term is a named object or a special address
1589 	   (like an argument or stack reference), then use it for the
1590 	   base term.  */
1591 	if (tmp1 != 0
1592 	    && (GET_CODE (tmp1) == SYMBOL_REF
1593 		|| GET_CODE (tmp1) == LABEL_REF
1594 		|| (GET_CODE (tmp1) == ADDRESS
1595 		    && GET_MODE (tmp1) != VOIDmode)))
1596 	  return tmp1;
1597 
1598 	if (tmp2 != 0
1599 	    && (GET_CODE (tmp2) == SYMBOL_REF
1600 		|| GET_CODE (tmp2) == LABEL_REF
1601 		|| (GET_CODE (tmp2) == ADDRESS
1602 		    && GET_MODE (tmp2) != VOIDmode)))
1603 	  return tmp2;
1604 
1605 	/* We could not determine which of the two operands was the
1606 	   base register and which was the index.  So we can determine
1607 	   nothing from the base alias check.  */
1608 	return 0;
1609       }
1610 
1611     case AND:
1612       if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) != 0)
1613 	return find_base_term (XEXP (x, 0));
1614       return 0;
1615 
1616     case SYMBOL_REF:
1617     case LABEL_REF:
1618       return x;
1619 
1620     default:
1621       return 0;
1622     }
1623 }
1624 
1625 /* Return 0 if the addresses X and Y are known to point to different
1626    objects, 1 if they might be pointers to the same object.  */
1627 
1628 static int
1629 base_alias_check (rtx x, rtx y, enum machine_mode x_mode,
1630 		  enum machine_mode y_mode)
1631 {
1632   rtx x_base = find_base_term (x);
1633   rtx y_base = find_base_term (y);
1634 
1635   /* If the address itself has no known base see if a known equivalent
1636      value has one.  If either address still has no known base, nothing
1637      is known about aliasing.  */
1638   if (x_base == 0)
1639     {
1640       rtx x_c;
1641 
1642       if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x)
1643 	return 1;
1644 
1645       x_base = find_base_term (x_c);
1646       if (x_base == 0)
1647 	return 1;
1648     }
1649 
1650   if (y_base == 0)
1651     {
1652       rtx y_c;
1653       if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y)
1654 	return 1;
1655 
1656       y_base = find_base_term (y_c);
1657       if (y_base == 0)
1658 	return 1;
1659     }
1660 
1661   /* If the base addresses are equal nothing is known about aliasing.  */
1662   if (rtx_equal_p (x_base, y_base))
1663     return 1;
1664 
1665   /* The base addresses are different expressions.  If they are not accessed
1666      via AND, there is no conflict.  We can bring knowledge of object
1667      alignment into play here.  For example, on alpha, "char a, b;" can
1668 	   alias one another, though "char a; long b;" cannot.  AND addresses may
1669      implicitly alias surrounding objects; i.e. unaligned access in DImode
1670      via AND address can alias all surrounding object types except those
1671 	   with alignment 8 or higher.  */
1672   if (GET_CODE (x) == AND && GET_CODE (y) == AND)
1673     return 1;
1674   if (GET_CODE (x) == AND
1675       && (!CONST_INT_P (XEXP (x, 1))
1676 	  || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
1677     return 1;
1678   if (GET_CODE (y) == AND
1679       && (!CONST_INT_P (XEXP (y, 1))
1680 	  || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
1681     return 1;
1682 
1683   /* Differing symbols not accessed via AND never alias.  */
1684   if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
1685     return 0;
1686 
1687   /* If one address is a stack reference there can be no alias:
1688      stack references using different base registers do not alias,
1689      a stack reference can not alias a parameter, and a stack reference
1690      can not alias a global.  */
1691   if ((GET_CODE (x_base) == ADDRESS && GET_MODE (x_base) == Pmode)
1692       || (GET_CODE (y_base) == ADDRESS && GET_MODE (y_base) == Pmode))
1693     return 0;
1694 
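  /* A sketch of the -fargument-noalias levels assumed below: 0 means
     arguments may alias anything, 1 that distinct arguments do not
     alias each other but may alias globals, and 2 or more that
     arguments do not alias anything.  */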
1695   if (! flag_argument_noalias)
1696     return 1;
1697 
1698   if (flag_argument_noalias > 1)
1699     return 0;
1700 
1701   /* Weak noalias assertion (arguments are distinct, but may match globals).  */
1702   return ! (GET_MODE (x_base) == VOIDmode && GET_MODE (y_base) == VOIDmode);
1703 }
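
/* An illustrative sketch (not part of the original commentary): for a
   stack slot and a global, e.g.

     x = (plus (reg/f sp) (const_int 8))  with base term
	 (address:Pmode (reg/f sp)), and
     y = (symbol_ref "g")                 with base term itself,

   the base terms differ and one is a stack reference, so
   base_alias_check returns 0: the two references cannot alias.  */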
1704 
1705 /* Convert the address X into something we can use.  This is done by returning
1706    it unchanged unless it is a value; in the latter case we call cselib to get
1707    a more useful rtx.  */
1708 
1709 rtx
1710 get_addr (rtx x)
1711 {
1712   cselib_val *v;
1713   struct elt_loc_list *l;
1714 
1715   if (GET_CODE (x) != VALUE)
1716     return x;
1717   v = CSELIB_VAL_PTR (x);
1718   if (v)
1719     {
1720       for (l = v->locs; l; l = l->next)
1721 	if (CONSTANT_P (l->loc))
1722 	  return l->loc;
1723       for (l = v->locs; l; l = l->next)
1724 	if (!REG_P (l->loc) && !MEM_P (l->loc))
1725 	  return l->loc;
1726       if (v->locs)
1727 	return v->locs->loc;
1728     }
1729   return x;
1730 }
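
/* Illustrative sketch: for a cselib VALUE whose location list holds
   both (reg 64) and (symbol_ref "g"), the first loop above prefers
   the constant location and the SYMBOL_REF is returned.  */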
1731 
1732 /*  Return the address of the (N_REFS + 1)th memory reference to ADDR
1733     where SIZE is the size in bytes of the memory reference.  If ADDR
1734     is not modified by the memory reference then ADDR is returned.  */
1735 
1736 static rtx
1737 addr_side_effect_eval (rtx addr, int size, int n_refs)
1738 {
1739   int offset = 0;
1740 
1741   switch (GET_CODE (addr))
1742     {
1743     case PRE_INC:
1744       offset = (n_refs + 1) * size;
1745       break;
1746     case PRE_DEC:
1747       offset = -(n_refs + 1) * size;
1748       break;
1749     case POST_INC:
1750       offset = n_refs * size;
1751       break;
1752     case POST_DEC:
1753       offset = -n_refs * size;
1754       break;
1755 
1756     default:
1757       return addr;
1758     }
1759 
1760   if (offset)
1761     addr = gen_rtx_PLUS (GET_MODE (addr), XEXP (addr, 0),
1762 			 GEN_INT (offset));
1763   else
1764     addr = XEXP (addr, 0);
1765   addr = canon_rtx (addr);
1766 
1767   return addr;
1768 }
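
/* For instance (illustrative only): for (post_inc (reg 64)) with
   SIZE == 4, the first reference (N_REFS == 0) evaluates to (reg 64)
   itself, while the second (N_REFS == 1) evaluates to the
   canonicalized form of (plus (reg 64) (const_int 4)).  */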
1769 
1770 /* Return one if X and Y (memory addresses) reference the
1771    same location in memory or if the references overlap.
1772    Return zero if they definitely do not overlap; otherwise return
1773    minus one, in which case they still might reference the same location.
1774 
1775    C is an offset accumulator.  When
1776    C is nonzero, we are testing aliases between X and Y + C.
1777    XSIZE is the size in bytes of the X reference,
1778    similarly YSIZE is the size in bytes for Y.
1779    Expect that canon_rtx has been already called for X and Y.
1780 
1781    If XSIZE or YSIZE is zero, we do not know the amount of memory being
1782    referenced (the reference was BLKmode), so make the most pessimistic
1783    assumptions.
1784 
1785    If XSIZE or YSIZE is negative, we may access memory outside the object
1786    being referenced as a side effect.  This can happen when using AND to
1787    align memory references, as is done on the Alpha.
1788 
1789    It would be nice to notice that varying addresses cannot conflict with fp
1790    if no local variables had their addresses taken, but that's too hard now.
1791 
1792    ???  Contrary to the tree alias oracle this does not return
1793    one for X + non-constant and Y + non-constant when X and Y are equal.
1794    If that is fixed the TBAA hack for union type-punning can be removed.  */
1795 
1796 static int
1797 memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
1798 {
1799   if (GET_CODE (x) == VALUE)
1800     x = get_addr (x);
1801   if (GET_CODE (y) == VALUE)
1802     y = get_addr (y);
1803   if (GET_CODE (x) == HIGH)
1804     x = XEXP (x, 0);
1805   else if (GET_CODE (x) == LO_SUM)
1806     x = XEXP (x, 1);
1807   else
1808     x = addr_side_effect_eval (x, xsize, 0);
1809   if (GET_CODE (y) == HIGH)
1810     y = XEXP (y, 0);
1811   else if (GET_CODE (y) == LO_SUM)
1812     y = XEXP (y, 1);
1813   else
1814     y = addr_side_effect_eval (y, ysize, 0);
1815 
1816   if (rtx_equal_for_memref_p (x, y))
1817     {
1818       if (xsize <= 0 || ysize <= 0)
1819 	return 1;
1820       if (c >= 0 && xsize > c)
1821 	return 1;
1822       if (c < 0 && ysize+c > 0)
1823 	return 1;
1824       return 0;
1825     }
1826 
1827   /* This code used to check for conflicts involving stack references and
1828      globals but the base address alias code now handles these cases.  */
1829 
1830   if (GET_CODE (x) == PLUS)
1831     {
1832       /* The fact that X is canonicalized means that this
1833 	 PLUS rtx is canonicalized.  */
1834       rtx x0 = XEXP (x, 0);
1835       rtx x1 = XEXP (x, 1);
1836 
1837       if (GET_CODE (y) == PLUS)
1838 	{
1839 	  /* The fact that Y is canonicalized means that this
1840 	     PLUS rtx is canonicalized.  */
1841 	  rtx y0 = XEXP (y, 0);
1842 	  rtx y1 = XEXP (y, 1);
1843 
1844 	  if (rtx_equal_for_memref_p (x1, y1))
1845 	    return memrefs_conflict_p (xsize, x0, ysize, y0, c);
1846 	  if (rtx_equal_for_memref_p (x0, y0))
1847 	    return memrefs_conflict_p (xsize, x1, ysize, y1, c);
1848 	  if (CONST_INT_P (x1))
1849 	    {
1850 	      if (CONST_INT_P (y1))
1851 		return memrefs_conflict_p (xsize, x0, ysize, y0,
1852 					   c - INTVAL (x1) + INTVAL (y1));
1853 	      else
1854 		return memrefs_conflict_p (xsize, x0, ysize, y,
1855 					   c - INTVAL (x1));
1856 	    }
1857 	  else if (CONST_INT_P (y1))
1858 	    return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
1859 
1860 	  return -1;
1861 	}
1862       else if (CONST_INT_P (x1))
1863 	return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
1864     }
1865   else if (GET_CODE (y) == PLUS)
1866     {
1867       /* The fact that Y is canonicalized means that this
1868 	 PLUS rtx is canonicalized.  */
1869       rtx y0 = XEXP (y, 0);
1870       rtx y1 = XEXP (y, 1);
1871 
1872       if (CONST_INT_P (y1))
1873 	return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
1874       else
1875 	return -1;
1876     }
1877 
1878   if (GET_CODE (x) == GET_CODE (y))
1879     switch (GET_CODE (x))
1880       {
1881       case MULT:
1882 	{
1883 	  /* Handle cases where we expect the second operands to be the
1884 	     same, and check only whether the first operands would conflict
1885 	     or not.  */
1886 	  rtx x0, y0;
1887 	  rtx x1 = canon_rtx (XEXP (x, 1));
1888 	  rtx y1 = canon_rtx (XEXP (y, 1));
1889 	  if (! rtx_equal_for_memref_p (x1, y1))
1890 	    return -1;
1891 	  x0 = canon_rtx (XEXP (x, 0));
1892 	  y0 = canon_rtx (XEXP (y, 0));
1893 	  if (rtx_equal_for_memref_p (x0, y0))
1894 	    return (xsize == 0 || ysize == 0
1895 		    || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
1896 
1897 	  /* Can't properly adjust our sizes.  */
1898 	  if (!CONST_INT_P (x1))
1899 	    return -1;
1900 	  xsize /= INTVAL (x1);
1901 	  ysize /= INTVAL (x1);
1902 	  c /= INTVAL (x1);
1903 	  return memrefs_conflict_p (xsize, x0, ysize, y0, c);
1904 	}
1905 
1906       default:
1907 	break;
1908       }
1909 
1910   /* Treat an access through an AND (e.g. a subword access on an Alpha)
1911      as an access with indeterminate size.  Assume that references
1912      besides AND are aligned, so if the size of the other reference is
1913      at least as large as the alignment, assume no other overlap.  */
1914   if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1)))
1915     {
1916       if (GET_CODE (y) == AND || ysize < -INTVAL (XEXP (x, 1)))
1917 	xsize = -1;
1918       return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)), ysize, y, c);
1919     }
1920   if (GET_CODE (y) == AND && CONST_INT_P (XEXP (y, 1)))
1921     {
1922       /* ??? If we are indexing far enough into the array/structure, we
1923 	 may yet be able to determine that we can not overlap.  But we
1924 	 also need to know that we are far enough from the end not to overlap
1925 	 a following reference, so we do nothing with that for now.  */
1926       if (GET_CODE (x) == AND || xsize < -INTVAL (XEXP (y, 1)))
1927 	ysize = -1;
1928       return memrefs_conflict_p (xsize, x, ysize, canon_rtx (XEXP (y, 0)), c);
1929     }
1930 
1931   if (CONSTANT_P (x))
1932     {
1933       if (CONST_INT_P (x) && CONST_INT_P (y))
1934 	{
1935 	  c += (INTVAL (y) - INTVAL (x));
1936 	  return (xsize <= 0 || ysize <= 0
1937 		  || (c >= 0 && xsize > c) || (c < 0 && ysize+c > 0));
1938 	}
1939 
1940       if (GET_CODE (x) == CONST)
1941 	{
1942 	  if (GET_CODE (y) == CONST)
1943 	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
1944 				       ysize, canon_rtx (XEXP (y, 0)), c);
1945 	  else
1946 	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
1947 				       ysize, y, c);
1948 	}
1949       if (GET_CODE (y) == CONST)
1950 	return memrefs_conflict_p (xsize, x, ysize,
1951 				   canon_rtx (XEXP (y, 0)), c);
1952 
1953       if (CONSTANT_P (y))
1954 	return (xsize <= 0 || ysize <= 0
1955 		|| (rtx_equal_for_memref_p (x, y)
1956 		    && ((c >= 0 && xsize > c) || (c < 0 && ysize+c > 0))));
1957 
1958       return -1;
1959     }
1960 
1961   return -1;
1962 }
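
/* A worked example (illustrative only): for X == (reg 64) with
   XSIZE == 4 and Y == (plus (reg 64) (const_int 2)) with YSIZE == 4,
   the PLUS case above recurses with C == 0 + 2 == 2; the operands
   are then equal and XSIZE > C, so 1 is returned: the references
   overlap in two bytes.  */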
1963 
1964 /* Functions to compute memory dependencies.
1965 
1966    Since we process the insns in execution order, we can build tables
1967    to keep track of what registers are fixed (and not aliased), what registers
1968    are varying in known ways, and what registers are varying in unknown
1969    ways.
1970 
1971    If both memory references are volatile, then there must always be a
1972    dependence between the two references, since their order can not be
1973    changed.  A volatile and non-volatile reference can be interchanged
1974    though.
1975 
1976    A MEM_IN_STRUCT reference at a non-AND varying address can never
1977    conflict with a non-MEM_IN_STRUCT reference at a fixed address.  We
1978    also must allow AND addresses, because they may generate accesses
1979    outside the object being referenced.  This is used to generate
1980    aligned addresses from unaligned addresses, for instance, the alpha
1981    storeqi_unaligned pattern.  */
1982 
1983 /* Read dependence: X is read after read in MEM takes place.  There can
1984    only be a dependence here if both reads are volatile.  */
1985 
1986 int
1987 read_dependence (const_rtx mem, const_rtx x)
1988 {
1989   return MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem);
1990 }
1991 
1992 /* Returns MEM1 if and only if MEM1 is a scalar at a fixed address and
1993    MEM2 is a reference to a structure at a varying address, or returns
1994    MEM2 if vice versa.  Otherwise, returns NULL_RTX.  If a non-NULL
1995    value is returned MEM1 and MEM2 can never alias.  VARIES_P is used
1996    to decide whether or not an address may vary; it should return
1997    nonzero whenever variation is possible.
1998    MEM1_ADDR and MEM2_ADDR are the addresses of MEM1 and MEM2.  */
1999 
2000 static const_rtx
2001 fixed_scalar_and_varying_struct_p (const_rtx mem1, const_rtx mem2, rtx mem1_addr,
2002 				   rtx mem2_addr,
2003 				   bool (*varies_p) (const_rtx, bool))
2004 {
2005   if (! flag_strict_aliasing)
2006     return NULL_RTX;
2007 
2008   if (MEM_ALIAS_SET (mem2)
2009       && MEM_SCALAR_P (mem1) && MEM_IN_STRUCT_P (mem2)
2010       && !varies_p (mem1_addr, 1) && varies_p (mem2_addr, 1))
2011     /* MEM1 is a scalar at a fixed address; MEM2 is a struct at a
2012        varying address.  */
2013     return mem1;
2014 
2015   if (MEM_ALIAS_SET (mem1)
2016       && MEM_IN_STRUCT_P (mem1) && MEM_SCALAR_P (mem2)
2017       && varies_p (mem1_addr, 1) && !varies_p (mem2_addr, 1))
2018     /* MEM2 is a scalar at a fixed address; MEM1 is a struct at a
2019        varying address.  */
2020     return mem2;
2021 
2022   return NULL_RTX;
2023 }
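
/* Illustrative sketch: under -fstrict-aliasing, a scalar with a fixed
   address (say a file-scope "int i") and a MEM_IN_STRUCT_P access
   through a varying address (say p->f) match one of the two tests
   above, so the corresponding MEM is returned and the pair cannot
   alias.  */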
2024 
2025 /* Returns nonzero if something about the mode or address format of MEM
2026    indicates that it might well alias *anything*.  */
2027 
2028 static int
2029 aliases_everything_p (const_rtx mem)
2030 {
2031   if (GET_CODE (XEXP (mem, 0)) == AND)
2032 	/* If the address is an AND, it's very hard to know what it is
2033 	   actually pointing at.  */
2034     return 1;
2035 
2036   return 0;
2037 }
2038 
2039 /* Return true if we can determine that the fields referenced cannot
2040    overlap for any pair of objects.  */
2041 
2042 static bool
2043 nonoverlapping_component_refs_p (const_tree x, const_tree y)
2044 {
2045   const_tree fieldx, fieldy, typex, typey, orig_y;
2046 
2047   if (!flag_strict_aliasing)
2048     return false;
2049 
2050   do
2051     {
2052       /* The comparison has to be done at a common type, since we don't
2053 	 know how the inheritance hierarchy works.  */
2054       orig_y = y;
2055       do
2056 	{
2057 	  fieldx = TREE_OPERAND (x, 1);
2058 	  typex = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldx));
2059 
2060 	  y = orig_y;
2061 	  do
2062 	    {
2063 	      fieldy = TREE_OPERAND (y, 1);
2064 	      typey = TYPE_MAIN_VARIANT (DECL_FIELD_CONTEXT (fieldy));
2065 
2066 	      if (typex == typey)
2067 		goto found;
2068 
2069 	      y = TREE_OPERAND (y, 0);
2070 	    }
2071 	  while (y && TREE_CODE (y) == COMPONENT_REF);
2072 
2073 	  x = TREE_OPERAND (x, 0);
2074 	}
2075       while (x && TREE_CODE (x) == COMPONENT_REF);
2076       /* Never found a common type.  */
2077       return false;
2078 
2079     found:
2080       /* If we're left with accessing different fields of a structure,
2081 	 then no overlap.  */
2082       if (TREE_CODE (typex) == RECORD_TYPE
2083 	  && fieldx != fieldy)
2084 	return true;
2085 
2086       /* The comparison on the current field failed.  If we're accessing
2087 	 a deeply nested structure, look at the next outer level.  */
2088       x = TREE_OPERAND (x, 0);
2089       y = TREE_OPERAND (y, 0);
2090     }
2091   while (x && y
2092 	 && TREE_CODE (x) == COMPONENT_REF
2093 	 && TREE_CODE (y) == COMPONENT_REF);
2094 
2095   return false;
2096 }
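
/* For instance (an illustrative sketch): given

     struct S { int a; int b; } *p, *q;

   the accesses p->a and q->b reach the common RECORD_TYPE S with
   distinct FIELD_DECLs, so they cannot overlap no matter what p and
   q point to.  */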
2097 
2098 /* Look at the bottom of the COMPONENT_REF list for a DECL, and return it.  */
2099 
2100 static tree
2101 decl_for_component_ref (tree x)
2102 {
2103   do
2104     {
2105       x = TREE_OPERAND (x, 0);
2106     }
2107   while (x && TREE_CODE (x) == COMPONENT_REF);
2108 
2109   return x && DECL_P (x) ? x : NULL_TREE;
2110 }
2111 
2112 /* Walk up the COMPONENT_REF list and adjust OFFSET to compensate for the
2113    offset of the field reference.  */
2114 
2115 static rtx
2116 adjust_offset_for_component_ref (tree x, rtx offset)
2117 {
2118   HOST_WIDE_INT ioffset;
2119 
2120   if (! offset)
2121     return NULL_RTX;
2122 
2123   ioffset = INTVAL (offset);
2124   do
2125     {
2126       tree offset = component_ref_field_offset (x);
2127       tree field = TREE_OPERAND (x, 1);
2128 
2129       if (! host_integerp (offset, 1))
2130 	return NULL_RTX;
2131       ioffset += (tree_low_cst (offset, 1)
2132 		  + (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
2133 		     / BITS_PER_UNIT));
2134 
2135       x = TREE_OPERAND (x, 0);
2136     }
2137   while (x && TREE_CODE (x) == COMPONENT_REF);
2138 
2139   return GEN_INT (ioffset);
2140 }
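
/* Illustrative sketch: for a reference x.s.f where field s sits at
   byte offset 8 within x and field f at byte offset 4 within s, an
   OFFSET of 0 is adjusted to GEN_INT (12), i.e. made relative to the
   containing object x.  */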
2141 
2142 /* Return nonzero if we can determine the exprs corresponding to memrefs
2143    X and Y and they do not overlap.  */
2144 
2145 int
2146 nonoverlapping_memrefs_p (const_rtx x, const_rtx y)
2147 {
2148   tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y);
2149   rtx rtlx, rtly;
2150   rtx basex, basey;
2151   rtx moffsetx, moffsety;
2152   HOST_WIDE_INT offsetx = 0, offsety = 0, sizex, sizey, tem;
2153 
2154   /* Unless both have exprs, we can't tell anything.  */
2155   if (exprx == 0 || expry == 0)
2156     return 0;
2157 
2158   /* For spill-slot accesses, make sure we have valid offsets.  */
2159   if ((exprx == get_spill_slot_decl (false)
2160        && ! MEM_OFFSET (x))
2161       || (expry == get_spill_slot_decl (false)
2162 	  && ! MEM_OFFSET (y)))
2163     return 0;
2164 
2165   /* If both are field references, we may be able to determine something.  */
2166   if (TREE_CODE (exprx) == COMPONENT_REF
2167       && TREE_CODE (expry) == COMPONENT_REF
2168       && nonoverlapping_component_refs_p (exprx, expry))
2169     return 1;
2170 
2171 
2172   /* If the field reference test failed, look at the DECLs involved.  */
2173   moffsetx = MEM_OFFSET (x);
2174   if (TREE_CODE (exprx) == COMPONENT_REF)
2175     {
2176       if (TREE_CODE (expry) == VAR_DECL
2177 	  && POINTER_TYPE_P (TREE_TYPE (expry)))
2178 	{
2179 	 tree field = TREE_OPERAND (exprx, 1);
2180 	 tree fieldcontext = DECL_FIELD_CONTEXT (field);
2181 	 if (ipa_type_escape_field_does_not_clobber_p (fieldcontext,
2182 						       TREE_TYPE (field)))
2183 	   return 1;
2184 	}
2185       {
2186 	tree t = decl_for_component_ref (exprx);
2187 	if (! t)
2188 	  return 0;
2189 	moffsetx = adjust_offset_for_component_ref (exprx, moffsetx);
2190 	exprx = t;
2191       }
2192     }
2193   else if (INDIRECT_REF_P (exprx))
2194     {
2195       exprx = TREE_OPERAND (exprx, 0);
2196       if (flag_argument_noalias < 2
2197 	  || TREE_CODE (exprx) != PARM_DECL)
2198 	return 0;
2199     }
2200 
2201   moffsety = MEM_OFFSET (y);
2202   if (TREE_CODE (expry) == COMPONENT_REF)
2203     {
2204       if (TREE_CODE (exprx) == VAR_DECL
2205 	  && POINTER_TYPE_P (TREE_TYPE (exprx)))
2206 	{
2207 	 tree field = TREE_OPERAND (expry, 1);
2208 	 tree fieldcontext = DECL_FIELD_CONTEXT (field);
2209 	 if (ipa_type_escape_field_does_not_clobber_p (fieldcontext,
2210 						       TREE_TYPE (field)))
2211 	   return 1;
2212 	}
2213       {
2214 	tree t = decl_for_component_ref (expry);
2215 	if (! t)
2216 	  return 0;
2217 	moffsety = adjust_offset_for_component_ref (expry, moffsety);
2218 	expry = t;
2219       }
2220     }
2221   else if (INDIRECT_REF_P (expry))
2222     {
2223       expry = TREE_OPERAND (expry, 0);
2224       if (flag_argument_noalias < 2
2225 	  || TREE_CODE (expry) != PARM_DECL)
2226 	return 0;
2227     }
2228 
2229   if (! DECL_P (exprx) || ! DECL_P (expry))
2230     return 0;
2231 
2232   /* With invalid code we can end up storing into the constant pool.
2233      Bail out to avoid ICEing when creating RTL for this.
2234      See gfortran.dg/lto/20091028-2_0.f90.  */
2235   if (TREE_CODE (exprx) == CONST_DECL
2236       || TREE_CODE (expry) == CONST_DECL)
2237     return 1;
2238 
2239   rtlx = DECL_RTL (exprx);
2240   rtly = DECL_RTL (expry);
2241 
2242   /* If either RTL is not a MEM, it must be a REG or CONCAT, meaning they
2243      can't overlap unless they are the same because we never reuse that part
2244      of the stack frame used for locals for spilled pseudos.  */
2245   if ((!MEM_P (rtlx) || !MEM_P (rtly))
2246       && ! rtx_equal_p (rtlx, rtly))
2247     return 1;
2248 
2249   /* If we have MEMs referring to different address spaces (which can
2250      potentially overlap), we cannot easily tell from the addresses
2251      whether the references overlap.  */
2252   if (MEM_P (rtlx) && MEM_P (rtly)
2253       && MEM_ADDR_SPACE (rtlx) != MEM_ADDR_SPACE (rtly))
2254     return 0;
2255 
2256   /* Get the base and offsets of both decls.  If either is a register, we
2257      know both are and are the same, so use that as the base.  The only way
2258      we can avoid overlap is if we can deduce that they are nonoverlapping
2259      pieces of that decl, which is very rare.  */
2260   basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx;
2261   if (GET_CODE (basex) == PLUS && CONST_INT_P (XEXP (basex, 1)))
2262     offsetx = INTVAL (XEXP (basex, 1)), basex = XEXP (basex, 0);
2263 
2264   basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly;
2265   if (GET_CODE (basey) == PLUS && CONST_INT_P (XEXP (basey, 1)))
2266     offsety = INTVAL (XEXP (basey, 1)), basey = XEXP (basey, 0);
2267 
2268   /* If the bases are different, we know they do not overlap if both
2269      are constants or if one is a constant and the other a pointer into the
2270      stack frame.  Otherwise a different base means we can't tell if they
2271      overlap or not.  */
2272   if (! rtx_equal_p (basex, basey))
2273     return ((CONSTANT_P (basex) && CONSTANT_P (basey))
2274 	    || (CONSTANT_P (basex) && REG_P (basey)
2275 		&& REGNO_PTR_FRAME_P (REGNO (basey)))
2276 	    || (CONSTANT_P (basey) && REG_P (basex)
2277 		&& REGNO_PTR_FRAME_P (REGNO (basex))));
2278 
2279   sizex = (!MEM_P (rtlx) ? (int) GET_MODE_SIZE (GET_MODE (rtlx))
2280 	   : MEM_SIZE (rtlx) ? INTVAL (MEM_SIZE (rtlx))
2281 	   : -1);
2282   sizey = (!MEM_P (rtly) ? (int) GET_MODE_SIZE (GET_MODE (rtly))
2283 	   : MEM_SIZE (rtly) ? INTVAL (MEM_SIZE (rtly)) :
2284 	   -1);
2285 
2286   /* If we have an offset for either memref, it can update the values computed
2287      above.  */
2288   if (moffsetx)
2289     offsetx += INTVAL (moffsetx), sizex -= INTVAL (moffsetx);
2290   if (moffsety)
2291     offsety += INTVAL (moffsety), sizey -= INTVAL (moffsety);
2292 
2293   /* If a memref has both a size and an offset, we can use the smaller size.
2294      We can't do this if the offset isn't known because we must view this
2295      memref as being anywhere inside the DECL's MEM.  */
2296   if (MEM_SIZE (x) && moffsetx)
2297     sizex = INTVAL (MEM_SIZE (x));
2298   if (MEM_SIZE (y) && moffsety)
2299     sizey = INTVAL (MEM_SIZE (y));
2300 
2301   /* Put the values of the memref with the lower offset in X's values.  */
2302   if (offsetx > offsety)
2303     {
2304       tem = offsetx, offsetx = offsety, offsety = tem;
2305       tem = sizex, sizex = sizey, sizey = tem;
2306     }
2307 
2308   /* If we don't know the size of the lower-offset value, we can't tell
2309      if they conflict.  Otherwise, we do the test.  */
2310   return sizex >= 0 && offsety >= offsetx + sizex;
2311 }
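
/* A worked example (illustrative only): two references into the same
   DECL with OFFSETX == 0, SIZEX == 4 and OFFSETY == 8, SIZEY == 4
   satisfy OFFSETY >= OFFSETX + SIZEX, so they are reported as
   nonoverlapping.  */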
2312 
2313 /* True dependence: X is read after store in MEM takes place.  */
2314 
2315 int
2316 true_dependence (const_rtx mem, enum machine_mode mem_mode, const_rtx x,
2317 		 bool (*varies) (const_rtx, bool))
2318 {
2319   rtx x_addr, mem_addr;
2320   rtx base;
2321   int ret;
2322 
2323   if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2324     return 1;
2325 
2326   /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2327      This is used in epilogue deallocation functions, and in cselib.  */
2328   if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2329     return 1;
2330   if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2331     return 1;
2332   if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2333       || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2334     return 1;
2335 
2336   /* Read-only memory is by definition never modified, and therefore can't
2337      conflict with anything.  We don't expect to find read-only set on MEM,
2338      but stupid user tricks can produce them, so don't die.  */
2339   if (MEM_READONLY_P (x))
2340     return 0;
2341 
2342   /* If we have MEMs referring to different address spaces (which can
2343      potentially overlap), we cannot easily tell from the addresses
2344      whether the references overlap.  */
2345   if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2346     return 1;
2347 
2348   if (mem_mode == VOIDmode)
2349     mem_mode = GET_MODE (mem);
2350 
2351   x_addr = XEXP (x, 0);
2352   mem_addr = XEXP (mem, 0);
2353   if (!((GET_CODE (x_addr) == VALUE
2354 	 && GET_CODE (mem_addr) != VALUE
2355 	 && reg_mentioned_p (x_addr, mem_addr))
2356 	|| (GET_CODE (x_addr) != VALUE
2357 	    && GET_CODE (mem_addr) == VALUE
2358 	    && reg_mentioned_p (mem_addr, x_addr))))
2359     {
2360       x_addr = get_addr (x_addr);
2361       mem_addr = get_addr (mem_addr);
2362     }
2363 
2364   base = find_base_term (x_addr);
2365   if (base && (GET_CODE (base) == LABEL_REF
2366 	       || (GET_CODE (base) == SYMBOL_REF
2367 		   && CONSTANT_POOL_ADDRESS_P (base))))
2368     return 0;
2369 
2370   if (! base_alias_check (x_addr, mem_addr, GET_MODE (x), mem_mode))
2371     return 0;
2372 
2373   x_addr = canon_rtx (x_addr);
2374   mem_addr = canon_rtx (mem_addr);
2375 
2376   if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
2377 				 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
2378     return ret;
2379 
2380   if (DIFFERENT_ALIAS_SETS_P (x, mem))
2381     return 0;
2382 
2383   if (nonoverlapping_memrefs_p (mem, x))
2384     return 0;
2385 
2386   if (aliases_everything_p (x))
2387     return 1;
2388 
2389   /* We cannot use aliases_everything_p to test MEM, since we must look
2390      at MEM_MODE, rather than GET_MODE (MEM).  */
2391   if (mem_mode == QImode || GET_CODE (mem_addr) == AND)
2392     return 1;
2393 
2394   /* In true_dependence we also allow BLKmode to alias anything.  Why
2395      don't we do this in anti_dependence and output_dependence?  */
2396   if (mem_mode == BLKmode || GET_MODE (x) == BLKmode)
2397     return 1;
2398 
2399   if (fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr, varies))
2400     return 0;
2401 
2402   return rtx_refs_may_alias_p (x, mem, true);
2403 }
2404 
2405 /* Canonical true dependence: X is read after store in MEM takes place.
2406    Variant of true_dependence which assumes MEM has already been
2407    canonicalized (hence we no longer do that here).
2408    The mem_addr argument has been added, since true_dependence computed
2409    this value prior to canonicalizing.
2410    If x_addr is non-NULL, it is used in preference to XEXP (x, 0).  */
2411 
2412 int
2413 canon_true_dependence (const_rtx mem, enum machine_mode mem_mode, rtx mem_addr,
2414 		       const_rtx x, rtx x_addr, bool (*varies) (const_rtx, bool))
2415 {
2416   int ret;
2417 
2418   if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2419     return 1;
2420 
2421   /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2422      This is used in epilogue deallocation functions.  */
2423   if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2424     return 1;
2425   if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2426     return 1;
2427   if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2428       || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2429     return 1;
2430 
2431   /* Read-only memory is by definition never modified, and therefore can't
2432      conflict with anything.  We don't expect to find read-only set on MEM,
2433      but stupid user tricks can produce them, so don't die.  */
2434   if (MEM_READONLY_P (x))
2435     return 0;
2436 
2437   /* If we have MEMs referring to different address spaces (which can
2438      potentially overlap), we cannot easily tell from the addresses
2439      whether the references overlap.  */
2440   if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2441     return 1;
2442 
2443   if (! x_addr)
2444     {
2445       x_addr = XEXP (x, 0);
2446       if (!((GET_CODE (x_addr) == VALUE
2447 	     && GET_CODE (mem_addr) != VALUE
2448 	     && reg_mentioned_p (x_addr, mem_addr))
2449 	    || (GET_CODE (x_addr) != VALUE
2450 		&& GET_CODE (mem_addr) == VALUE
2451 		&& reg_mentioned_p (mem_addr, x_addr))))
2452 	x_addr = get_addr (x_addr);
2453     }
2454 
2455   if (! base_alias_check (x_addr, mem_addr, GET_MODE (x), mem_mode))
2456     return 0;
2457 
2458   x_addr = canon_rtx (x_addr);
2459   if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
2460 				 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
2461     return ret;
2462 
2463   if (DIFFERENT_ALIAS_SETS_P (x, mem))
2464     return 0;
2465 
2466   if (nonoverlapping_memrefs_p (x, mem))
2467     return 0;
2468 
2469   if (aliases_everything_p (x))
2470     return 1;
2471 
2472   /* We cannot use aliases_everything_p to test MEM, since we must look
2473      at MEM_MODE, rather than GET_MODE (MEM).  */
2474   if (mem_mode == QImode || GET_CODE (mem_addr) == AND)
2475     return 1;
2476 
2477   /* In true_dependence we also allow BLKmode to alias anything.  Why
2478      don't we do this in anti_dependence and output_dependence?  */
2479   if (mem_mode == BLKmode || GET_MODE (x) == BLKmode)
2480     return 1;
2481 
2482   if (fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr, varies))
2483     return 0;
2484 
2485   return rtx_refs_may_alias_p (x, mem, true);
2486 }
2487 
2488 /* Returns nonzero if a write to X might alias a previous read from
2489    (or, if WRITEP is nonzero, a write to) MEM.  */
2490 
2491 static int
2492 write_dependence_p (const_rtx mem, const_rtx x, int writep)
2493 {
2494   rtx x_addr, mem_addr;
2495   const_rtx fixed_scalar;
2496   rtx base;
2497   int ret;
2498 
2499   if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2500     return 1;
2501 
2502   /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2503      This is used in epilogue deallocation functions.  */
2504   if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2505     return 1;
2506   if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2507     return 1;
2508   if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2509       || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2510     return 1;
2511 
2512   /* A read from read-only memory can't conflict with read-write memory.  */
2513   if (!writep && MEM_READONLY_P (mem))
2514     return 0;
2515 
2516   /* If we have MEMs referring to different address spaces (which can
2517      potentially overlap), we cannot easily tell from the addresses
2518      whether the references overlap.  */
2519   if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2520     return 1;
2521 
2522   x_addr = XEXP (x, 0);
2523   mem_addr = XEXP (mem, 0);
2524   if (!((GET_CODE (x_addr) == VALUE
2525 	 && GET_CODE (mem_addr) != VALUE
2526 	 && reg_mentioned_p (x_addr, mem_addr))
2527 	|| (GET_CODE (x_addr) != VALUE
2528 	    && GET_CODE (mem_addr) == VALUE
2529 	    && reg_mentioned_p (mem_addr, x_addr))))
2530     {
2531       x_addr = get_addr (x_addr);
2532       mem_addr = get_addr (mem_addr);
2533     }
2534 
2535   if (! writep)
2536     {
2537       base = find_base_term (mem_addr);
2538       if (base && (GET_CODE (base) == LABEL_REF
2539 		   || (GET_CODE (base) == SYMBOL_REF
2540 		       && CONSTANT_POOL_ADDRESS_P (base))))
2541 	return 0;
2542     }
2543 
2544   if (! base_alias_check (x_addr, mem_addr, GET_MODE (x),
2545 			  GET_MODE (mem)))
2546     return 0;
2547 
2548   x_addr = canon_rtx (x_addr);
2549   mem_addr = canon_rtx (mem_addr);
2550 
2551   if ((ret = memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
2552 				 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
2553     return ret;
2554 
2555   if (nonoverlapping_memrefs_p (x, mem))
2556     return 0;
2557 
2558   fixed_scalar
2559     = fixed_scalar_and_varying_struct_p (mem, x, mem_addr, x_addr,
2560 					 rtx_addr_varies_p);
2561 
2562   if ((fixed_scalar == mem && !aliases_everything_p (x))
2563       || (fixed_scalar == x && !aliases_everything_p (mem)))
2564     return 0;
2565 
2566   return rtx_refs_may_alias_p (x, mem, false);
2567 }
2568 
2569 /* Anti dependence: X is written after read in MEM takes place.  */
2570 
2571 int
2572 anti_dependence (const_rtx mem, const_rtx x)
2573 {
2574   return write_dependence_p (mem, x, /*writep=*/0);
2575 }
2576 
2577 /* Output dependence: X is written after store in MEM takes place.  */
2578 
2579 int
2580 output_dependence (const_rtx mem, const_rtx x)
2581 {
2582   return write_dependence_p (mem, x, /*writep=*/1);
2583 }
2584 
2585 
2586 void
2587 init_alias_target (void)
2588 {
2589   int i;
2590 
2591   memset (static_reg_base_value, 0, sizeof static_reg_base_value);
2592 
2593   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2594     /* Check whether this register can hold an incoming pointer
2595        argument.  FUNCTION_ARG_REGNO_P tests outgoing register
2596        numbers, so translate if necessary due to register windows.  */
2597     if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i))
2598 	&& HARD_REGNO_MODE_OK (i, Pmode))
2599       static_reg_base_value[i]
2600 	= gen_rtx_ADDRESS (VOIDmode, gen_rtx_REG (Pmode, i));
2601 
2602   static_reg_base_value[STACK_POINTER_REGNUM]
2603     = gen_rtx_ADDRESS (Pmode, stack_pointer_rtx);
2604   static_reg_base_value[ARG_POINTER_REGNUM]
2605     = gen_rtx_ADDRESS (Pmode, arg_pointer_rtx);
2606   static_reg_base_value[FRAME_POINTER_REGNUM]
2607     = gen_rtx_ADDRESS (Pmode, frame_pointer_rtx);
2608 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2609   static_reg_base_value[HARD_FRAME_POINTER_REGNUM]
2610     = gen_rtx_ADDRESS (Pmode, hard_frame_pointer_rtx);
2611 #endif
2612 }
2613 
2614 /* Set MEMORY_MODIFIED when X modifies DATA (which is assumed
2615    to be a memory reference).  */
2616 static bool memory_modified;
2617 static void
2618 memory_modified_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
2619 {
2620   if (MEM_P (x))
2621     {
2622       if (anti_dependence (x, (const_rtx)data) || output_dependence (x, (const_rtx)data))
2623 	memory_modified = true;
2624     }
2625 }
2626 
2627 
2628 /* Return true when INSN possibly modifies the memory contents of MEM
2629    (i.e. the addressed storage can be modified).  */
2630 bool
2631 memory_modified_in_insn_p (const_rtx mem, const_rtx insn)
2632 {
2633   if (!INSN_P (insn))
2634     return false;
2635   memory_modified = false;
2636   note_stores (PATTERN (insn), memory_modified_1, CONST_CAST_RTX(mem));
2637   return memory_modified;
2638 }
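
/* Usage sketch (illustrative only):

     if (memory_modified_in_insn_p (mem, insn))
       ...

   asks whether any store or clobber in INSN's pattern may write
   storage overlapping MEM.  */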
2639 
2640 /* Initialize the aliasing machinery.  Initialize the REG_KNOWN_VALUE
2641    array.  */
2642 
2643 void
2644 init_alias_analysis (void)
2645 {
2646   unsigned int maxreg = max_reg_num ();
2647   int changed, pass;
2648   int i;
2649   unsigned int ui;
2650   rtx insn;
2651 
2652   timevar_push (TV_ALIAS_ANALYSIS);
2653 
2654   reg_known_value_size = maxreg - FIRST_PSEUDO_REGISTER;
2655   reg_known_value = GGC_CNEWVEC (rtx, reg_known_value_size);
2656   reg_known_equiv_p = XCNEWVEC (bool, reg_known_value_size);
2657 
2658   /* If we have memory allocated from the previous run, use it.  */
2659   if (old_reg_base_value)
2660     reg_base_value = old_reg_base_value;
2661 
2662   if (reg_base_value)
2663     VEC_truncate (rtx, reg_base_value, 0);
2664 
2665   VEC_safe_grow_cleared (rtx, gc, reg_base_value, maxreg);
2666 
2667   new_reg_base_value = XNEWVEC (rtx, maxreg);
2668   reg_seen = XNEWVEC (char, maxreg);
2669 
2670   /* The basic idea is that each pass through this loop will use the
2671      "constant" information from the previous pass to propagate alias
2672      information through another level of assignments.
2673 
2674      This could get expensive if the assignment chains are long.  Maybe
2675      we should throttle the number of iterations, possibly based on
2676      the optimization level or flag_expensive_optimizations.
2677 
2678      We could propagate more information in the first pass by making use
2679      of DF_REG_DEF_COUNT to determine immediately that the alias information
2680      for a pseudo is "constant".
2681 
2682      A program with an uninitialized variable can cause an infinite loop
2683      here.  Instead of doing a full dataflow analysis to detect such problems
2684      we just cap the number of iterations for the loop.
2685 
2686      The state of the arrays for the set chain in question does not matter
2687      since the program has undefined behavior.  */
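
  /* For instance (illustrative only): with a copy chain such as

       (set (reg 65) (reg 64))
       (set (reg 66) (reg 65))

     each pass can only consult the base values recorded by the
     previous pass, so the base of (reg 66) settles one pass after
     that of (reg 65), and the loop exits once nothing changes.  */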
2688 
2689   pass = 0;
2690   do
2691     {
2692       /* Assume nothing will change this iteration of the loop.  */
2693       changed = 0;
2694 
2695       /* We want to assign the same IDs each iteration of this loop, so
2696 	 start counting from zero each time.  */
2697       unique_id = 0;
2698 
2699       /* We're at the start of the function each iteration through the
2700 	 loop, so we're copying arguments.  */
2701       copying_arguments = true;
2702 
2703       /* Wipe the potential alias information clean for this pass.  */
2704       memset (new_reg_base_value, 0, maxreg * sizeof (rtx));
2705 
2706       /* Wipe the reg_seen array clean.  */
2707       memset (reg_seen, 0, maxreg);
2708 
2709       /* Mark all hard registers which may contain an address.
2710 	 The stack, frame and argument pointers may contain an address.
2711 	 An argument register which can hold a Pmode value may contain
2712 	 an address even if it is not in BASE_REGS.
2713 
2714 	 The address expression is VOIDmode for an argument and
2715 	 Pmode for other registers.  */
2716 
2717       memcpy (new_reg_base_value, static_reg_base_value,
2718 	      FIRST_PSEUDO_REGISTER * sizeof (rtx));
2719 
2720       /* Walk the insns adding values to the new_reg_base_value array.  */
2721       for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2722 	{
2723 	  if (INSN_P (insn))
2724 	    {
2725 	      rtx note, set;
2726 
2727 #if defined (HAVE_prologue) || defined (HAVE_epilogue)
2728 	      /* The prologue/epilogue insns are not threaded onto the
2729 		 insn chain until after reload has completed.  Thus,
2730 		 there is no sense wasting time checking if INSN is in
2731 		 the prologue/epilogue until after reload has completed.  */
2732 	      if (reload_completed
2733 		  && prologue_epilogue_contains (insn))
2734 		continue;
2735 #endif
2736 
2737 	      /* If this insn has a noalias note, process it.  Otherwise,
2738 		 scan for sets.  A simple set will have no side effects
2739 		 which could change the base value of any other register.  */
2740 
2741 	      if (GET_CODE (PATTERN (insn)) == SET
2742 		  && REG_NOTES (insn) != 0
2743 		  && find_reg_note (insn, REG_NOALIAS, NULL_RTX))
2744 		record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL);
2745 	      else
2746 		note_stores (PATTERN (insn), record_set, NULL);
2747 
2748 	      set = single_set (insn);
2749 
2750 	      if (set != 0
2751 		  && REG_P (SET_DEST (set))
2752 		  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
2753 		{
2754 		  unsigned int regno = REGNO (SET_DEST (set));
2755 		  rtx src = SET_SRC (set);
2756 		  rtx t;
2757 
2758 		  note = find_reg_equal_equiv_note (insn);
2759 		  if (note && REG_NOTE_KIND (note) == REG_EQUAL
2760 		      && DF_REG_DEF_COUNT (regno) != 1)
2761 		    note = NULL_RTX;
2762 
2763 		  if (note != NULL_RTX
2764 		      && GET_CODE (XEXP (note, 0)) != EXPR_LIST
2765 		      && ! rtx_varies_p (XEXP (note, 0), 1)
2766 		      && ! reg_overlap_mentioned_p (SET_DEST (set),
2767 						    XEXP (note, 0)))
2768 		    {
2769 		      set_reg_known_value (regno, XEXP (note, 0));
2770 		      set_reg_known_equiv_p (regno,
2771 			REG_NOTE_KIND (note) == REG_EQUIV);
2772 		    }
2773 		  else if (DF_REG_DEF_COUNT (regno) == 1
2774 			   && GET_CODE (src) == PLUS
2775 			   && REG_P (XEXP (src, 0))
2776 			   && (t = get_reg_known_value (REGNO (XEXP (src, 0))))
2777 			   && CONST_INT_P (XEXP (src, 1)))
2778 		    {
2779 		      t = plus_constant (t, INTVAL (XEXP (src, 1)));
2780 		      set_reg_known_value (regno, t);
2781 		      set_reg_known_equiv_p (regno, 0);
2782 		    }
2783 		  else if (DF_REG_DEF_COUNT (regno) == 1
2784 			   && ! rtx_varies_p (src, 1))
2785 		    {
2786 		      set_reg_known_value (regno, src);
2787 		      set_reg_known_equiv_p (regno, 0);
2788 		    }
2789 		}
2790 	    }
2791 	  else if (NOTE_P (insn)
2792 		   && NOTE_KIND (insn) == NOTE_INSN_FUNCTION_BEG)
2793 	    copying_arguments = false;
2794 	}
2795 
2796       /* Now propagate values from new_reg_base_value to reg_base_value.  */
2797       gcc_assert (maxreg == (unsigned int) max_reg_num ());
2798 
2799       for (ui = 0; ui < maxreg; ui++)
2800 	{
2801 	  if (new_reg_base_value[ui]
2802 	      && new_reg_base_value[ui] != VEC_index (rtx, reg_base_value, ui)
2803 	      && ! rtx_equal_p (new_reg_base_value[ui],
2804 				VEC_index (rtx, reg_base_value, ui)))
2805 	    {
2806 	      VEC_replace (rtx, reg_base_value, ui, new_reg_base_value[ui]);
2807 	      changed = 1;
2808 	    }
2809 	}
2810     }
2811   while (changed && ++pass < MAX_ALIAS_LOOP_PASSES);
2812 
2813   /* Fill in the remaining entries.  */
2814   for (i = 0; i < (int)reg_known_value_size; i++)
2815     if (reg_known_value[i] == 0)
2816       reg_known_value[i] = regno_reg_rtx[i + FIRST_PSEUDO_REGISTER];
2817 
2818   /* Clean up.  */
2819   free (new_reg_base_value);
2820   new_reg_base_value = 0;
2821   free (reg_seen);
2822   reg_seen = 0;
2823   timevar_pop (TV_ALIAS_ANALYSIS);
2824 }
2825 
2826 void
2827 end_alias_analysis (void)
2828 {
2829   old_reg_base_value = reg_base_value;
2830   ggc_free (reg_known_value);
2831   reg_known_value = 0;
2832   reg_known_value_size = 0;
2833   free (reg_known_equiv_p);
2834   reg_known_equiv_p = 0;
2835 }
2836 
2837 #include "gt-alias.h"
2838