/* Data structure for the modref pass.
   Copyright (C) 2020-2022 Free Software Foundation, Inc.
   Contributed by David Cepelik and Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "ipa-modref-tree.h"
#include "selftest.h"
#include "tree-ssa-alias.h"
#include "gimple.h"
#include "cgraph.h"
#include "tree-streamer.h"

/* Return true if both accesses are the same.  */
bool
modref_access_node::operator == (modref_access_node &a) const
{
  if (parm_index != a.parm_index)
    return false;
  if (parm_index != MODREF_UNKNOWN_PARM
      && parm_index != MODREF_GLOBAL_MEMORY_PARM)
    {
      if (parm_offset_known != a.parm_offset_known)
        return false;
      if (parm_offset_known
          && !known_eq (parm_offset, a.parm_offset))
        return false;
    }
  if (range_info_useful_p () != a.range_info_useful_p ())
    return false;
  if (range_info_useful_p ()
      && (!known_eq (a.offset, offset)
          || !known_eq (a.size, size)
          || !known_eq (a.max_size, max_size)))
    return false;
  return true;
}

/* Return true if A is a subaccess of THIS.  */
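/* Illustrative example (numbers chosen for exposition only): an access to
   parameter 0 with parm_offset 0, offset 0 and max_size of 128 bits
   contains an access to the same parameter with parm_offset 8 (bytes),
   offset 0 and max_size of 32 bits, since the latter normalizes to bits
   [64, 96) which is a subrange of bits [0, 128); in addition the size of
   THIS must not exceed the size of A.  */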
bool
modref_access_node::contains (const modref_access_node &a) const
{
  poly_int64 aoffset_adj = 0;
  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      if (parm_index != a.parm_index)
        return false;
      if (parm_offset_known)
        {
          if (!a.parm_offset_known)
            return false;
          /* Accesses are never below parm_offset, so look
             for smaller offset.
             If access ranges are known, still allow merging
             when the bit-offset comparison passes.  */
          if (!known_le (parm_offset, a.parm_offset)
              && !range_info_useful_p ())
            return false;
          /* We allow negative aoffset_adj here in case
             there is a useful range.  This is because adding
             a.offset may result in non-negative offset again.
             Ubsan fails on val << LOG2_BITS_PER_UNIT where val
             is negative.  */
          aoffset_adj = (a.parm_offset - parm_offset)
                        * BITS_PER_UNIT;
        }
    }
  if (range_info_useful_p ())
    {
      if (!a.range_info_useful_p ())
        return false;
      /* Sizes of stores are used to check that the object is big enough
         to fit the store, so a smaller or unknown store is more general
         than a larger store.  */
      if (known_size_p (size)
          && (!known_size_p (a.size)
              || !known_le (size, a.size)))
        return false;
      if (known_size_p (max_size))
        return known_subrange_p (a.offset + aoffset_adj,
                                 a.max_size, offset, max_size);
      else
        return known_le (offset, a.offset + aoffset_adj);
    }
  return true;
}

/* Update access range to new parameters.
   If RECORD_ADJUSTMENTS is true, record the number of changes in the access
   and once the threshold is exceeded start dropping precision
   so that only a constant number of updates is possible.  This makes
   the dataflow converge.  */
void
modref_access_node::update (poly_int64 parm_offset1,
                            poly_int64 offset1, poly_int64 size1,
                            poly_int64 max_size1, bool record_adjustments)
{
  if (known_eq (parm_offset, parm_offset1)
      && known_eq (offset, offset1)
      && known_eq (size, size1)
      && known_eq (max_size, max_size1))
    return;
  if (!record_adjustments
      || (++adjustments) < param_modref_max_adjustments)
    {
      parm_offset = parm_offset1;
      offset = offset1;
      size = size1;
      max_size = max_size1;
    }
  else
    {
      if (dump_file)
        fprintf (dump_file, "--param modref-max-adjustments limit reached:");
      if (!known_eq (parm_offset, parm_offset1))
        {
          if (dump_file)
            fprintf (dump_file, " parm_offset cleared");
          parm_offset_known = false;
        }
      if (!known_eq (size, size1))
        {
          size = -1;
          if (dump_file)
            fprintf (dump_file, " size cleared");
        }
      if (!known_eq (max_size, max_size1))
        {
          max_size = -1;
          if (dump_file)
            fprintf (dump_file, " max_size cleared");
        }
      if (!known_eq (offset, offset1))
        {
          offset = 0;
          if (dump_file)
            fprintf (dump_file, " offset cleared");
        }
      if (dump_file)
        fprintf (dump_file, "\n");
    }
}

/* Merge in access A if it is possible to do without losing
   precision.  Return true if successful.
   If RECORD_ADJUSTMENTS is true, remember how many times the interval
   was prolonged and punt when there are too many.  */
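/* Illustrative example (values chosen for exposition only): two accesses
   to the same parameter covering bytes [0, 8) and [8, 16) with equal
   sizes are adjacent, so they merge losslessly into a single access
   covering bytes [0, 16); ranges with a gap between them are rejected
   here and left to forced_merge.  */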
bool
modref_access_node::merge (const modref_access_node &a,
                           bool record_adjustments)
{
  poly_int64 offset1 = 0;
  poly_int64 aoffset1 = 0;
  poly_int64 new_parm_offset = 0;

  /* We assume that containment was tested earlier.  */
  gcc_checking_assert (!contains (a) && !a.contains (*this));
  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      if (parm_index != a.parm_index)
        return false;
      if (parm_offset_known)
        {
          if (!a.parm_offset_known)
            return false;
          if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
            return false;
        }
    }
  /* See if we can merge ranges.  */
  if (range_info_useful_p ())
    {
      /* In this case we have containment that should be
         handled earlier.  */
      gcc_checking_assert (a.range_info_useful_p ());

      /* If a.size is less specific than size, merge only
         if intervals are otherwise equivalent.  */
      if (known_size_p (size)
          && (!known_size_p (a.size) || known_lt (a.size, size)))
        {
          if (((known_size_p (max_size) || known_size_p (a.max_size))
               && !known_eq (max_size, a.max_size))
               || !known_eq (offset1, aoffset1))
            return false;
          update (new_parm_offset, offset1, a.size, max_size,
                  record_adjustments);
          return true;
        }
      /* If sizes are the same, we can extend the interval.  */
      if ((known_size_p (size) || known_size_p (a.size))
          && !known_eq (size, a.size))
        return false;
      if (known_le (offset1, aoffset1))
        {
          if (!known_size_p (max_size)
              || known_ge (offset1 + max_size, aoffset1))
            {
              update2 (new_parm_offset, offset1, size, max_size,
                       aoffset1, a.size, a.max_size,
                       record_adjustments);
              return true;
            }
        }
      else if (known_le (aoffset1, offset1))
        {
          if (!known_size_p (a.max_size)
              || known_ge (aoffset1 + a.max_size, offset1))
            {
              update2 (new_parm_offset, offset1, size, max_size,
                       aoffset1, a.size, a.max_size,
                       record_adjustments);
              return true;
            }
        }
      return false;
    }
  update (new_parm_offset, offset1,
          size, max_size, record_adjustments);
  return true;
}

/* Return true if merging A1 and B1 loses less information
   than merging A2 and B2.
   Assume that no containment or lossless merging is possible.  */
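/* Illustrative example (values for exposition only): for byte ranges
   [0, 4) and [8, 12) the distance is 4 bytes, while for [0, 4) and
   [32, 36) it is 28 bytes, so the first pair is reported as closer and
   will be preferred when a forced merge is unavoidable.  */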
bool
modref_access_node::closer_pair_p (const modref_access_node &a1,
                                   const modref_access_node &b1,
                                   const modref_access_node &a2,
                                   const modref_access_node &b2)
{
  /* Merging accesses with different parm indexes amounts to a complete
     loss of range info.  */
  if (a1.parm_index != b1.parm_index)
    return false;
  if (a2.parm_index != b2.parm_index)
    return true;
  /* If parm is known and parm indexes are the same we should
     already have containment.  */
  gcc_checking_assert (a1.parm_offset_known && b1.parm_offset_known);
  gcc_checking_assert (a2.parm_offset_known && b2.parm_offset_known);

  /* First normalize offsets to a common parm offset.  */
  poly_int64 new_parm_offset, offseta1, offsetb1, offseta2, offsetb2;
  if (!a1.combined_offsets (b1, &new_parm_offset, &offseta1, &offsetb1)
      || !a2.combined_offsets (b2, &new_parm_offset, &offseta2, &offsetb2))
    gcc_unreachable ();

  /* Now compute the distance between the intervals.  */
  poly_offset_int dist1, dist2;
  if (known_le (offseta1, offsetb1))
    {
      if (!known_size_p (a1.max_size))
        dist1 = 0;
      else
        dist1 = (poly_offset_int)offsetb1
                - (poly_offset_int)offseta1
                - (poly_offset_int)a1.max_size;
    }
  else
    {
      if (!known_size_p (b1.max_size))
        dist1 = 0;
      else
        dist1 = (poly_offset_int)offseta1
                - (poly_offset_int)offsetb1
                - (poly_offset_int)b1.max_size;
    }
  if (known_le (offseta2, offsetb2))
    {
      if (!known_size_p (a2.max_size))
        dist2 = 0;
      else
        dist2 = (poly_offset_int)offsetb2
                - (poly_offset_int)offseta2
                - (poly_offset_int)a2.max_size;
    }
  else
    {
      if (!known_size_p (b2.max_size))
        dist2 = 0;
      else
        dist2 = (poly_offset_int)offseta2
                - (poly_offset_int)offsetb2
                - (poly_offset_int)b2.max_size;
    }
  /* It may happen that intervals overlap when sizes differ.
     Prefer the overlap to non-overlap.  */
  if (known_lt (dist1, 0) && known_ge (dist2, 0))
    return true;
  if (known_lt (dist2, 0) && known_ge (dist1, 0))
    return false;
  if (known_lt (dist1, 0))
    /* If both pairs overlap, minimize the overlap.  */
    return known_le (dist2, dist1);
  else
    /* If both pairs are disjoint, prefer the smaller distance.  */
    return known_le (dist1, dist2);
}

/* Merge in access A while losing precision.  */
void
modref_access_node::forced_merge (const modref_access_node &a,
                                  bool record_adjustments)
{
  if (parm_index != a.parm_index)
    {
      gcc_checking_assert (parm_index != MODREF_UNKNOWN_PARM);
      parm_index = MODREF_UNKNOWN_PARM;
      return;
    }

  /* We assume that containment and lossless merging
     were tested earlier.  */
  gcc_checking_assert (!contains (a) && !a.contains (*this)
                       && !merge (a, record_adjustments));
  gcc_checking_assert (parm_offset_known && a.parm_offset_known);

  poly_int64 new_parm_offset, offset1, aoffset1;
  if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
    {
      parm_offset_known = false;
      return;
    }
  gcc_checking_assert (range_info_useful_p ()
                       && a.range_info_useful_p ());
  if (record_adjustments)
    adjustments += a.adjustments;
  update2 (new_parm_offset,
           offset1, size, max_size,
           aoffset1, a.size, a.max_size,
           record_adjustments);
}

/* Merge two ranges both starting at PARM_OFFSET1 and update THIS
   with the result.  */
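/* Illustrative example (values for exposition only): merging byte ranges
   [0, 8) and [4, 16) yields the new bound 16 bytes
   (max_size2 + offset2 - offset1), which is kept because it is larger
   than the original 8-byte bound, so the result covers bytes [0, 16);
   the smaller or less specified of the two sizes is retained.  */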
void
modref_access_node::update2 (poly_int64 parm_offset1,
                             poly_int64 offset1, poly_int64 size1,
                             poly_int64 max_size1,
                             poly_int64 offset2, poly_int64 size2,
                             poly_int64 max_size2,
                             bool record_adjustments)
{
  poly_int64 new_size = size1;

  if (!known_size_p (size2)
      || known_le (size2, size1))
    new_size = size2;
  else
    gcc_checking_assert (known_le (size1, size2));

  if (known_le (offset1, offset2))
    ;
  else if (known_le (offset2, offset1))
    {
      std::swap (offset1, offset2);
      std::swap (max_size1, max_size2);
    }
  else
    gcc_unreachable ();

  poly_int64 new_max_size;

  if (!known_size_p (max_size1))
    new_max_size = max_size1;
  else if (!known_size_p (max_size2))
    new_max_size = max_size2;
  else
    {
      poly_offset_int s = (poly_offset_int)max_size2
                          + (poly_offset_int)offset2
                          - (poly_offset_int)offset1;
      if (s.to_shwi (&new_max_size))
        {
          if (known_le (new_max_size, max_size1))
            new_max_size = max_size1;
        }
      else
        new_max_size = -1;
    }

  update (parm_offset1, offset1,
          new_size, new_max_size, record_adjustments);
}

/* Given access nodes THIS and A, return true if their ranges can
   be expressed with a common parm offset.  In this case return the
   common parm offset in NEW_PARM_OFFSET, the start of THIS's range
   in NEW_OFFSET and the start of A's range in NEW_AOFFSET.  */
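/* Illustrative example (values for exposition only): with THIS at
   parm_offset 4 and A at parm_offset 2 (both with bit offset 0), the
   common parm offset becomes 2, THIS's range starts at bit 16
   ((4 - 2) << LOG2_BITS_PER_UNIT) and A's range starts at bit 0.  */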
bool
modref_access_node::combined_offsets (const modref_access_node &a,
                                      poly_int64 *new_parm_offset,
                                      poly_int64 *new_offset,
                                      poly_int64 *new_aoffset) const
{
  gcc_checking_assert (parm_offset_known && a.parm_offset_known);
  if (known_le (a.parm_offset, parm_offset))
    {
      *new_offset = offset
                    + ((parm_offset - a.parm_offset)
                       << LOG2_BITS_PER_UNIT);
      *new_aoffset = a.offset;
      *new_parm_offset = a.parm_offset;
      return true;
    }
  else if (known_le (parm_offset, a.parm_offset))
    {
      *new_aoffset = a.offset
                     + ((a.parm_offset - parm_offset)
                        << LOG2_BITS_PER_UNIT);
      *new_offset = offset;
      *new_parm_offset = parm_offset;
      return true;
    }
  else
    return false;
}

/* Try to optimize the access ACCESSES list after entry INDEX was modified.  */
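/* Note (summary of the loop below): enlarging entry INDEX may let it
   absorb entries that were already scanned, so whenever a merge
   succeeds the scan restarts from the beginning of the list.  */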
void
modref_access_node::try_merge_with (vec <modref_access_node, va_gc> *&accesses,
                                    size_t index)
{
  size_t i;

  for (i = 0; i < accesses->length ();)
    if (i != index)
      {
        bool found = false, restart = false;
        modref_access_node *a = &(*accesses)[i];
        modref_access_node *n = &(*accesses)[index];

        if (n->contains (*a))
          found = true;
        if (!found && n->merge (*a, false))
          found = restart = true;
        gcc_checking_assert (found || !a->merge (*n, false));
        if (found)
          {
            accesses->unordered_remove (i);
            if (index == accesses->length ())
              {
                index = i;
                i++;
              }
            if (restart)
              i = 0;
          }
        else
          i++;
      }
    else
      i++;
}

/* Stream out to OB.  */

void
modref_access_node::stream_out (struct output_block *ob) const
{
  streamer_write_hwi (ob, parm_index);
  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      streamer_write_uhwi (ob, parm_offset_known);
      if (parm_offset_known)
        {
          streamer_write_poly_int64 (ob, parm_offset);
          streamer_write_poly_int64 (ob, offset);
          streamer_write_poly_int64 (ob, size);
          streamer_write_poly_int64 (ob, max_size);
        }
    }
}

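/* Stream in a modref_access_node from IB; the inverse of stream_out.  */
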
modref_access_node
modref_access_node::stream_in (struct lto_input_block *ib)
{
  int parm_index = streamer_read_hwi (ib);
  bool parm_offset_known = false;
  poly_int64 parm_offset = 0;
  poly_int64 offset = 0;
  poly_int64 size = -1;
  poly_int64 max_size = -1;

  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      parm_offset_known = streamer_read_uhwi (ib);
      if (parm_offset_known)
        {
          parm_offset = streamer_read_poly_int64 (ib);
          offset = streamer_read_poly_int64 (ib);
          size = streamer_read_poly_int64 (ib);
          max_size = streamer_read_poly_int64 (ib);
        }
    }
  return {offset, size, max_size, parm_offset, parm_index,
          parm_offset_known, false};
}

/* Insert access A into the list ACCESSES.  Keep the list within
   MAX_ACCESSES entries by merging the closest entries if needed.
   If RECORD_ADJUSTMENTS is true avoid too many interval extensions.

   Return 0 if nothing changed, 1 if the insert was successful and -1
   if the entries should be collapsed.  */
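/* Illustrative summary of the strategy below: an access already contained
   in an existing entry is dropped (return 0); one that subsumes or can be
   losslessly merged with an existing entry updates it in place (return 1);
   once the list is full, the closest pair (possibly including the new
   access itself) is force-merged, and -1 is returned only when no useful
   entry can be kept within the limit, so the caller collapses the ref.  */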
int
modref_access_node::insert (vec <modref_access_node, va_gc> *&accesses,
                            modref_access_node a, size_t max_accesses,
                            bool record_adjustments)
{
  size_t i, j;
  modref_access_node *a2;

  /* Verify that list does not contain redundant accesses.  */
  if (flag_checking)
    {
      size_t i, i2;
      modref_access_node *a, *a2;

      FOR_EACH_VEC_SAFE_ELT (accesses, i, a)
        {
          FOR_EACH_VEC_SAFE_ELT (accesses, i2, a2)
            if (i != i2)
              gcc_assert (!a->contains (*a2));
        }
    }

  FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
    {
      if (a2->contains (a))
        return 0;
      if (a.contains (*a2))
        {
          a.adjustments = 0;
          a2->parm_index = a.parm_index;
          a2->parm_offset_known = a.parm_offset_known;
          a2->update (a.parm_offset, a.offset, a.size, a.max_size,
                      record_adjustments);
          modref_access_node::try_merge_with (accesses, i);
          return 1;
        }
      if (a2->merge (a, record_adjustments))
        {
          modref_access_node::try_merge_with (accesses, i);
          return 1;
        }
      gcc_checking_assert (!(a == *a2));
    }

  /* If this base->ref pair has too many accesses stored, find the least
     harmful pair of entries to merge instead of giving up, and bail out
     only when even that is not possible.  */
  if (accesses && accesses->length () >= max_accesses)
    {
      if (max_accesses < 2)
        return -1;
      /* Find least harmful merge and perform it.  */
      int best1 = -1, best2 = -1;
      FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
        {
          for (j = i + 1; j < accesses->length (); j++)
            if (best1 < 0
                || modref_access_node::closer_pair_p
                     (*a2, (*accesses)[j],
                      (*accesses)[best1],
                      best2 < 0 ? a : (*accesses)[best2]))
              {
                best1 = i;
                best2 = j;
              }
          if (modref_access_node::closer_pair_p
                     (*a2, a,
                      (*accesses)[best1],
                      best2 < 0 ? a : (*accesses)[best2]))
            {
              best1 = i;
              best2 = -1;
            }
        }
      (*accesses)[best1].forced_merge (best2 < 0 ? a : (*accesses)[best2],
                                       record_adjustments);
      /* Check that merging indeed merged ranges.  */
      gcc_checking_assert ((*accesses)[best1].contains
                               (best2 < 0 ? a : (*accesses)[best2]));
      if (!(*accesses)[best1].useful_p ())
        return -1;
      if (dump_file && best2 >= 0)
        fprintf (dump_file,
                 "--param modref-max-accesses limit reached;"
                 " merging %i and %i\n", best1, best2);
      else if (dump_file)
        fprintf (dump_file,
                 "--param modref-max-accesses limit reached;"
                 " merging with %i\n", best1);
      modref_access_node::try_merge_with (accesses, best1);
      if (best2 >= 0)
        insert (accesses, a, max_accesses, record_adjustments);
      return 1;
    }
  a.adjustments = 0;
  vec_safe_push (accesses, a);
  return 1;
}

/* Return true if range info is useful.  */
bool
modref_access_node::range_info_useful_p () const
{
  return parm_index != MODREF_UNKNOWN_PARM
         && parm_index != MODREF_GLOBAL_MEMORY_PARM
         && parm_offset_known
         && (known_size_p (size)
             || known_size_p (max_size)
             || known_ge (offset, 0));
}

/* Dump range to debug OUT.  */
void
modref_access_node::dump (FILE *out)
{
  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      if (parm_index == MODREF_GLOBAL_MEMORY_PARM)
        fprintf (out, " Base in global memory");
      else if (parm_index >= 0)
        fprintf (out, " Parm %i", parm_index);
      else if (parm_index == MODREF_STATIC_CHAIN_PARM)
        fprintf (out, " Static chain");
      else
        gcc_unreachable ();
      if (parm_offset_known)
        {
          fprintf (out, " param offset:");
          print_dec ((poly_int64_pod)parm_offset, out, SIGNED);
        }
    }
  if (range_info_useful_p ())
    {
      fprintf (out, " offset:");
      print_dec ((poly_int64_pod)offset, out, SIGNED);
      fprintf (out, " size:");
      print_dec ((poly_int64_pod)size, out, SIGNED);
      fprintf (out, " max_size:");
      print_dec ((poly_int64_pod)max_size, out, SIGNED);
      if (adjustments)
        fprintf (out, " adjusted %i times", adjustments);
    }
  fprintf (out, "\n");
}

/* Return tree corresponding to parameter of the range in STMT.  */
tree
modref_access_node::get_call_arg (const gcall *stmt) const
{
  if (parm_index == MODREF_UNKNOWN_PARM
      || parm_index == MODREF_GLOBAL_MEMORY_PARM)
    return NULL;
  if (parm_index == MODREF_STATIC_CHAIN_PARM)
    return gimple_call_chain (stmt);
  /* MODREF_RETSLOT_PARM should not happen in access trees since the store
     is seen explicitly in the caller.  */
  gcc_checking_assert (parm_index >= 0);
  if (parm_index >= (int)gimple_call_num_args (stmt))
    return NULL;
  return gimple_call_arg (stmt, parm_index);
}

/* Initialize REF to the memory range accessed by THIS in the call STMT.
   Return true on success, false if the range cannot be determined.  */
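/* Illustrative example (values for exposition only): with parm_offset 2
   and a bit offset of 32, the resulting range starts at byte
   2 + 32 / BITS_PER_UNIT = 6 of the object the argument points to.  */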
bool
modref_access_node::get_ao_ref (const gcall *stmt, ao_ref *ref) const
{
  tree arg;

  if (!parm_offset_known
      || !(arg = get_call_arg (stmt))
      || !POINTER_TYPE_P (TREE_TYPE (arg)))
    return false;
  poly_offset_int off = (poly_offset_int)offset
        + ((poly_offset_int)parm_offset << LOG2_BITS_PER_UNIT);
  poly_int64 off2;
  if (!off.to_shwi (&off2))
    return false;
  ao_ref_init_from_ptr_and_range (ref, arg, true, off2, size, max_size);
  return true;
}

/* Return true if A is a subkill.  */
bool
modref_access_node::contains_for_kills (const modref_access_node &a) const
{
  poly_int64 aoffset_adj = 0;

  gcc_checking_assert (parm_index != MODREF_UNKNOWN_PARM
                       && a.parm_index != MODREF_UNKNOWN_PARM);
  if (parm_index != a.parm_index)
    return false;
  gcc_checking_assert (parm_offset_known && a.parm_offset_known);
  aoffset_adj = (a.parm_offset - parm_offset)
                * BITS_PER_UNIT;
  gcc_checking_assert (range_info_useful_p () && a.range_info_useful_p ());
  return known_subrange_p (a.offset + aoffset_adj,
                           a.max_size, offset, max_size);
}

/* Merge two ranges both starting at PARM_OFFSET1 and update THIS
   with the result.  Return true if THIS changed.  */
bool
modref_access_node::update_for_kills (poly_int64 parm_offset1,
                                      poly_int64 offset1,
                                      poly_int64 max_size1,
                                      poly_int64 offset2,
                                      poly_int64 max_size2,
                                      bool record_adjustments)
{
  if (known_le (offset1, offset2))
    ;
  else if (known_le (offset2, offset1))
    {
      std::swap (offset1, offset2);
      std::swap (max_size1, max_size2);
    }
  else
    gcc_unreachable ();

  poly_int64 new_max_size = max_size2 + offset2 - offset1;
  if (known_le (new_max_size, max_size1))
    new_max_size = max_size1;
  if (known_eq (parm_offset, parm_offset1)
      && known_eq (offset, offset1)
      && known_eq (size, new_max_size)
      && known_eq (max_size, new_max_size))
    return false;

  if (!record_adjustments
      || (++adjustments) < param_modref_max_adjustments)
    {
      parm_offset = parm_offset1;
      offset = offset1;
      max_size = new_max_size;
      size = new_max_size;
      gcc_checking_assert (useful_for_kill_p ());
      return true;
    }
  return false;
}

/* Merge in access A if it is possible to do without losing
   precision.  Return true if successful.
   Unlike merge, assume that both accesses are always executed
   and merge size the same way as max_size.  */
bool
modref_access_node::merge_for_kills (const modref_access_node &a,
                                     bool record_adjustments)
{
  poly_int64 offset1 = 0;
  poly_int64 aoffset1 = 0;
  poly_int64 new_parm_offset = 0;

  /* We assume that containment was tested earlier.  */
  gcc_checking_assert (!contains_for_kills (a) && !a.contains_for_kills (*this)
                       && useful_for_kill_p () && a.useful_for_kill_p ());

  if (parm_index != a.parm_index
      || !combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
    return false;

  if (known_le (offset1, aoffset1))
    {
      if (!known_size_p (max_size)
          || known_ge (offset1 + max_size, aoffset1))
        return update_for_kills (new_parm_offset, offset1, max_size,
                                 aoffset1, a.max_size, record_adjustments);
    }
  else if (known_le (aoffset1, offset1))
    {
      if (!known_size_p (a.max_size)
          || known_ge (aoffset1 + a.max_size, offset1))
        return update_for_kills (new_parm_offset, offset1, max_size,
                                 aoffset1, a.max_size, record_adjustments);
    }
  return false;
}

/* Insert new kill A into KILLS.  If RECORD_ADJUSTMENTS is true limit number
   of changes to each entry.  Return true if something changed.  */

bool
modref_access_node::insert_kill (vec<modref_access_node> &kills,
                                 modref_access_node &a, bool record_adjustments)
{
  size_t index;
  modref_access_node *a2;
  bool merge = false;

  gcc_checking_assert (a.useful_for_kill_p ());

  /* See if we have a corresponding entry already or we can merge with
     a neighboring entry.  */
  FOR_EACH_VEC_ELT (kills, index, a2)
    {
      if (a2->contains_for_kills (a))
        return false;
      if (a.contains_for_kills (*a2))
        {
          a.adjustments = 0;
          *a2 = a;
          merge = true;
          break;
        }
      if (a2->merge_for_kills (a, record_adjustments))
        {
          merge = true;
          break;
        }
    }
  /* If entry was not found, insert it.  */
  if (!merge)
    {
      if ((int)kills.length () >= param_modref_max_accesses)
        {
          if (dump_file)
            fprintf (dump_file, "--param modref-max-accesses limit reached:");
          return false;
        }
      a.adjustments = 0;
      kills.safe_push (a);
      return true;
    }
  /* Extending range in an entry may make it possible to merge it with
     other entries.  */
  size_t i;

  for (i = 0; i < kills.length ();)
    if (i != index)
      {
        bool found = false, restart = false;
        modref_access_node *a = &kills[i];
        modref_access_node *n = &kills[index];

        if (n->contains_for_kills (*a))
          found = true;
        if (!found && n->merge_for_kills (*a, false))
          found = restart = true;
        gcc_checking_assert (found || !a->merge_for_kills (*n, false));
        if (found)
          {
            kills.unordered_remove (i);
            if (index == kills.length ())
              {
                index = i;
                i++;
              }
            if (restart)
              i = 0;
          }
        else
          i++;
      }
    else
      i++;
  return true;
}


#if CHECKING_P

namespace selftest {

static void
test_insert_search_collapse ()
{
  modref_base_node<alias_set_type> *base_node;
  modref_ref_node<alias_set_type> *ref_node;
  modref_access_node a = unspecified_modref_access_node;

  modref_tree<alias_set_type> *t = new modref_tree<alias_set_type>();
  ASSERT_FALSE (t->every_base);

  /* Insert into an empty tree.  */
  t->insert (1, 2, 2, 1, 2, a, false);
  ASSERT_NE (t->bases, NULL);
  ASSERT_EQ (t->bases->length (), 1);
  ASSERT_FALSE (t->every_base);
  ASSERT_EQ (t->search (2), NULL);

  base_node = t->search (1);
  ASSERT_NE (base_node, NULL);
  ASSERT_EQ (base_node->base, 1);
  ASSERT_NE (base_node->refs, NULL);
  ASSERT_EQ (base_node->refs->length (), 1);
  ASSERT_EQ (base_node->search (1), NULL);

  ref_node = base_node->search (2);
  ASSERT_NE (ref_node, NULL);
  ASSERT_EQ (ref_node->ref, 2);

  /* Insert when base exists but ref does not.  */
  t->insert (1, 2, 2, 1, 3, a, false);
  ASSERT_NE (t->bases, NULL);
  ASSERT_EQ (t->bases->length (), 1);
  ASSERT_EQ (t->search (1), base_node);
  ASSERT_EQ (t->search (2), NULL);
  ASSERT_NE (base_node->refs, NULL);
  ASSERT_EQ (base_node->refs->length (), 2);

  ref_node = base_node->search (3);
  ASSERT_NE (ref_node, NULL);

  /* Insert when base and ref exist, but access is not dominated by nor
     dominates other accesses.  */
  t->insert (1, 2, 2, 1, 2, a, false);
  ASSERT_EQ (t->bases->length (), 1);
  ASSERT_EQ (t->search (1), base_node);

  ref_node = base_node->search (2);
  ASSERT_NE (ref_node, NULL);

  /* Insert when base and ref exist and access is dominated.  */
  t->insert (1, 2, 2, 1, 2, a, false);
  ASSERT_EQ (t->search (1), base_node);
  ASSERT_EQ (base_node->search (2), ref_node);

  /* Insert ref to trigger ref list collapse for base 1.  */
  t->insert (1, 2, 2, 1, 4, a, false);
  ASSERT_EQ (t->search (1), base_node);
  ASSERT_EQ (base_node->refs, NULL);
  ASSERT_EQ (base_node->search (2), NULL);
  ASSERT_EQ (base_node->search (3), NULL);
  ASSERT_TRUE (base_node->every_ref);

  /* Further inserts to collapsed ref list are ignored.  */
  t->insert (1, 2, 2, 1, 5, a, false);
  ASSERT_EQ (t->search (1), base_node);
  ASSERT_EQ (base_node->refs, NULL);
  ASSERT_EQ (base_node->search (2), NULL);
  ASSERT_EQ (base_node->search (3), NULL);
  ASSERT_TRUE (base_node->every_ref);

  /* Insert base to trigger base list collapse.  */
  t->insert (1, 2, 2, 5, 0, a, false);
  ASSERT_TRUE (t->every_base);
  ASSERT_EQ (t->bases, NULL);
  ASSERT_EQ (t->search (1), NULL);

  /* Further inserts to collapsed base list are ignored.  */
  t->insert (1, 2, 2, 7, 8, a, false);
  ASSERT_TRUE (t->every_base);
  ASSERT_EQ (t->bases, NULL);
  ASSERT_EQ (t->search (1), NULL);

  delete t;
}

static void
test_merge ()
{
  modref_tree<alias_set_type> *t1, *t2;
  modref_base_node<alias_set_type> *base_node;
  modref_access_node a = unspecified_modref_access_node;

  t1 = new modref_tree<alias_set_type>();
  t1->insert (3, 4, 1, 1, 1, a, false);
  t1->insert (3, 4, 1, 1, 2, a, false);
  t1->insert (3, 4, 1, 1, 3, a, false);
  t1->insert (3, 4, 1, 2, 1, a, false);
  t1->insert (3, 4, 1, 3, 1, a, false);

  t2 = new modref_tree<alias_set_type>();
  t2->insert (10, 10, 10, 1, 2, a, false);
  t2->insert (10, 10, 10, 1, 3, a, false);
  t2->insert (10, 10, 10, 1, 4, a, false);
  t2->insert (10, 10, 10, 3, 2, a, false);
  t2->insert (10, 10, 10, 3, 3, a, false);
  t2->insert (10, 10, 10, 3, 4, a, false);
  t2->insert (10, 10, 10, 3, 5, a, false);

  t1->merge (3, 4, 1, t2, NULL, NULL, false);

  ASSERT_FALSE (t1->every_base);
  ASSERT_NE (t1->bases, NULL);
  ASSERT_EQ (t1->bases->length (), 3);

  base_node = t1->search (1);
  ASSERT_NE (base_node->refs, NULL);
  ASSERT_FALSE (base_node->every_ref);
  ASSERT_EQ (base_node->refs->length (), 4);

  base_node = t1->search (2);
  ASSERT_NE (base_node->refs, NULL);
  ASSERT_FALSE (base_node->every_ref);
  ASSERT_EQ (base_node->refs->length (), 1);

  base_node = t1->search (3);
  ASSERT_EQ (base_node->refs, NULL);
  ASSERT_TRUE (base_node->every_ref);

  delete t1;
  delete t2;
}


void
ipa_modref_tree_cc_tests ()
{
  test_insert_search_collapse ();
  test_merge ();
}

} // namespace selftest

#endif

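/* Garbage-collector marking hooks for modref trees: walk the base, ref
   and access vectors so that GGC keeps them live.  The gt_pch_nx
   variants below are intentionally empty.  */
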
void
gt_ggc_mx (modref_tree<int> *const &tt)
{
  if (tt->bases)
    {
      ggc_test_and_set_mark (tt->bases);
      gt_ggc_mx (tt->bases);
    }
}

void
gt_ggc_mx (modref_tree<tree_node *> *const &tt)
{
  if (tt->bases)
    {
      ggc_test_and_set_mark (tt->bases);
      gt_ggc_mx (tt->bases);
    }
}

void gt_pch_nx (modref_tree<int>* const&) {}
void gt_pch_nx (modref_tree<tree_node*>* const&) {}
void gt_pch_nx (modref_tree<int>* const&, gt_pointer_operator, void *) {}
void gt_pch_nx (modref_tree<tree_node*>* const&, gt_pointer_operator, void *) {}

void gt_ggc_mx (modref_base_node<int>* &b)
{
  ggc_test_and_set_mark (b);
  if (b->refs)
    {
      ggc_test_and_set_mark (b->refs);
      gt_ggc_mx (b->refs);
    }
}

void gt_ggc_mx (modref_base_node<tree_node*>* &b)
{
  ggc_test_and_set_mark (b);
  if (b->refs)
    {
      ggc_test_and_set_mark (b->refs);
      gt_ggc_mx (b->refs);
    }
  if (b->base)
    gt_ggc_mx (b->base);
}

void gt_pch_nx (modref_base_node<int>*) {}
void gt_pch_nx (modref_base_node<tree_node*>*) {}
void gt_pch_nx (modref_base_node<int>*, gt_pointer_operator, void *) {}
void gt_pch_nx (modref_base_node<tree_node*>*, gt_pointer_operator, void *) {}

void gt_ggc_mx (modref_ref_node<int>* &r)
{
  ggc_test_and_set_mark (r);
  if (r->accesses)
    {
      ggc_test_and_set_mark (r->accesses);
      gt_ggc_mx (r->accesses);
    }
}

void gt_ggc_mx (modref_ref_node<tree_node*>* &r)
{
  ggc_test_and_set_mark (r);
  if (r->accesses)
    {
      ggc_test_and_set_mark (r->accesses);
      gt_ggc_mx (r->accesses);
    }
  if (r->ref)
    gt_ggc_mx (r->ref);
}

void gt_pch_nx (modref_ref_node<int>*) {}
void gt_pch_nx (modref_ref_node<tree_node*>*) {}
void gt_pch_nx (modref_ref_node<int>*, gt_pointer_operator, void *) {}
void gt_pch_nx (modref_ref_node<tree_node*>*, gt_pointer_operator, void *) {}

void gt_ggc_mx (modref_access_node &)
{
}