/* Simple garbage collection for the GNU compiler.
   Copyright (C) 1999-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Generic garbage collection (GC) functions and data, not specific to
   any particular GC implementation.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "timevar.h"
#include "diagnostic-core.h"
#include "ggc-internal.h"
#include "params.h"
#include "hosthooks.h"
#include "plugin.h"

/* When set, ggc_collect will do collection.  */
bool ggc_force_collect;

/* When true, protect the contents of the identifier hash table.  */
bool ggc_protect_identifiers = true;

/* Statistics about the allocation.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
			       struct traversal_state *state);

/* Maintain global roots that are preserved during GC.  */

/* This extra vector of dynamically registered root_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC root
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_root_tab *const_ggc_root_tab_t;
static vec<const_ggc_root_tab_t> extra_root_vec;

/* Dynamically register a new GGC root table RT.  This is useful for
   plugins.  */

void
ggc_register_root_tab (const struct ggc_root_tab* rt)
{
  if (rt)
    extra_root_vec.safe_push (rt);
}
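
/* A minimal sketch of plugin usage (the names below are illustrative,
   not part of any real plugin): a plugin can keep a GC-managed tree
   alive across collections by exposing it through its own root table.

     static tree my_plugin_decl;
     static const struct ggc_root_tab my_roots[] = {
       { &my_plugin_decl, 1, sizeof (my_plugin_decl),
	 gt_ggc_mx_tree_node, gt_pch_nx_tree_node },
       LAST_GGC_ROOT_TAB
     };
     ...
     ggc_register_root_tab (my_roots);  */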

/* Mark all the roots in the table RT.  */

static void
ggc_mark_root_tab (const_ggc_root_tab_t rt)
{
  size_t i;

  for ( ; rt->base != NULL; rt++)
    for (i = 0; i < rt->nelt; i++)
      (*rt->cb) (*(void **) ((char *)rt->base + rt->stride * i));
}
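
/* To make the addressing above concrete: a hypothetical root entry
   { &my_globals, 4, sizeof (tree), gt_ggc_mx_tree_node, NULL } would
   have CB applied to the pointer stored in each of its four slots,
   fetching slot I from BASE + STRIDE * I.  */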

/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const_ggc_root_tab_t rtp, rti;
  size_t i;

  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  for (rt = gt_ggc_rtab; *rt; rt++)
    ggc_mark_root_tab (*rt);

  FOR_EACH_VEC_ELT (extra_root_vec, i, rtp)
    ggc_mark_root_tab (rtp);

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  gt_clear_caches ();

  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();

  /* Some plugins may call ggc_set_mark from here.  */
  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
}

/* Allocate a block of memory, then clear it.  */
void *
ggc_internal_cleared_alloc (size_t size, void (*f)(void *), size_t s, size_t n
			    MEM_STAT_DECL)
{
  void *buf = ggc_internal_alloc (size, f, s, n PASS_MEM_STAT);
  memset (buf, 0, size);
  return buf;
}

/* Resize a block of memory, possibly re-allocating it.  */
void *
ggc_realloc (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_internal_alloc (size PASS_MEM_STAT);

  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as inaccessible.  We also need to make
	 the "new" size accessible, since ggc_get_size returns the size of
	 the pool, not the size of the individually allocated object, the
	 size which was previously made accessible.  Unfortunately, we
	 don't know that previously allocated size.  Without that
	 knowledge we have to lose some initialization-tracking for the
	 old parts of the object.  An alternative is to mark the whole
	 old_size as reachable, but that would lose tracking of writes
	 after the end of the object (by small offsets).  Discard the
	 handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
						    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_internal_alloc (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}

void *
ggc_cleared_alloc_htab_ignore_args (size_t c ATTRIBUTE_UNUSED,
				    size_t n ATTRIBUTE_UNUSED)
{
  gcc_assert (c * n == sizeof (struct htab));
  return ggc_cleared_alloc<htab> ();
}

/* TODO: once we actually use type information in GGC, create a new tag
   gt_gcc_ptr_array and use it for pointer arrays.  */
void *
ggc_cleared_alloc_ptr_array_two_args (size_t c, size_t n)
{
  gcc_assert (sizeof (PTR *) == n);
  return ggc_cleared_vec_alloc<PTR *> (c);
}

/* These are for splay_tree_new_ggc.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  gcc_assert (!nl);
  return ggc_internal_alloc (sz);
}

void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  gcc_assert (!nl);
}

void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
			     ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}

/* Functions for saving and restoring GCable memory to disk.  */

struct ptr_data
{
  void *obj;
  void *note_ptr_cookie;
  gt_note_pointers note_ptr_fn;
  gt_handle_reorder reorder_fn;
  size_t size;
  void *new_addr;
};

#define POINTER_HASH(x) (hashval_t)((intptr_t)x >> 3)
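
/* Shifting by 3 drops the low bits of the pointer, which are zero for
   GC-allocated objects on hosts where such objects are at least 8-byte
   aligned (an assumption, but a safe one for the hosts GCC supports);
   keeping them would cluster hash values into every eighth bucket.  */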

/* Helper for hashing saving_htab.  */

struct saving_hasher : free_ptr_hash <ptr_data>
{
  typedef void *compare_type;
  static inline hashval_t hash (const ptr_data *);
  static inline bool equal (const ptr_data *, const void *);
};

inline hashval_t
saving_hasher::hash (const ptr_data *p)
{
  return POINTER_HASH (p->obj);
}

inline bool
saving_hasher::equal (const ptr_data *p1, const void *p2)
{
  return p1->obj == p2;
}

static hash_table<saving_hasher> *saving_htab;

/* Register an object in the hash table.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
		    gt_note_pointers note_ptr_fn)
{
  struct ptr_data **slot;

  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    saving_htab->find_slot_with_hash (obj, POINTER_HASH (obj), INSERT);
  if (*slot != NULL)
    {
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
		  && (*slot)->note_ptr_cookie == note_ptr_cookie);
      return 0;
    }

  *slot = XCNEW (struct ptr_data);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen ((const char *)obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  return 1;
}

/* Register the reorder function REORDER_FN for an object that has
   already been noted in the hash table.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
		     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = (struct ptr_data *)
    saving_htab->find_with_hash (obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}

/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;
  struct ggc_pch_data *d;
  size_t count;
  struct ptr_data **ptrs;
  size_t ptrs_i;
};

/* Callbacks for htab_traverse.  */

int
ggc_call_count (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  ggc_pch_count_object (state->d, d->obj, d->size,
			d->note_ptr_fn == gt_pch_p_S);
  state->count++;
  return 1;
}

int
ggc_call_alloc (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
				      d->note_ptr_fn == gt_pch_p_S);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}
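
/* These two callbacks implement the two passes gt_pch_save makes over
   saving_htab: ggc_call_count sizes the PCH and tells us how large the
   PTRS array must be, then ggc_call_alloc assigns each object its
   address in the eventual mmap region and collects the objects into
   PTRS for sorting.  */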

/* Callback for qsort.  */

static int
compare_ptr_data (const void *p1_p, const void *p2_p)
{
  const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
  const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
	  - ((size_t)p1->new_addr < (size_t)p2->new_addr));
}
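
/* The (a > b) - (a < b) idiom above yields -1, 0 or 1 directly, and
   avoids the overflow that a plain subtraction of the two addresses
   could produce when their difference does not fit in an int.  */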

/* Callbacks for note_ptr_fn.  */

static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = (struct ptr_data *)
    saving_htab->find_with_hash (*ptr, POINTER_HASH (*ptr));
  gcc_assert (result);
  *ptr = result->new_addr;
}

/* Write out, after relocation, the pointers in TAB.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
		   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	{
	  void *ptr = *(void **)((char *)rti->base + rti->stride * i);
	  struct ptr_data *new_ptr;
	  if (ptr == NULL || ptr == (void *)1)
	    {
	      if (fwrite (&ptr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error (input_location, "can%'t write PCH file: %m");
	    }
	  else
	    {
	      new_ptr = (struct ptr_data *)
		saving_htab->find_with_hash (ptr, POINTER_HASH (ptr));
	      if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error (input_location, "can%'t write PCH file: %m");
	    }
	}
}

/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;
  size_t size;
  void *preferred_base;
};
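
/* As laid out by gt_pch_save below, the PCH stream consists of the
   scalar roots, the relocated global pointers, this mmap_info record,
   padding up to the next allocation-granularity boundary, and finally
   the object data itself, so that gt_pch_restore can mmap the objects
   directly at OFFSET.  */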

/* Write out the state of the compiler to F.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity ();

  gt_pch_save_stringpool ();

  timevar_push (TV_PCH_PTR_REALLOC);
  saving_htab = new hash_table<saving_hasher> (50000);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch ();
  state.count = 0;
  saving_htab->traverse <traversal_state *, ggc_call_count> (&state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;

  saving_htab->traverse <traversal_state *, ggc_call_alloc> (&state);
  timevar_pop (TV_PCH_PTR_REALLOC);

  timevar_push (TV_PCH_PTR_SORT);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
  timevar_pop (TV_PCH_PTR_SORT);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
	fatal_error (input_location, "can%'t write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;
    o = ftell (state.f);
    if (o == -1)
      fatal_error (input_location, "can%'t get position in PCH file: %m");
    /* Account for the mmap_info record written just below.  */
    o += (long) sizeof (mmi);
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
  }
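
  /* For example, with a 4096-byte granularity and the mmap_info record
     ending at file position 10000, the computation above sets
     mmi.offset to 12288, the next multiple of 4096.  */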
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error (input_location, "can%'t write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error (input_location, "can%'t write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
  vec<char> vbits = vNULL;
#endif

  /* Actually write out the objects.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
	{
	  this_object_size = state.ptrs[i]->size;
	  this_object = XRESIZEVAR (char, this_object, this_object_size);
	}
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
      /* obj might contain uninitialized bytes, e.g. in the trailing
	 padding of the object.  Avoid warnings by making the memory
	 temporarily defined and then restoring previous state.  */
      int get_vbits = 0;
      size_t valid_size = state.ptrs[i]->size;
      if (__builtin_expect (RUNNING_ON_VALGRIND, 0))
	{
	  if (vbits.length () < valid_size)
	    vbits.safe_grow (valid_size);
	  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
					  vbits.address (), valid_size);
	  if (get_vbits == 3)
	    {
	      /* We assume that first part of obj is addressable, and
		 the rest is unaddressable.  Find out where the boundary is
		 using binary search.  */
	      size_t lo = 0, hi = valid_size;
	      while (hi > lo)
		{
		  size_t mid = (lo + hi) / 2;
		  get_vbits = VALGRIND_GET_VBITS ((char *) state.ptrs[i]->obj
						  + mid, vbits.address (),
						  1);
		  if (get_vbits == 3)
		    hi = mid;
		  else if (get_vbits == 1)
		    lo = mid + 1;
		  else
		    break;
		}
	      if (get_vbits == 1 || get_vbits == 3)
		{
		  valid_size = lo;
		  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
						  vbits.address (),
						  valid_size);
		}
	    }
	  if (get_vbits == 1)
	    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (state.ptrs[i]->obj,
							 state.ptrs[i]->size));
	}
#endif
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
	state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
				   state.ptrs[i]->note_ptr_cookie,
				   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
				  state.ptrs[i]->note_ptr_cookie,
				  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
			    state.ptrs[i]->new_addr, state.ptrs[i]->size,
			    state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
	memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
      if (__builtin_expect (get_vbits == 1, 0))
	{
	  (void) VALGRIND_SET_VBITS (state.ptrs[i]->obj, vbits.address (),
				     valid_size);
	  if (valid_size != state.ptrs[i]->size)
	    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *)
							  state.ptrs[i]->obj
							  + valid_size,
							  state.ptrs[i]->size
							  - valid_size));
	}
#endif
    }
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
  vbits.release ();
#endif

  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  XDELETE (state.ptrs);
  XDELETE (this_object);
  delete saving_htab;
  saving_htab = NULL;
}

/* Read the state of the compiler back in from F.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;
  struct line_maps * old_line_table = line_table;
  location_t old_input_loc = input_location;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1) {
        line_table = old_line_table;
	input_location = old_input_loc;
	fatal_error (input_location, "can%'t read PCH file: %m");
      }
  /* Read in all the global pointers, in three nested loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1) {
          line_table = old_line_table;
	  input_location = old_input_loc;
	  fatal_error (input_location, "can%'t read PCH file: %m");
        }

  if (fread (&mmi, sizeof (mmi), 1, f) != 1) {
    line_table = old_line_table;
    input_location = old_input_loc;
    fatal_error (input_location, "can%'t read PCH file: %m");
  }

  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
					  fileno (f), mmi.offset);
  if (result < 0) {
    line_table = old_line_table;
    input_location = old_input_loc;
    fatal_error (input_location, "had to relocate PCH");
  }
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
	  || fread (mmi.preferred_base, mmi.size, 1, f) != 1) {
        line_table = old_line_table;
        input_location = old_input_loc;
	fatal_error (input_location, "can%'t read PCH file: %m");
      }
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0) {
    line_table = old_line_table;
    input_location = old_input_loc;
    fatal_error (input_location, "can%'t read PCH file: %m");
  }

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}

/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  */

void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
			    int fd ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
   Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
   same as BASE, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, so relocation
   of the PCH file would be required.  */

int
default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
			    size_t offset ATTRIBUTE_UNUSED)
{
  void *addr = xmalloc (size);
  return (addr == base) - 1;
}

/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as pagesize.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize ();
}

#if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot, if not we're likely
   to be in trouble.  */

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *ret;

  ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (ret == (void *) MAP_FAILED)
    ret = NULL;
  else
    munmap ((caddr_t) ret, size);

  return ret;
}
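
/* Note that the address returned above is only a hint: the region is
   unmapped again immediately, and by the time gt_pch_restore runs (in
   another process, typically much later) something else may occupy it,
   in which case mmap_gt_pch_use_address below reports failure.  */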

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  addr = mmap ((caddr_t) base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
	       fd, offset);

  return addr == base ? 1 : -1;
}
#endif /* HAVE_MMAP_FILE */

#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT

/* Modify the bound based on rlimits.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
	 RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
	 appears to be ignored.  Ignore such silliness.  If a limit
	 this small was actually effective for mmap, GCC wouldn't even
	 start up.  */
      && rlim.rlim_cur >= 8 * 1024 * 1024)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}

/* Heuristic to set a default for GGC_MIN_EXPAND.  */
static int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total ();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}
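
/* Worked example: on a machine with 512MB of usable RAM the formula
   gives 0.5 * 70 + 30 = 65, i.e. a collection is allowed once the heap
   has grown 65% beyond its size at the end of the previous collection;
   with 1GB or more it saturates at 100%.  */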

/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  */
static int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total ();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= 1024; /* Convert to Kbytes.  */
  limit_kbytes /= 1024;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
 {
   struct rlimit rlim;
   if (getrlimit (RLIMIT_RSS, &rlim) == 0
       && rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
     phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
 }
# endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 20Mb of the limit or within a quarter of
     the limit, whichever is larger.  If GCC does hit the data limit,
     compilation will fail, so this tries to be conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * 1024));
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic ());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  phys_kbytes = MAX (phys_kbytes, 4 * 1024);
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);

  return phys_kbytes;
}
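
/* Worked example: with 256MB of RAM and no relevant rlimits,
   phys_kbytes starts at 262144, becomes 32768 after the divide by 8,
   and the clamps leave it there, so the default minimum heap is 32MB;
   with 1GB or more the 128MB upper clamp wins.  */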
#endif

/* Set the default GGC parameter values from the heuristics above,
   unless GC checking or always-collect mode is enabled.  */

void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_default_param_value (GGC_MIN_EXPAND, ggc_min_expand_heuristic ());
  set_default_param_value (GGC_MIN_HEAPSIZE, ggc_min_heapsize_heuristic ());
#endif
}

/* GGC memory usage.  */
struct ggc_usage: public mem_usage
{
  /* Default constructor.  */
  ggc_usage (): m_freed (0), m_collected (0), m_overhead (0) {}
  /* Constructor.  */
  ggc_usage (size_t allocated, size_t times, size_t peak,
	     size_t freed, size_t collected, size_t overhead)
    : mem_usage (allocated, times, peak),
    m_freed (freed), m_collected (collected), m_overhead (overhead) {}

  /* Equality operator.  */
  inline bool
  operator== (const ggc_usage &second) const
  {
    return (get_balance () == second.get_balance ()
	    && m_peak == second.m_peak
	    && m_times == second.m_times);
  }

  /* Comparison operator.  */
  inline bool
  operator< (const ggc_usage &second) const
  {
    if (*this == second)
      return false;

    return (get_balance () == second.get_balance () ?
	    (m_peak == second.m_peak ? m_times < second.m_times
	     : m_peak < second.m_peak)
	      : get_balance () < second.get_balance ());
  }

  /* Register overhead of ALLOCATED and OVERHEAD bytes.  */
  inline void
  register_overhead (size_t allocated, size_t overhead)
  {
    m_allocated += allocated;
    m_overhead += overhead;
    m_times++;
  }

  /* Release overhead of SIZE bytes.  */
  inline void
  release_overhead (size_t size)
  {
    m_freed += size;
  }

  /* Sum the usage with SECOND usage.  */
  ggc_usage
  operator+ (const ggc_usage &second)
  {
    return ggc_usage (m_allocated + second.m_allocated,
		      m_times + second.m_times,
		      m_peak + second.m_peak,
		      m_freed + second.m_freed,
		      m_collected + second.m_collected,
		      m_overhead + second.m_overhead);
  }

  /* Dump usage with PREFIX, where TOTAL is the sum of all rows.  */
  inline void
  dump (const char *prefix, ggc_usage &total) const
  {
    size_t balance = get_balance ();
    fprintf (stderr,
	     "%-48s " PRsa (9) ":%5.1f%%" PRsa (9) ":%5.1f%%"
	     PRsa (9) ":%5.1f%%" PRsa (9) ":%5.1f%%" PRsa (9) "\n",
	     prefix, SIZE_AMOUNT (m_collected),
	     get_percent (m_collected, total.m_collected),
	     SIZE_AMOUNT (m_freed), get_percent (m_freed, total.m_freed),
	     SIZE_AMOUNT (balance), get_percent (balance, total.get_balance ()),
	     SIZE_AMOUNT (m_overhead),
	     get_percent (m_overhead, total.m_overhead),
	     SIZE_AMOUNT (m_times));
  }

  /* Dump usage coupled to LOC location, where TOTAL is the sum of all
     rows.  */
  inline void
  dump (mem_location *loc, ggc_usage &total) const
  {
    char *location_string = loc->to_string ();

    dump (location_string, total);

    free (location_string);
  }

  /* Dump footer.  */
  inline void
  dump_footer ()
  {
    dump ("Total", *this);
  }

  /* Get the balance, i.e. the number of bytes still live: allocated
     plus overhead, minus what has been collected or freed (the GGC
     allocation leak).  */
  inline size_t
  get_balance () const
  {
    return m_allocated + m_overhead - m_collected - m_freed;
  }

  typedef std::pair<mem_location *, ggc_usage *> mem_pair_t;

  /* Compare wrapper used by qsort method; sorts in descending order
     of balance.  */
  static int
  compare (const void *first, const void *second)
  {
    const mem_pair_t f = *(const mem_pair_t *)first;
    const mem_pair_t s = *(const mem_pair_t *)second;
    size_t fb = f.second->get_balance ();
    size_t sb = s.second->get_balance ();

    /* Compare explicitly rather than subtracting: the balances are
       size_t, so their difference may not fit in an int.  */
    return sb == fb ? 0 : (sb < fb ? -1 : 1);
  }

  /* Compare rows in final GGC summary dump.  */
  static int
  compare_final (const void *first, const void *second)
  {
    typedef std::pair<mem_location *, ggc_usage *> mem_pair_t;

    const ggc_usage *f = ((const mem_pair_t *)first)->second;
    const ggc_usage *s = ((const mem_pair_t *)second)->second;

    size_t a = f->m_allocated + f->m_overhead - f->m_freed;
    size_t b = s->m_allocated + s->m_overhead - s->m_freed;

    return a == b ? 0 : (a < b ? 1 : -1);
  }

  /* Dump header with NAME.  */
  static inline void
  dump_header (const char *name)
  {
    fprintf (stderr, "%-48s %11s%17s%17s%16s%17s\n", name, "Garbage", "Freed",
	     "Leak", "Overhead", "Times");
  }

  /* Freed memory in bytes.  */
  size_t m_freed;
  /* Collected memory in bytes.  */
  size_t m_collected;
  /* Overhead memory in bytes.  */
  size_t m_overhead;
};

/* GCC memory description.  */
static mem_alloc_description<ggc_usage> ggc_mem_desc;

/* Dump per-site memory statistics.  */

void
dump_ggc_loc_statistics (bool final)
{
  if (! GATHER_STATISTICS)
    return;

  ggc_force_collect = true;
  ggc_collect ();

  ggc_mem_desc.dump (GGC_ORIGIN, final ? ggc_usage::compare_final : NULL);

  ggc_force_collect = false;
}

/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION).  */
void
ggc_record_overhead (size_t allocated, size_t overhead, void *ptr MEM_STAT_DECL)
{
  ggc_usage *usage = ggc_mem_desc.register_descriptor (ptr, GGC_ORIGIN, false
						       FINAL_PASS_MEM_STAT);

  ggc_mem_desc.register_object_overhead (usage, allocated + overhead, ptr);
  usage->register_overhead (allocated, overhead);
}

/* Notice that the pointer has been freed.  */
void
ggc_free_overhead (void *ptr)
{
  ggc_mem_desc.release_object_overhead (ptr);
}

/* After live values have been marked, walk all recorded pointers and see if
   they are still live.  */
void
ggc_prune_overhead_list (void)
{
  typedef hash_map<const void *, std::pair<ggc_usage *, size_t > > map_t;

  map_t::iterator it = ggc_mem_desc.m_reverse_object_map->begin ();

  for (; it != ggc_mem_desc.m_reverse_object_map->end (); ++it)
    if (!ggc_marked_p ((*it).first))
      (*it).second.first->m_collected += (*it).second.second;

  delete ggc_mem_desc.m_reverse_object_map;
  ggc_mem_desc.m_reverse_object_map = new map_t (13, false, false);
}