xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/ggc-common.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /* Simple garbage collection for the GNU compiler.
2    Copyright (C) 1999-2015 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 /* Generic garbage collection (GC) functions and data, not specific to
21    any particular GC implementation.  */
22 
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "hash-table.h"
27 #include "ggc.h"
28 #include "ggc-internal.h"
29 #include "diagnostic-core.h"
30 #include "params.h"
31 #include "hosthooks.h"
32 #include "hosthooks-def.h"
33 #include "plugin.h"
34 #include "vec.h"
35 #include "timevar.h"
36 
/* When set, ggc_collect will do collection.  */
bool ggc_force_collect;

/* When true, protect the contents of the identifier hash table.  */
bool ggc_protect_identifiers = true;

/* Statistics about the allocation.  Non-null only while
   ggc_print_common_statistics is collecting them.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

/* Forward declarations for the PCH-saving machinery below.  */
static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
			       struct traversal_state *state);

/* Maintain global roots that are preserved during GC.  */

/* This extra vector of dynamically registered root_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC root
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_root_tab *const_ggc_root_tab_t;
static vec<const_ggc_root_tab_t> extra_root_vec;
61 
62 /* Dynamically register a new GGC root table RT. This is useful for
63    plugins. */
64 
65 void
66 ggc_register_root_tab (const struct ggc_root_tab* rt)
67 {
68   if (rt)
69     extra_root_vec.safe_push (rt);
70 }
71 
72 /* Mark all the roots in the table RT.  */
73 
74 static void
75 ggc_mark_root_tab (const_ggc_root_tab_t rt)
76 {
77   size_t i;
78 
79   for ( ; rt->base != NULL; rt++)
80     for (i = 0; i < rt->nelt; i++)
81       (*rt->cb) (*(void **) ((char *)rt->base + rt->stride * i));
82 }
83 
84 /* Iterate through all registered roots and mark each element.  */
85 
void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const_ggc_root_tab_t rtp, rti;
  size_t i;

  /* Zap the deletable roots first; whatever they pointed to is only
     kept alive if it is reachable some other way.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Mark everything reachable from the statically registered roots.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    ggc_mark_root_tab (*rt);

  /* ... and from the roots registered at run time, e.g. by plugins.  */
  FOR_EACH_VEC_ELT (extra_root_vec, i, rtp)
    ggc_mark_root_tab (rtp);

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  /* Clear out entries of caching hash tables whose keys are dead.  */
  gt_clear_caches ();

  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();

  /* Some plugins may call ggc_set_mark from here.  */
  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
}
114 
115 /* Allocate a block of memory, then clear it.  */
116 void *
117 ggc_internal_cleared_alloc (size_t size, void (*f)(void *), size_t s, size_t n
118 			    MEM_STAT_DECL)
119 {
120   void *buf = ggc_internal_alloc (size, f, s, n PASS_MEM_STAT);
121   memset (buf, 0, size);
122   return buf;
123 }
124 
125 /* Resize a block of memory, possibly re-allocating it.  */
void *
ggc_realloc (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  /* Reallocating a null pointer is a plain allocation.  */
  if (x == NULL)
    return ggc_internal_alloc (size PASS_MEM_STAT);

  old_size = ggc_get_size (x);

  /* Shrinking (or equal-size) requests are satisfied in place.  */
  if (size <= old_size)
    {
      /* Mark the unwanted memory as unaccessible.  We also need to make
	 the "new" size accessible, since ggc_get_size returns the size of
	 the pool, not the size of the individually allocated object, the
	 size which was previously made accessible.  Unfortunately, we
	 don't know that previously allocated size.  Without that
	 knowledge we have to lose some initialization-tracking for the
	 old parts of the object.  An alternative is to mark the whole
	 old_size as reachable, but that would lose tracking of writes
	 after the end of the object (by small offsets).  Discard the
	 handle to avoid handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
						    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  /* Growing: allocate new space and copy.  */
  r = ggc_internal_alloc (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}
170 
void *
ggc_cleared_alloc_htab_ignore_args (size_t c ATTRIBUTE_UNUSED,
				    size_t n ATTRIBUTE_UNUSED)
{
  /* Allocator callback for hash tables living in GC memory; the element
     count and size are fixed, so the arguments are only sanity-checked.  */
  gcc_assert (c * n == sizeof (struct htab));
  return ggc_cleared_alloc<htab> ();
}
178 
179 /* TODO: once we actually use type information in GGC, create a new tag
180    gt_gcc_ptr_array and use it for pointer arrays.  */
void *
ggc_cleared_alloc_ptr_array_two_args (size_t c, size_t n)
{
  /* Allocate a zeroed array of C pointers in GC memory; N must be the
     size of a pointer.  */
  gcc_assert (sizeof (PTR *) == n);
  return ggc_cleared_vec_alloc<PTR *> (c);
}
187 
188 /* These are for splay_tree_new_ggc.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  /* Splay-tree allocation callback; NL is the unused allocation-data
     cookie and must be null.  */
  gcc_assert (!nl);
  return ggc_internal_alloc (sz);
}
195 
void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  /* Splay-tree free callback; GC memory is reclaimed by collection,
     so this is deliberately a no-op.  */
  gcc_assert (!nl);
}
201 
/* Print statistics that are independent of the collector in use.  */
/* SCALE reduces a byte count X to a human-friendly magnitude (bytes,
   kilobytes or megabytes); LABEL yields the matching unit suffix.  */
#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
		  ? (x) \
		  : ((x) < 1024*1024*10 \
		     ? (x) / 1024 \
		     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
209 
void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
			     ggc_statistics *stats)
{
  /* STREAM is currently unused because no collector-independent
     statistics are actually printed here; the collector fills in
     STATS during the collection triggered below.  */

  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}
226 
227 /* Functions for saving and restoring GCable memory to disk.  */
228 
/* One record per object registered for PCH saving: where the object
   lives now, how to walk its pointers, and where it will live in the
   restored PCH image.  */
struct ptr_data
{
  void *obj;			/* Current address of the object.  */
  void *note_ptr_cookie;	/* Cookie passed back to the callbacks.  */
  gt_note_pointers note_ptr_fn;	/* Walks/relocates pointers inside OBJ.  */
  gt_handle_reorder reorder_fn;	/* Optional pre-write reorder hook.  */
  size_t size;			/* Size of the object in bytes.  */
  void *new_addr;		/* Address OBJ will have in the PCH image.  */
};
238 
/* Hash an object by its address, discarding the low bits that are
   always zero due to alignment.  The argument is parenthesized so the
   macro expands safely for any argument expression (the original
   expanded X unparenthesized, which would mis-bind for expressions
   with operators of lower precedence than the cast).  */
#define POINTER_HASH(x) (hashval_t)((intptr_t)(x) >> 3)
240 
/* Helper for hashing saving_htab.  */

struct saving_hasher : typed_free_remove <ptr_data>
{
  typedef ptr_data value_type;
  typedef void compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

/* Hash a ptr_data entry by the address of the object it describes.  */

inline hashval_t
saving_hasher::hash (const value_type *p)
{
  return POINTER_HASH (p->obj);
}

/* An entry matches when it describes the object at address P2.  */

inline bool
saving_hasher::equal (const value_type *p1, const compare_type *p2)
{
  return p1->obj == p2;
}

/* Table mapping object addresses to their ptr_data records; live only
   while a PCH is being saved.  */
static hash_table<saving_hasher> *saving_htab;
264 
265 /* Register an object in the hash table.  */
266 
267 int
268 gt_pch_note_object (void *obj, void *note_ptr_cookie,
269 		    gt_note_pointers note_ptr_fn)
270 {
271   struct ptr_data **slot;
272 
273   if (obj == NULL || obj == (void *) 1)
274     return 0;
275 
276   slot = (struct ptr_data **)
277     saving_htab->find_slot_with_hash (obj, POINTER_HASH (obj), INSERT);
278   if (*slot != NULL)
279     {
280       gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
281 		  && (*slot)->note_ptr_cookie == note_ptr_cookie);
282       return 0;
283     }
284 
285   *slot = XCNEW (struct ptr_data);
286   (*slot)->obj = obj;
287   (*slot)->note_ptr_fn = note_ptr_fn;
288   (*slot)->note_ptr_cookie = note_ptr_cookie;
289   if (note_ptr_fn == gt_pch_p_S)
290     (*slot)->size = strlen ((const char *)obj) + 1;
291   else
292     (*slot)->size = ggc_get_size (obj);
293   return 1;
294 }
295 
/* Note that the contents of OBJ need reordering before being written
   out; OBJ must already be registered in the hash table.  */
297 
298 void
299 gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
300 		     gt_handle_reorder reorder_fn)
301 {
302   struct ptr_data *data;
303 
304   if (obj == NULL || obj == (void *) 1)
305     return;
306 
307   data = (struct ptr_data *)
308     saving_htab->find_with_hash (obj, POINTER_HASH (obj));
309   gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);
310 
311   data->reorder_fn = reorder_fn;
312 }
313 
314 /* Handy state for the traversal functions.  */
315 
struct traversal_state
{
  FILE *f;			/* The PCH file being written.  */
  struct ggc_pch_data *d;	/* Collector-specific PCH state.  */
  size_t count;			/* Number of objects to be written.  */
  struct ptr_data **ptrs;	/* All registered objects, later sorted.  */
  size_t ptrs_i;		/* Next free index into PTRS.  */
};
324 
325 /* Callbacks for htab_traverse.  */
326 
327 int
328 ggc_call_count (ptr_data **slot, traversal_state *state)
329 {
330   struct ptr_data *d = *slot;
331 
332   ggc_pch_count_object (state->d, d->obj, d->size,
333 			d->note_ptr_fn == gt_pch_p_S);
334   state->count++;
335   return 1;
336 }
337 
338 int
339 ggc_call_alloc (ptr_data **slot, traversal_state *state)
340 {
341   struct ptr_data *d = *slot;
342 
343   d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
344 				      d->note_ptr_fn == gt_pch_p_S);
345   state->ptrs[state->ptrs_i++] = d;
346   return 1;
347 }
348 
349 /* Callback for qsort.  */
350 
351 static int
352 compare_ptr_data (const void *p1_p, const void *p2_p)
353 {
354   const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
355   const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
356   return (((size_t)p1->new_addr > (size_t)p2->new_addr)
357 	  - ((size_t)p1->new_addr < (size_t)p2->new_addr));
358 }
359 
360 /* Callbacks for note_ptr_fn.  */
361 
362 static void
363 relocate_ptrs (void *ptr_p, void *state_p)
364 {
365   void **ptr = (void **)ptr_p;
366   struct traversal_state *state ATTRIBUTE_UNUSED
367     = (struct traversal_state *)state_p;
368   struct ptr_data *result;
369 
370   if (*ptr == NULL || *ptr == (void *)1)
371     return;
372 
373   result = (struct ptr_data *)
374     saving_htab->find_with_hash (*ptr, POINTER_HASH (*ptr));
375   gcc_assert (result);
376   *ptr = result->new_addr;
377 }
378 
379 /* Write out, after relocation, the pointers in TAB.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
		   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	{
	  void *ptr = *(void **)((char *)rti->base + rti->stride * i);
	  struct ptr_data *new_ptr;
	  if (ptr == NULL || ptr == (void *)1)
	    {
	      /* Null and the magic value 1 are written unchanged.  */
	      if (fwrite (&ptr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error (input_location, "can%'t write PCH file: %m");
	    }
	  else
	    {
	      /* Otherwise write the address the pointed-to object will
		 occupy in the restored PCH image.  */
	      new_ptr = (struct ptr_data *)
		saving_htab->find_with_hash (ptr, POINTER_HASH (ptr));
	      if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error (input_location, "can%'t write PCH file: %m");
	    }
	}
}
410 
411 /* Hold the information we need to mmap the file back in.  */
412 
struct mmap_info
{
  size_t offset;		/* File offset of the heap image.  */
  size_t size;			/* Size of the heap image in bytes.  */
  void *preferred_base;		/* Address the image was laid out for.  */
};
419 
420 /* Write out the state of the compiler to F.  */
421 
void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;		/* Scratch copy of the current object.  */
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity ();

  /* Prepare the string pool's contents for saving.  */
  gt_pch_save_stringpool ();

  timevar_push (TV_PCH_PTR_REALLOC);
  saving_htab = new hash_table<saving_hasher> (50000);

  /* Walk every GC root; the per-type pchw hooks register each reachable
     object in saving_htab (see gt_pch_note_object).  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch ();
  state.count = 0;
  saving_htab->traverse <traversal_state *, ggc_call_count> (&state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));

  ggc_pch_this_base (state.d, mmi.preferred_base);

  /* Collect all registered objects into a flat array and assign each
     its address in the PCH image.  */
  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;

  saving_htab->traverse <traversal_state *, ggc_call_alloc> (&state);
  timevar_pop (TV_PCH_PTR_REALLOC);

  /* Sort by target address so objects are written in image order.  */
  timevar_push (TV_PCH_PTR_SORT);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
  timevar_pop (TV_PCH_PTR_SORT);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
	fatal_error (input_location, "can%'t write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error (input_location, "can%'t get position in PCH file: %m");
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
  }
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error (input_location, "can%'t write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error (input_location, "can%'t write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
  vec<char> vbits = vNULL;
#endif

  /* Actually write out the objects.  Each object is copied into
     THIS_OBJECT, its pointers relocated in place in the original,
     written, and then (for non-strings) the original is restored from
     the scratch copy so the running compiler is unaffected.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
	{
	  this_object_size = state.ptrs[i]->size;
	  this_object = XRESIZEVAR (char, this_object, this_object_size);
	}
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
      /* obj might contain uninitialized bytes, e.g. in the trailing
	 padding of the object.  Avoid warnings by making the memory
	 temporarily defined and then restoring previous state.  */
      int get_vbits = 0;
      size_t valid_size = state.ptrs[i]->size;
      if (__builtin_expect (RUNNING_ON_VALGRIND, 0))
	{
	  if (vbits.length () < valid_size)
	    vbits.safe_grow (valid_size);
	  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
					  vbits.address (), valid_size);
	  if (get_vbits == 3)
	    {
	      /* We assume that first part of obj is addressable, and
		 the rest is unaddressable.  Find out where the boundary is
		 using binary search.  */
	      size_t lo = 0, hi = valid_size;
	      while (hi > lo)
		{
		  size_t mid = (lo + hi) / 2;
		  get_vbits = VALGRIND_GET_VBITS ((char *) state.ptrs[i]->obj
						  + mid, vbits.address (),
						  1);
		  if (get_vbits == 3)
		    hi = mid;
		  else if (get_vbits == 1)
		    lo = mid + 1;
		  else
		    break;
		}
	      if (get_vbits == 1 || get_vbits == 3)
		{
		  valid_size = lo;
		  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
						  vbits.address (),
						  valid_size);
		}
	    }
	  if (get_vbits == 1)
	    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (state.ptrs[i]->obj,
							 state.ptrs[i]->size));
	}
#endif
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
	state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
				   state.ptrs[i]->note_ptr_cookie,
				   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
				  state.ptrs[i]->note_ptr_cookie,
				  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
			    state.ptrs[i]->new_addr, state.ptrs[i]->size,
			    state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
	memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
      if (__builtin_expect (get_vbits == 1, 0))
	{
	  (void) VALGRIND_SET_VBITS (state.ptrs[i]->obj, vbits.address (),
				     valid_size);
	  if (valid_size != state.ptrs[i]->size)
	    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *)
							  state.ptrs[i]->obj
							  + valid_size,
							  state.ptrs[i]->size
							  - valid_size));
	}
#endif
    }
#if defined ENABLE_VALGRIND_CHECKING && defined VALGRIND_GET_VBITS
  vbits.release ();
#endif

  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  /* Release everything that was only needed for saving.  */
  XDELETE (state.ptrs);
  XDELETE (this_object);
  delete saving_htab;
  saving_htab = NULL;
}
595 
596 /* Read the state of the compiler back in from F.  */
597 
void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;
  /* On failure the scalar reads below may have clobbered line_table and
     input_location; save them so error paths can restore a sane state
     before calling fatal_error.  */
  struct line_maps * old_line_table = line_table;
  location_t old_input_loc = input_location;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1) {
        line_table = old_line_table;
	input_location = old_input_loc;
	fatal_error (input_location, "can%'t read PCH file: %m");
      }

  /* Read in all the global pointers.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1) {
          line_table = old_line_table;
	  input_location = old_input_loc;
	  fatal_error (input_location, "can%'t read PCH file: %m");
        }

  /* Read the descriptor of the mmap-able heap image.  */
  if (fread (&mmi, sizeof (mmi), 1, f) != 1) {
    line_table = old_line_table;
    input_location = old_input_loc;
    fatal_error (input_location, "can%'t read PCH file: %m");
  }

  /* Try to map (or read) the heap image at the address it was saved
     for; the saved pointers are only valid at that address.  */
  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
					  fileno (f), mmi.offset);
  if (result < 0) {
    line_table = old_line_table;
    input_location = old_input_loc;
    fatal_error (input_location, "had to relocate PCH");
  }
  if (result == 0)
    {
      /* Memory is allocated at the right address but not yet filled:
	 read the image in by hand.  */
      if (fseek (f, mmi.offset, SEEK_SET) != 0
	  || fread (mmi.preferred_base, mmi.size, 1, f) != 1) {
        line_table = old_line_table;
        input_location = old_input_loc;
	fatal_error (input_location, "can%'t read PCH file: %m");
      }
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0) {
    line_table = old_line_table;
    input_location = old_input_loc;
    fatal_error (input_location, "can%'t read PCH file: %m");
  }

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}
668 
669 /* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
670    Select no address whatsoever, and let gt_pch_save choose what it will with
671    malloc, presumably.  */
672 
673 void *
674 default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
675 			    int fd ATTRIBUTE_UNUSED)
676 {
677   return NULL;
678 }
679 
680 /* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
681    Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
682    same as base, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, so relocation
684    of the PCH file would be required.  */
685 
686 int
687 default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
688 			    size_t offset ATTRIBUTE_UNUSED)
689 {
690   void *addr = xmalloc (size);
691   return (addr == base) - 1;
692 }
693 
694 /* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS.   Return the
695    alignment required for allocating virtual memory. Usually this is the
696    same as pagesize.  */
697 
size_t
default_gt_pch_alloc_granularity (void)
{
  /* With no host-specific knowledge, the VM page size is the natural
     allocation granularity.  */
  return (size_t) getpagesize ();
}
703 
704 #if HAVE_MMAP_FILE
705 /* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
706    We temporarily allocate SIZE bytes, and let the kernel place the data
707    wherever it will.  If it worked, that's our spot, if not we're likely
708    to be in trouble.  */
709 
void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  /* Ask the kernel where it would place a SIZE-byte mapping of FD and
     immediately release it; that address becomes our preferred base.  */
  void *ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (ret == (void *) MAP_FAILED)
    return NULL;

  munmap ((caddr_t) ret, size);
  return ret;
}
723 
724 /* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
725    Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
726    mapping the data at BASE, -1 if we couldn't.
727 
728    This version assumes that the kernel honors the START operand of mmap
729    even without MAP_FIXED if START through START+SIZE are not currently
730    mapped with something.  */
731 
int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  /* Request the mapping at BASE without MAP_FIXED; succeed only when
     the kernel actually honored the hint.  */
  void *addr = mmap ((caddr_t) base, size, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE, fd, offset);

  return addr == base ? 1 : -1;
}
748 #endif /* HAVE_MMAP_FILE */
749 
750 #if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
751 
752 /* Modify the bound based on rlimits.  */
static double
ggc_rlimit_bound (double limit)
{
  /* Clip LIMIT to whatever resource limit effectively bounds our
     address space; return LIMIT unchanged if none applies.  */
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
	 RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
	 appears to be ignored.  Ignore such silliness.  If a limit
	 this small was actually effective for mmap, GCC wouldn't even
	 start up.  */
      && rlim.rlim_cur >= 8 * 1024 * 1024)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}
784 
785 /* Heuristic to set a default for GGC_MIN_EXPAND.  */
786 static int
787 ggc_min_expand_heuristic (void)
788 {
789   double min_expand = physmem_total ();
790 
791   /* Adjust for rlimits.  */
792   min_expand = ggc_rlimit_bound (min_expand);
793 
794   /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
795      a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
796   min_expand /= 1024*1024*1024;
797   min_expand *= 70;
798   min_expand = MIN (min_expand, 70);
799   min_expand += 30;
800 
801   return min_expand;
802 }
803 
804 /* Heuristic to set a default for GGC_MIN_HEAPSIZE.  */
static int
ggc_min_heapsize_heuristic (void)
{
  /* Pick a default minimum heap size in kilobytes, based on physical
     RAM and clipped by the various resource limits.  */
  double phys_kbytes = physmem_total ();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= 1024; /* Convert to Kbytes.  */
  limit_kbytes /= 1024;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
 {
   struct rlimit rlim;
   if (getrlimit (RLIMIT_RSS, &rlim) == 0
       && rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
     phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
 }
# endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 20Mb of the limit or within a quarter of
     the limit, whichever is larger.  If GCC does hit the data limit,
     compilation will fail, so this tries to be conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * 1024));
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic ());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  phys_kbytes = MAX (phys_kbytes, 4 * 1024);
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);

  return phys_kbytes;
}
842 #endif
843 
void
init_ggc_heuristics (void)
{
  /* Install heuristic defaults for the GC tuning parameters.  When GC
     checking is enabled the params.def defaults are kept so collections
     happen aggressively.  */
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  set_default_param_value (GGC_MIN_EXPAND, ggc_min_expand_heuristic ());
  set_default_param_value (GGC_MIN_HEAPSIZE, ggc_min_heapsize_heuristic ());
#endif
}
852 
853 /* Datastructure used to store per-call-site statistics.  */
struct ggc_loc_descriptor
{
  const char *file;		/* Source location of the allocating call.  */
  int line;
  const char *function;
  int times;			/* Number of allocations from this site.  */
  size_t allocated;		/* Total bytes requested.  */
  size_t overhead;		/* Total allocator overhead in bytes.  */
  size_t freed;			/* Bytes explicitly freed via ggc_free.  */
  size_t collected;		/* Bytes reclaimed by the collector.  */
};
865 
866 /* Hash table helper.  */
867 
struct ggc_loc_desc_hasher : typed_noop_remove <ggc_loc_descriptor>
{
  typedef ggc_loc_descriptor value_type;
  typedef ggc_loc_descriptor compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

/* Hash by function pointer combined with line number.  NOTE(review):
   OR gives weaker bit mixing than XOR would, but equality still
   distinguishes entries, so this only affects bucket distribution.  */

inline hashval_t
ggc_loc_desc_hasher::hash (const value_type *d)
{
  return htab_hash_pointer (d->function) | d->line;
}

/* Two descriptors are the same call site when file, line and function
   all match.  */

inline bool
ggc_loc_desc_hasher::equal (const value_type *d, const compare_type *d2)
{
  return (d->file == d2->file && d->line == d2->line
	  && d->function == d2->function);
}
888 
/* Hashtable used for statistics: one descriptor per call site.  */
static hash_table<ggc_loc_desc_hasher> *loc_hash;

/* One entry per live GGC allocation, linking the pointer to the
   descriptor of the call site that allocated it.  */
struct ggc_ptr_hash_entry
{
  void *ptr;			/* The allocated object.  */
  struct ggc_loc_descriptor *loc; /* Call site that allocated it.  */
  size_t size;			/* Allocation size including overhead.  */
};
898 
899 /* Helper for ptr_hash table.  */
900 
struct ptr_hash_hasher : typed_noop_remove <ggc_ptr_hash_entry>
{
  typedef ggc_ptr_hash_entry value_type;
  typedef void compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
};

/* Hash an entry by the address of the allocation it records.  */

inline hashval_t
ptr_hash_hasher::hash (const value_type *d)
{
  return htab_hash_pointer (d->ptr);
}

/* An entry matches when it records the allocation at address P2.  */

inline bool
ptr_hash_hasher::equal (const value_type *p, const compare_type *p2)
{
  return (p->ptr == p2);
}

/* Hashtable converting address of allocated field to loc descriptor.  */
static hash_table<ptr_hash_hasher> *ptr_hash;
923 
924 /* Return descriptor for given call site, create new one if needed.  */
925 static struct ggc_loc_descriptor *
926 make_loc_descriptor (const char *name, int line, const char *function)
927 {
928   struct ggc_loc_descriptor loc;
929   struct ggc_loc_descriptor **slot;
930 
931   loc.file = name;
932   loc.line = line;
933   loc.function = function;
934   if (!loc_hash)
935     loc_hash = new hash_table<ggc_loc_desc_hasher> (10);
936 
937   slot = loc_hash->find_slot (&loc, INSERT);
938   if (*slot)
939     return *slot;
940   *slot = XCNEW (struct ggc_loc_descriptor);
941   (*slot)->file = name;
942   (*slot)->line = line;
943   (*slot)->function = function;
944   return *slot;
945 }
946 
947 /* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION).  */
948 void
949 ggc_record_overhead (size_t allocated, size_t overhead, void *ptr,
950 		     const char *name, int line, const char *function)
951 {
952   struct ggc_loc_descriptor *loc = make_loc_descriptor (name, line, function);
953   struct ggc_ptr_hash_entry *p = XNEW (struct ggc_ptr_hash_entry);
954   ggc_ptr_hash_entry **slot;
955 
956   p->ptr = ptr;
957   p->loc = loc;
958   p->size = allocated + overhead;
959   if (!ptr_hash)
960     ptr_hash = new hash_table<ptr_hash_hasher> (10);
961   slot = ptr_hash->find_slot_with_hash (ptr, htab_hash_pointer (ptr), INSERT);
962   gcc_assert (!*slot);
963   *slot = p;
964 
965   loc->times++;
966   loc->allocated+=allocated;
967   loc->overhead+=overhead;
968 }
969 
970 /* Helper function for prune_overhead_list.  See if SLOT is still marked and
971    remove it from hashtable if it is not.  */
972 int
973 ggc_prune_ptr (ggc_ptr_hash_entry **slot, void *b ATTRIBUTE_UNUSED)
974 {
975   struct ggc_ptr_hash_entry *p = *slot;
976   if (!ggc_marked_p (p->ptr))
977     {
978       p->loc->collected += p->size;
979       ptr_hash->clear_slot (slot);
980       free (p);
981     }
982   return 1;
983 }
984 
985 /* After live values has been marked, walk all recorded pointers and see if
986    they are still live.  */
987 void
988 ggc_prune_overhead_list (void)
989 {
990   ptr_hash->traverse <void *, ggc_prune_ptr> (NULL);
991 }
992 
993 /* Notice that the pointer has been freed.  */
994 void
995 ggc_free_overhead (void *ptr)
996 {
997   ggc_ptr_hash_entry **slot
998     = ptr_hash->find_slot_with_hash (ptr, htab_hash_pointer (ptr), NO_INSERT);
999   struct ggc_ptr_hash_entry *p;
1000   /* The pointer might be not found if a PCH read happened between allocation
1001      and ggc_free () call.  FIXME: account memory properly in the presence of
1002      PCH. */
1003   if (!slot)
1004       return;
1005   p = (struct ggc_ptr_hash_entry *) *slot;
1006   p->loc->freed += p->size;
1007   ptr_hash->clear_slot (slot);
1008   free (p);
1009 }
1010 
1011 /* Helper for qsort; sort descriptors by amount of memory consumed.  */
1012 static int
1013 final_cmp_statistic (const void *loc1, const void *loc2)
1014 {
1015   const struct ggc_loc_descriptor *const l1 =
1016     *(const struct ggc_loc_descriptor *const *) loc1;
1017   const struct ggc_loc_descriptor *const l2 =
1018     *(const struct ggc_loc_descriptor *const *) loc2;
1019   long diff;
1020   diff = ((long)(l1->allocated + l1->overhead - l1->freed) -
1021 	  (l2->allocated + l2->overhead - l2->freed));
1022   return diff > 0 ? 1 : diff < 0 ? -1 : 0;
1023 }
1024 
1025 /* Helper for qsort; sort descriptors by amount of memory consumed.  */
1026 static int
1027 cmp_statistic (const void *loc1, const void *loc2)
1028 {
1029   const struct ggc_loc_descriptor *const l1 =
1030     *(const struct ggc_loc_descriptor *const *) loc1;
1031   const struct ggc_loc_descriptor *const l2 =
1032     *(const struct ggc_loc_descriptor *const *) loc2;
1033   long diff;
1034 
1035   diff = ((long)(l1->allocated + l1->overhead - l1->freed - l1->collected) -
1036 	  (l2->allocated + l2->overhead - l2->freed - l2->collected));
1037   if (diff)
1038     return diff > 0 ? 1 : diff < 0 ? -1 : 0;
1039   diff =  ((long)(l1->allocated + l1->overhead - l1->freed) -
1040 	   (l2->allocated + l2->overhead - l2->freed));
1041   return diff > 0 ? 1 : diff < 0 ? -1 : 0;
1042 }
1043 
/* Scratch array into which dump_ggc_loc_statistics gathers the
   descriptors stored in LOC_HASH.  */
static struct ggc_loc_descriptor **loc_array;

/* Traversal callback: append the descriptor in SLOT to LOC_ARRAY at
   index *N and advance the index.  Returns 1 so the traversal always
   continues.  */
int
ggc_add_statistics (ggc_loc_descriptor **slot, int *n)
{
  loc_array[(*n)++] = *slot;
  return 1;
}
1053 
1054 /* Dump per-site memory statistics.  */
1055 
1056 void
1057 dump_ggc_loc_statistics (bool final)
1058 {
1059   int nentries = 0;
1060   char s[4096];
1061   size_t collected = 0, freed = 0, allocated = 0, overhead = 0, times = 0;
1062   int i;
1063 
1064   if (! GATHER_STATISTICS)
1065     return;
1066 
1067   ggc_force_collect = true;
1068   ggc_collect ();
1069 
1070   loc_array = XCNEWVEC (struct ggc_loc_descriptor *,
1071 			loc_hash->elements_with_deleted ());
1072   fprintf (stderr, "-------------------------------------------------------\n");
1073   fprintf (stderr, "\n%-48s %10s       %10s       %10s       %10s       %10s\n",
1074 	   "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
1075   fprintf (stderr, "-------------------------------------------------------\n");
1076   loc_hash->traverse <int *, ggc_add_statistics> (&nentries);
1077   qsort (loc_array, nentries, sizeof (*loc_array),
1078 	 final ? final_cmp_statistic : cmp_statistic);
1079   for (i = 0; i < nentries; i++)
1080     {
1081       struct ggc_loc_descriptor *d = loc_array[i];
1082       allocated += d->allocated;
1083       times += d->times;
1084       freed += d->freed;
1085       collected += d->collected;
1086       overhead += d->overhead;
1087     }
1088   for (i = 0; i < nentries; i++)
1089     {
1090       struct ggc_loc_descriptor *d = loc_array[i];
1091       if (d->allocated)
1092 	{
1093 	  const char *s1 = d->file;
1094 	  const char *s2;
1095 	  while ((s2 = strstr (s1, "gcc/")))
1096 	    s1 = s2 + 4;
1097 	  sprintf (s, "%s:%i (%s)", s1, d->line, d->function);
1098 	  s[48] = 0;
1099 	  fprintf (stderr, "%-48s %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li:%4.1f%% %10li\n", s,
1100 		   (long)d->collected,
1101 		   (d->collected) * 100.0 / collected,
1102 		   (long)d->freed,
1103 		   (d->freed) * 100.0 / freed,
1104 		   (long)(d->allocated + d->overhead - d->freed - d->collected),
1105 		   (d->allocated + d->overhead - d->freed - d->collected) * 100.0
1106 		   / (allocated + overhead - freed - collected),
1107 		   (long)d->overhead,
1108 		   d->overhead * 100.0 / overhead,
1109 		   (long)d->times);
1110 	}
1111     }
1112   fprintf (stderr, "%-48s %10ld       %10ld       %10ld       %10ld       %10ld\n",
1113 	   "Total", (long)collected, (long)freed,
1114 	   (long)(allocated + overhead - freed - collected), (long)overhead,
1115 	   (long)times);
1116   fprintf (stderr, "%-48s %10s       %10s       %10s       %10s       %10s\n",
1117 	   "source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
1118   fprintf (stderr, "-------------------------------------------------------\n");
1119   ggc_force_collect = false;
1120 }
1121