/* Simple garbage collection for the GNU compiler.
   Copyright (C) 1999-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Generic garbage collection (GC) functions and data, not specific to
   any particular GC implementation.  */

#include "config.h"
#define INCLUDE_MALLOC_H
#include "system.h"
#include "coretypes.h"
#include "timevar.h"
#include "diagnostic-core.h"
#include "ggc-internal.h"
#include "hosthooks.h"
#include "plugin.h"
#include "options.h"

/* When set, ggc_collect will do collection.  */
bool ggc_force_collect;

/* When true, protect the contents of the identifier hash table.  */
bool ggc_protect_identifiers = true;

/* Statistics about the allocation.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
			       struct traversal_state *state);

/* Maintain global roots that are preserved during GC.  */

/* This extra vector of dynamically registered root_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC root
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_root_tab *const_ggc_root_tab_t;
static vec<const_ggc_root_tab_t> extra_root_vec;

/* Dynamically register a new GGC root table RT.  This is useful for
   plugins.  */

void
ggc_register_root_tab (const struct ggc_root_tab* rt)
{
  if (rt)
    extra_root_vec.safe_push (rt);
}
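
/* For illustration, a plugin can register a GC root for one of its own
   globals roughly along these lines (a hedged sketch following the
   pattern used in GCC plugin examples; "my_plugin_tree" is a
   hypothetical variable, while LAST_GGC_ROOT_TAB and the
   gt_ggc_mx_tree_node / gt_pch_nx_tree_node walkers are part of the
   stock GGC machinery):

     static tree my_plugin_tree;

     static const struct ggc_root_tab my_root_tab[] = {
       { &my_plugin_tree, 1, sizeof (my_plugin_tree),
	 &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },
       LAST_GGC_ROOT_TAB
     };

     ggc_register_root_tab (my_root_tab);  */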

/* Mark all the roots in the table RT.  */

static void
ggc_mark_root_tab (const_ggc_root_tab_t rt)
{
  size_t i;

  for ( ; rt->base != NULL; rt++)
    for (i = 0; i < rt->nelt; i++)
      (*rt->cb) (*(void **) ((char *)rt->base + rt->stride * i));
}

/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const_ggc_root_tab_t rtp, rti;
  size_t i;

  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  for (rt = gt_ggc_rtab; *rt; rt++)
    ggc_mark_root_tab (*rt);

  FOR_EACH_VEC_ELT (extra_root_vec, i, rtp)
    ggc_mark_root_tab (rtp);

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  gt_clear_caches ();

  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();

  /* Some plugins may call ggc_set_mark from here.  */
  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
}
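
/* For illustration, a plugin hooking into this marking phase might look
   like the following hedged sketch ("my_extra_root" is hypothetical;
   PLUGIN_GGC_MARKING, register_callback and ggc_set_mark are the real
   plugin API, and ggc_set_mark marks just the object it is given):

     static void
     my_ggc_marking (void *gcc_data ATTRIBUTE_UNUSED,
		     void *user_data ATTRIBUTE_UNUSED)
     {
       if (my_extra_root)
	 ggc_set_mark (my_extra_root);
     }

     register_callback ("my_plugin", PLUGIN_GGC_MARKING,
			my_ggc_marking, NULL);  */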

/* Allocate a block of memory, then clear it.  */
void *
ggc_internal_cleared_alloc (size_t size, void (*f)(void *), size_t s, size_t n
			    MEM_STAT_DECL)
{
  void *buf = ggc_internal_alloc (size, f, s, n PASS_MEM_STAT);
  memset (buf, 0, size);
  return buf;
}

/* Resize a block of memory, possibly re-allocating it.  */
void *
ggc_realloc (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_internal_alloc (size PASS_MEM_STAT);

  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as inaccessible.  We also need to make
	 the "new" size accessible, since ggc_get_size returns the size of
	 the pool, not the size of the individually allocated object, the
	 size which was previously made accessible.  Unfortunately, we
	 don't know that previously allocated size.  Without that
	 knowledge we have to lose some initialization-tracking for the
	 old parts of the object.  An alternative is to mark the whole
	 old_size as reachable, but that would lose tracking of writes
	 after the end of the object (by small offsets).  Discard the
	 handle to avoid a handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
						    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_internal_alloc (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}

void *
ggc_cleared_alloc_htab_ignore_args (size_t c ATTRIBUTE_UNUSED,
				    size_t n ATTRIBUTE_UNUSED)
{
  gcc_assert (c * n == sizeof (struct htab));
  return ggc_cleared_alloc<htab> ();
}

/* TODO: once we actually use type information in GGC, create a new tag
   gt_gcc_ptr_array and use it for pointer arrays.  */
void *
ggc_cleared_alloc_ptr_array_two_args (size_t c, size_t n)
{
  gcc_assert (sizeof (PTR *) == n);
  return ggc_cleared_vec_alloc<PTR *> (c);
}

/* These are for splay_tree_new_ggc.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  gcc_assert (!nl);
  return ggc_internal_alloc (sz);
}

void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  gcc_assert (!nl);
}

void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
			     ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}

/* Functions for saving and restoring GCable memory to disk.  */

struct ptr_data
{
  void *obj;
  void *note_ptr_cookie;
  gt_note_pointers note_ptr_fn;
  gt_handle_reorder reorder_fn;
  size_t size;
  void *new_addr;
};

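/* Hash a pointer by its address.  The low-order bits are discarded
   because alignment of GC-allocated objects keeps them (mostly) zero,
   which would make for a poor hash.  */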
#define POINTER_HASH(x) (hashval_t)((intptr_t)x >> 3)

/* Helper for hashing saving_htab.  */

struct saving_hasher : free_ptr_hash <ptr_data>
{
  typedef void *compare_type;
  static inline hashval_t hash (const ptr_data *);
  static inline bool equal (const ptr_data *, const void *);
};

inline hashval_t
saving_hasher::hash (const ptr_data *p)
{
  return POINTER_HASH (p->obj);
}

inline bool
saving_hasher::equal (const ptr_data *p1, const void *p2)
{
  return p1->obj == p2;
}

static hash_table<saving_hasher> *saving_htab;

/* Register an object in the hash table.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
		    gt_note_pointers note_ptr_fn)
{
  struct ptr_data **slot;

  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    saving_htab->find_slot_with_hash (obj, POINTER_HASH (obj), INSERT);
  if (*slot != NULL)
    {
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
		  && (*slot)->note_ptr_cookie == note_ptr_cookie);
      return 0;
    }

  *slot = XCNEW (struct ptr_data);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen ((const char *)obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  return 1;
}

/* Register the reorder function REORDER_FN for an object previously
   noted with gt_pch_note_object.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
		     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = (struct ptr_data *)
    saving_htab->find_with_hash (obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}

/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;
  struct ggc_pch_data *d;
  size_t count;
  struct ptr_data **ptrs;
  size_t ptrs_i;
};

/* Callbacks for htab_traverse.  */

int
ggc_call_count (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  ggc_pch_count_object (state->d, d->obj, d->size,
			d->note_ptr_fn == gt_pch_p_S);
  state->count++;
  return 1;
}

int
ggc_call_alloc (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
				      d->note_ptr_fn == gt_pch_p_S);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}

/* Callback for qsort.  */

static int
compare_ptr_data (const void *p1_p, const void *p2_p)
{
  const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
  const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
	  - ((size_t)p1->new_addr < (size_t)p2->new_addr));
}

/* Callbacks for note_ptr_fn.  */

static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = (struct ptr_data *)
    saving_htab->find_with_hash (*ptr, POINTER_HASH (*ptr));
  gcc_assert (result);
  *ptr = result->new_addr;
}

/* Write out, after relocation, the pointers in TAB.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
		   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	{
	  void *ptr = *(void **)((char *)rti->base + rti->stride * i);
	  struct ptr_data *new_ptr;
	  if (ptr == NULL || ptr == (void *)1)
	    {
	      if (fwrite (&ptr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error (input_location, "cannot write PCH file: %m");
	    }
	  else
	    {
	      new_ptr = (struct ptr_data *)
		saving_htab->find_with_hash (ptr, POINTER_HASH (ptr));
	      if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
		  != 1)
		fatal_error (input_location, "cannot write PCH file: %m");
	    }
	}
}

/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;
  size_t size;
  void *preferred_base;
};

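/* For orientation, the PCH stream that gt_pch_save below produces is
   laid out as (a summary derived from the code, not a format spec):

     [scalar roots]      contents of gt_pch_scalar_rtab
     [global pointers]   gt_ggc_rtab pointers, translated to new_addr
     [struct mmap_info]  offset, size and preferred base of the blob
     [padding]           up to the next allocation-granularity boundary
     [object blob]       mmi.size bytes, mmapped back in on restore.  */
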
/* Write out the state of the compiler to F.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity ();

  gt_pch_save_stringpool ();

  timevar_push (TV_PCH_PTR_REALLOC);
  saving_htab = new hash_table<saving_hasher> (50000);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	(*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch ();
  state.count = 0;
  saving_htab->traverse <traversal_state *, ggc_call_count> (&state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;

  saving_htab->traverse <traversal_state *, ggc_call_alloc> (&state);
  timevar_pop (TV_PCH_PTR_REALLOC);

  timevar_push (TV_PCH_PTR_SORT);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
  timevar_pop (TV_PCH_PTR_SORT);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
	fatal_error (input_location, "cannot write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error (input_location, "cannot get position in PCH file: %m");
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
  }
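  /* For example (illustrative numbers): with a 4096-byte granularity
     and the header ending at o = 10000, the block above computes
     mmi.offset = 10000 + (4096 - 10000 % 4096) = 12288, the next
     multiple of 4096; if o were already a multiple of 4096, mmi.offset
     would simply be o.  */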
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error (input_location, "cannot write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error (input_location, "cannot write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
  vec<char> vbits = vNULL;
#endif

  /* Actually write out the objects.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
	{
	  this_object_size = state.ptrs[i]->size;
	  this_object = XRESIZEVAR (char, this_object, this_object_size);
	}
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
      /* obj might contain uninitialized bytes, e.g. in the trailing
	 padding of the object.  Avoid warnings by making the memory
	 temporarily defined and then restoring previous state.  */
      int get_vbits = 0;
      size_t valid_size = state.ptrs[i]->size;
      if (__builtin_expect (RUNNING_ON_VALGRIND, 0))
	{
	  if (vbits.length () < valid_size)
	    vbits.safe_grow (valid_size);
	  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
					  vbits.address (), valid_size);
	  if (get_vbits == 3)
	    {
	      /* We assume that the first part of obj is addressable, and
		 the rest is unaddressable.  Find out where the boundary is
		 using binary search.  */
	      size_t lo = 0, hi = valid_size;
	      while (hi > lo)
		{
		  size_t mid = (lo + hi) / 2;
		  get_vbits = VALGRIND_GET_VBITS ((char *) state.ptrs[i]->obj
						  + mid, vbits.address (),
						  1);
		  if (get_vbits == 3)
		    hi = mid;
		  else if (get_vbits == 1)
		    lo = mid + 1;
		  else
		    break;
		}
	      if (get_vbits == 1 || get_vbits == 3)
		{
		  valid_size = lo;
		  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
						  vbits.address (),
						  valid_size);
		}
	    }
	  if (get_vbits == 1)
	    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (state.ptrs[i]->obj,
							 state.ptrs[i]->size));
	}
#endif
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
	state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
				   state.ptrs[i]->note_ptr_cookie,
				   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
				  state.ptrs[i]->note_ptr_cookie,
				  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
			    state.ptrs[i]->new_addr, state.ptrs[i]->size,
			    state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
	memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
      if (__builtin_expect (get_vbits == 1, 0))
	{
	  (void) VALGRIND_SET_VBITS (state.ptrs[i]->obj, vbits.address (),
				     valid_size);
	  if (valid_size != state.ptrs[i]->size)
	    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *)
							  state.ptrs[i]->obj
							  + valid_size,
							  state.ptrs[i]->size
							  - valid_size));
	}
#endif
    }
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
  vbits.release ();
#endif

  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  XDELETE (state.ptrs);
  XDELETE (this_object);
  delete saving_htab;
  saving_htab = NULL;
}

/* Read the state of the compiler back in from F.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;
  struct line_maps * old_line_table = line_table;
  location_t old_input_loc = input_location;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
	{
	  line_table = old_line_table;
	  input_location = old_input_loc;
	  fatal_error (input_location, "cannot read PCH file: %m");
	}
  /* Read in all the global pointers, in three nested loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
	if (fread ((char *)rti->base + rti->stride * i,
		   sizeof (void *), 1, f) != 1)
	  {
	    line_table = old_line_table;
	    input_location = old_input_loc;
	    fatal_error (input_location, "cannot read PCH file: %m");
	  }

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    {
      line_table = old_line_table;
      input_location = old_input_loc;
      fatal_error (input_location, "cannot read PCH file: %m");
    }

  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
					  fileno (f), mmi.offset);
  if (result < 0)
    {
      line_table = old_line_table;
      input_location = old_input_loc;
      fatal_error (input_location, "had to relocate PCH");
    }
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
	  || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
	{
	  line_table = old_line_table;
	  input_location = old_input_loc;
	  fatal_error (input_location, "cannot read PCH file: %m");
	}
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    {
      line_table = old_line_table;
      input_location = old_input_loc;
      fatal_error (input_location, "cannot read PCH file: %m");
    }

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}

/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  */

void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
			    int fd ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
   Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
   same as BASE, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, indicating
   that relocation of the PCH file would be required.  */

int
default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
			    size_t offset ATTRIBUTE_UNUSED)
{
  void *addr = xmalloc (size);
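  /* (addr == base) evaluates to 1 exactly when malloc happened to hand
     back BASE, so this yields 0 on success and -1 otherwise.  */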
  return (addr == base) - 1;
}

/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as pagesize.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize ();
}

#if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot, if not we're likely
   to be in trouble.  */

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *ret;

  ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (ret == (void *) MAP_FAILED)
    ret = NULL;
  else
    munmap ((caddr_t) ret, size);

  return ret;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  addr = mmap ((caddr_t) base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
	       fd, offset);

  return addr == base ? 1 : -1;
}
#endif /* HAVE_MMAP_FILE */

#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT

/* Modify the bound based on rlimits.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
	 RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
	 appears to be ignored.  Ignore such silliness.  If a limit
	 this small was actually effective for mmap, GCC wouldn't even
	 start up.  */
      && rlim.rlim_cur >= 8 * 1024 * 1024)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}

/* Heuristic to set a default for GGC_MIN_EXPAND.  */
static int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total ();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;

  return min_expand;
}

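/* For example (illustrative arithmetic): with 512MB of usable RAM the
   heuristic above gives 30 + 70 * 0.5 = 65%, and with 1GB or more the
   70% term saturates, giving the 100% upper bound.  */
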
/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  */
static int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total ();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= 1024; /* Convert to Kbytes.  */
  limit_kbytes /= 1024;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  phys_kbytes /= 8;

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
  {
    struct rlimit rlim;
    if (getrlimit (RLIMIT_RSS, &rlim) == 0
	&& rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
      phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
  }
#endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 20Mb of the limit or within a quarter of
     the limit, whichever is larger.  If GCC does hit the data limit,
     compilation will fail, so this tries to be conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * 1024));
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic ());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  phys_kbytes = MAX (phys_kbytes, 4 * 1024);
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);

  return phys_kbytes;
}
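
/* For example (illustrative, ignoring the rlimit adjustments): with
   1GB of RAM, RAM/8 is 131072 kB, which the final clamp keeps at the
   128M ceiling; with 16MB of RAM, RAM/8 is 2048 kB, which is raised to
   the 4M floor.  */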
#endif

/* Set the GGC parameter defaults from the heuristics above.  */

void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  param_ggc_min_expand = ggc_min_expand_heuristic ();
  param_ggc_min_heapsize = ggc_min_heapsize_heuristic ();
#endif
}

/* GGC memory usage.  */
class ggc_usage: public mem_usage
{
public:
  /* Default constructor.  */
  ggc_usage (): m_freed (0), m_collected (0), m_overhead (0) {}
  /* Constructor.  */
  ggc_usage (size_t allocated, size_t times, size_t peak,
	     size_t freed, size_t collected, size_t overhead)
    : mem_usage (allocated, times, peak),
    m_freed (freed), m_collected (collected), m_overhead (overhead) {}

  /* Equality operator.  */
  inline bool
  operator== (const ggc_usage &second) const
  {
    return (get_balance () == second.get_balance ()
	    && m_peak == second.m_peak
	    && m_times == second.m_times);
  }

  /* Comparison operator.  */
  inline bool
  operator< (const ggc_usage &second) const
  {
    if (*this == second)
      return false;

    return (get_balance () == second.get_balance () ?
	    (m_peak == second.m_peak ? m_times < second.m_times
	     : m_peak < second.m_peak)
	      : get_balance () < second.get_balance ());
  }

  /* Register overhead of ALLOCATED and OVERHEAD bytes.  */
  inline void
  register_overhead (size_t allocated, size_t overhead)
  {
    m_allocated += allocated;
    m_overhead += overhead;
    m_times++;
  }

  /* Release overhead of SIZE bytes.  */
  inline void
  release_overhead (size_t size)
  {
    m_freed += size;
  }

  /* Sum the usage with SECOND usage.  */
  ggc_usage
  operator+ (const ggc_usage &second)
  {
    return ggc_usage (m_allocated + second.m_allocated,
		      m_times + second.m_times,
		      m_peak + second.m_peak,
		      m_freed + second.m_freed,
		      m_collected + second.m_collected,
		      m_overhead + second.m_overhead);
  }

  /* Dump usage with PREFIX, where TOTAL is the sum of all rows.  */
  inline void
  dump (const char *prefix, ggc_usage &total) const
  {
    size_t balance = get_balance ();
    fprintf (stderr,
	     "%-48s " PRsa (9) ":%5.1f%%" PRsa (9) ":%5.1f%%"
	     PRsa (9) ":%5.1f%%" PRsa (9) ":%5.1f%%" PRsa (9) "\n",
	     prefix,
	     SIZE_AMOUNT (balance), get_percent (balance, total.get_balance ()),
	     SIZE_AMOUNT (m_collected),
	     get_percent (m_collected, total.m_collected),
	     SIZE_AMOUNT (m_freed), get_percent (m_freed, total.m_freed),
	     SIZE_AMOUNT (m_overhead),
	     get_percent (m_overhead, total.m_overhead),
	     SIZE_AMOUNT (m_times));
  }

  /* Dump usage coupled to LOC location, where TOTAL is the sum of all
     rows.  */
  inline void
  dump (mem_location *loc, ggc_usage &total) const
  {
    char *location_string = loc->to_string ();

    dump (location_string, total);

    free (location_string);
  }

  /* Dump footer.  */
  inline void
  dump_footer ()
  {
    dump ("Total", *this);
  }

  /* Get the balance, which is the still-live GGC allocation (the leak).  */
  inline size_t
  get_balance () const
  {
    return m_allocated + m_overhead - m_collected - m_freed;
  }

  typedef std::pair<mem_location *, ggc_usage *> mem_pair_t;

  /* Compare wrapper used by qsort method.  */
  static int
  compare (const void *first, const void *second)
  {
    const mem_pair_t mem1 = *(const mem_pair_t *) first;
    const mem_pair_t mem2 = *(const mem_pair_t *) second;

    size_t balance1 = mem1.second->get_balance ();
    size_t balance2 = mem2.second->get_balance ();

    return balance1 == balance2 ? 0 : (balance1 < balance2 ? 1 : -1);
  }

  /* Dump header with NAME.  */
  static inline void
  dump_header (const char *name)
  {
    fprintf (stderr, "%-48s %11s%17s%17s%16s%17s\n", name, "Leak", "Garbage",
	     "Freed", "Overhead", "Times");
  }

  /* Freed memory in bytes.  */
  size_t m_freed;
  /* Collected memory in bytes.  */
  size_t m_collected;
  /* Overhead memory in bytes.  */
  size_t m_overhead;
};

/* GCC memory description.  */
static mem_alloc_description<ggc_usage> ggc_mem_desc;

/* Dump per-site memory statistics.  */

void
dump_ggc_loc_statistics ()
{
  if (! GATHER_STATISTICS)
    return;

  ggc_force_collect = true;
  ggc_collect ();

  ggc_mem_desc.dump (GGC_ORIGIN);

  ggc_force_collect = false;
}

/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION).  */
void
ggc_record_overhead (size_t allocated, size_t overhead, void *ptr MEM_STAT_DECL)
{
  ggc_usage *usage = ggc_mem_desc.register_descriptor (ptr, GGC_ORIGIN, false
						       FINAL_PASS_MEM_STAT);

  ggc_mem_desc.register_object_overhead (usage, allocated + overhead, ptr);
  usage->register_overhead (allocated, overhead);
}

/* Notice that the pointer has been freed.  */
void
ggc_free_overhead (void *ptr)
{
  ggc_mem_desc.release_object_overhead (ptr);
}

/* After live values have been marked, walk all recorded pointers and see
   if they are still live.  */
void
ggc_prune_overhead_list (void)
{
  typedef hash_map<const void *, std::pair<ggc_usage *, size_t > > map_t;

  map_t::iterator it = ggc_mem_desc.m_reverse_object_map->begin ();

  for (; it != ggc_mem_desc.m_reverse_object_map->end (); ++it)
    if (!ggc_marked_p ((*it).first))
      {
	(*it).second.first->m_collected += (*it).second.second;
	ggc_mem_desc.m_reverse_object_map->remove ((*it).first);
      }
}

/* Report the memory used by the malloc heap, in kilobytes, if this
   information is available.  */

void
report_heap_memory_use ()
{
#ifdef HAVE_MALLINFO
  if (!quiet_flag)
    fprintf (stderr, " {heap %luk}",
	     (unsigned long) (mallinfo ().arena / 1024));
#endif
}
1046