/* Simple garbage collection for the GNU compiler.
   Copyright (C) 1999-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Generic garbage collection (GC) functions and data, not specific to
   any particular GC implementation.  */

#include "config.h"
#define INCLUDE_MALLOC_H
#include "system.h"
#include "coretypes.h"
#include "timevar.h"
#include "diagnostic-core.h"
#include "ggc-internal.h"
#include "hosthooks.h"
#include "plugin.h"
#include "options.h"

/* When set, ggc_collect will do collection.  */
bool ggc_force_collect;

/* When true, protect the contents of the identifier hash table.  */
bool ggc_protect_identifiers = true;

/* Statistics about the allocation.  */
static ggc_statistics *ggc_stats;

struct traversal_state;

static int compare_ptr_data (const void *, const void *);
static void relocate_ptrs (void *, void *);
static void write_pch_globals (const struct ggc_root_tab * const *tab,
                               struct traversal_state *state);

/* Maintain global roots that are preserved during GC.  */

/* This extra vector of dynamically registered root_tab-s is used by
   ggc_mark_roots and gives the ability to dynamically add new GGC root
   tables, for instance from some plugins; this vector is on the heap
   since it is used by GGC internally.  */
typedef const struct ggc_root_tab *const_ggc_root_tab_t;
static vec<const_ggc_root_tab_t> extra_root_vec;

/* Dynamically register a new GGC root table RT.  This is useful for
   plugins.  */

void
ggc_register_root_tab (const struct ggc_root_tab* rt)
{
  if (rt)
    extra_root_vec.safe_push (rt);
}

/* Mark all the roots in the table RT.  */

static void
ggc_mark_root_tab (const_ggc_root_tab_t rt)
{
  size_t i;

  for ( ; rt->base != NULL; rt++)
    for (i = 0; i < rt->nelt; i++)
      (*rt->cb) (*(void **) ((char *)rt->base + rt->stride * i));
}
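
/* For illustration only (the specific array and callbacks below are an
   example, not taken from this file): a gengtype-generated entry in such a
   table looks roughly like

     { &global_trees[0], ARRAY_SIZE (global_trees), sizeof (global_trees[0]),
       &gt_ggc_mx_tree_node, &gt_pch_nx_tree_node },

   i.e. a base address, an element count, the per-element stride, and the
   marking/PCH callbacks that the loop above applies to every element.  */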

/* Iterate through all registered roots and mark each element.  */

void
ggc_mark_roots (void)
{
  const struct ggc_root_tab *const *rt;
  const_ggc_root_tab_t rtp, rti;
  size_t i;

  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  for (rt = gt_ggc_rtab; *rt; rt++)
    ggc_mark_root_tab (*rt);

  FOR_EACH_VEC_ELT (extra_root_vec, i, rtp)
    ggc_mark_root_tab (rtp);

  if (ggc_protect_identifiers)
    ggc_mark_stringpool ();

  gt_clear_caches ();

  if (! ggc_protect_identifiers)
    ggc_purge_stringpool ();

  /* Some plugins may call ggc_set_mark from here.  */
  invoke_plugin_callbacks (PLUGIN_GGC_MARKING, NULL);
}

/* Allocate a block of memory, then clear it.  */
void *
ggc_internal_cleared_alloc (size_t size, void (*f)(void *), size_t s, size_t n
                            MEM_STAT_DECL)
{
  void *buf = ggc_internal_alloc (size, f, s, n PASS_MEM_STAT);
  memset (buf, 0, size);
  return buf;
}

/* Resize a block of memory, possibly re-allocating it.  */
void *
ggc_realloc (void *x, size_t size MEM_STAT_DECL)
{
  void *r;
  size_t old_size;

  if (x == NULL)
    return ggc_internal_alloc (size PASS_MEM_STAT);

  old_size = ggc_get_size (x);

  if (size <= old_size)
    {
      /* Mark the unwanted memory as inaccessible.  We also need to make
         the "new" size accessible, since ggc_get_size returns the size of
         the pool, not the size of the individually allocated object, the
         size which was previously made accessible.  Unfortunately, we
         don't know that previously allocated size.  Without that
         knowledge we have to lose some initialization-tracking for the
         old parts of the object.  An alternative is to mark the whole
         old_size as reachable, but that would lose tracking of writes
         after the end of the object (by small offsets).  Discard the
         handle to avoid a handle leak.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) x + size,
                                                    old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, size));
      return x;
    }

  r = ggc_internal_alloc (size PASS_MEM_STAT);

  /* Since ggc_get_size returns the size of the pool, not the size of the
     individually allocated object, we'd access parts of the old object
     that were marked invalid with the memcpy below.  We lose a bit of the
     initialization-tracking since some of it may be uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (x, old_size));

  memcpy (r, x, old_size);

  /* The old object is not supposed to be used anymore.  */
  ggc_free (x);

  return r;
}

void *
ggc_cleared_alloc_htab_ignore_args (size_t c ATTRIBUTE_UNUSED,
                                    size_t n ATTRIBUTE_UNUSED)
{
  gcc_assert (c * n == sizeof (struct htab));
  return ggc_cleared_alloc<htab> ();
}

/* TODO: once we actually use type information in GGC, create a new tag
   gt_gcc_ptr_array and use it for pointer arrays.  */
void *
ggc_cleared_alloc_ptr_array_two_args (size_t c, size_t n)
{
  gcc_assert (sizeof (PTR *) == n);
  return ggc_cleared_vec_alloc<PTR *> (c);
}

/* These are for splay_tree_new_ggc.  */
void *
ggc_splay_alloc (int sz, void *nl)
{
  gcc_assert (!nl);
  return ggc_internal_alloc (sz);
}

void
ggc_splay_dont_free (void * x ATTRIBUTE_UNUSED, void *nl)
{
  gcc_assert (!nl);
}

void
ggc_print_common_statistics (FILE *stream ATTRIBUTE_UNUSED,
                             ggc_statistics *stats)
{
  /* Set the pointer so that during collection we will actually gather
     the statistics.  */
  ggc_stats = stats;

  /* Then do one collection to fill in the statistics.  */
  ggc_collect ();

  /* At present, we don't really gather any interesting statistics.  */

  /* Don't gather statistics any more.  */
  ggc_stats = NULL;
}

/* Functions for saving and restoring GCable memory to disk.  */

struct ptr_data
{
  void *obj;
  void *note_ptr_cookie;
  gt_note_pointers note_ptr_fn;
  gt_handle_reorder reorder_fn;
  size_t size;
  void *new_addr;
};

#define POINTER_HASH(x) (hashval_t)((intptr_t)x >> 3)
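
/* GGC objects are at least pointer-aligned on typical hosts, so the low
   three address bits carry almost no information; shifting them away keeps
   the hash values well distributed across buckets.  */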

/* Helper for hashing saving_htab.  */

struct saving_hasher : free_ptr_hash <ptr_data>
{
  typedef void *compare_type;
  static inline hashval_t hash (const ptr_data *);
  static inline bool equal (const ptr_data *, const void *);
};

inline hashval_t
saving_hasher::hash (const ptr_data *p)
{
  return POINTER_HASH (p->obj);
}

inline bool
saving_hasher::equal (const ptr_data *p1, const void *p2)
{
  return p1->obj == p2;
}

static hash_table<saving_hasher> *saving_htab;

/* Register an object in the hash table.  */

int
gt_pch_note_object (void *obj, void *note_ptr_cookie,
                    gt_note_pointers note_ptr_fn)
{
  struct ptr_data **slot;

  if (obj == NULL || obj == (void *) 1)
    return 0;

  slot = (struct ptr_data **)
    saving_htab->find_slot_with_hash (obj, POINTER_HASH (obj), INSERT);
  if (*slot != NULL)
    {
      gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
                  && (*slot)->note_ptr_cookie == note_ptr_cookie);
      return 0;
    }

  *slot = XCNEW (struct ptr_data);
  (*slot)->obj = obj;
  (*slot)->note_ptr_fn = note_ptr_fn;
  (*slot)->note_ptr_cookie = note_ptr_cookie;
  if (note_ptr_fn == gt_pch_p_S)
    (*slot)->size = strlen ((const char *)obj) + 1;
  else
    (*slot)->size = ggc_get_size (obj);
  return 1;
}

/* Register a reorder function for an object already noted in the hash
   table.  */

void
gt_pch_note_reorder (void *obj, void *note_ptr_cookie,
                     gt_handle_reorder reorder_fn)
{
  struct ptr_data *data;

  if (obj == NULL || obj == (void *) 1)
    return;

  data = (struct ptr_data *)
    saving_htab->find_with_hash (obj, POINTER_HASH (obj));
  gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);

  data->reorder_fn = reorder_fn;
}

/* Handy state for the traversal functions.  */

struct traversal_state
{
  FILE *f;
  struct ggc_pch_data *d;
  size_t count;
  struct ptr_data **ptrs;
  size_t ptrs_i;
};

/* Callbacks for htab_traverse.  */

int
ggc_call_count (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  ggc_pch_count_object (state->d, d->obj, d->size,
                        d->note_ptr_fn == gt_pch_p_S);
  state->count++;
  return 1;
}

int
ggc_call_alloc (ptr_data **slot, traversal_state *state)
{
  struct ptr_data *d = *slot;

  d->new_addr = ggc_pch_alloc_object (state->d, d->obj, d->size,
                                      d->note_ptr_fn == gt_pch_p_S);
  state->ptrs[state->ptrs_i++] = d;
  return 1;
}

/* Callback for qsort.  */

static int
compare_ptr_data (const void *p1_p, const void *p2_p)
{
  const struct ptr_data *const p1 = *(const struct ptr_data *const *)p1_p;
  const struct ptr_data *const p2 = *(const struct ptr_data *const *)p2_p;
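  /* The difference of two addresses may not fit in an int, so compute the
     qsort result as (a > b) - (a < b), which is always -1, 0 or 1.  */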
  return (((size_t)p1->new_addr > (size_t)p2->new_addr)
          - ((size_t)p1->new_addr < (size_t)p2->new_addr));
}

/* Callbacks for note_ptr_fn.  */

static void
relocate_ptrs (void *ptr_p, void *state_p)
{
  void **ptr = (void **)ptr_p;
  struct traversal_state *state ATTRIBUTE_UNUSED
    = (struct traversal_state *)state_p;
  struct ptr_data *result;

  if (*ptr == NULL || *ptr == (void *)1)
    return;

  result = (struct ptr_data *)
    saving_htab->find_with_hash (*ptr, POINTER_HASH (*ptr));
  gcc_assert (result);
  *ptr = result->new_addr;
}

/* Write out, after relocation, the pointers in TAB.  */
static void
write_pch_globals (const struct ggc_root_tab * const *tab,
                   struct traversal_state *state)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;

  for (rt = tab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        {
          void *ptr = *(void **)((char *)rti->base + rti->stride * i);
          struct ptr_data *new_ptr;
          if (ptr == NULL || ptr == (void *)1)
            {
              if (fwrite (&ptr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error (input_location, "cannot write PCH file: %m");
            }
          else
            {
              new_ptr = (struct ptr_data *)
                saving_htab->find_with_hash (ptr, POINTER_HASH (ptr));
              if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
                  != 1)
                fatal_error (input_location, "cannot write PCH file: %m");
            }
        }
}

/* Hold the information we need to mmap the file back in.  */

struct mmap_info
{
  size_t offset;
  size_t size;
  void *preferred_base;
};

/* Write out the state of the compiler to F.  */

void
gt_pch_save (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct traversal_state state;
  char *this_object = NULL;
  size_t this_object_size = 0;
  struct mmap_info mmi;
  const size_t mmap_offset_alignment = host_hooks.gt_pch_alloc_granularity ();

  gt_pch_save_stringpool ();

  timevar_push (TV_PCH_PTR_REALLOC);
  saving_htab = new hash_table<saving_hasher> (50000);

  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        (*rti->pchw)(*(void **)((char *)rti->base + rti->stride * i));

  /* Prepare the objects for writing, determine addresses and such.  */
  state.f = f;
  state.d = init_ggc_pch ();
  state.count = 0;
  saving_htab->traverse <traversal_state *, ggc_call_count> (&state);

  mmi.size = ggc_pch_total_size (state.d);

  /* Try to arrange things so that no relocation is necessary, but
     don't try very hard.  On most platforms, this will always work,
     and on the rest it's a lot of work to do better.
     (The extra work goes in HOST_HOOKS_GT_PCH_GET_ADDRESS and
     HOST_HOOKS_GT_PCH_USE_ADDRESS.)  */
  mmi.preferred_base = host_hooks.gt_pch_get_address (mmi.size, fileno (f));

  ggc_pch_this_base (state.d, mmi.preferred_base);

  state.ptrs = XNEWVEC (struct ptr_data *, state.count);
  state.ptrs_i = 0;

  saving_htab->traverse <traversal_state *, ggc_call_alloc> (&state);
  timevar_pop (TV_PCH_PTR_REALLOC);

  timevar_push (TV_PCH_PTR_SORT);
  qsort (state.ptrs, state.count, sizeof (*state.ptrs), compare_ptr_data);
  timevar_pop (TV_PCH_PTR_SORT);

  /* Write out all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fwrite (rti->base, rti->stride, 1, f) != 1)
        fatal_error (input_location, "cannot write PCH file: %m");

  /* Write out all the global pointers, after translation.  */
  write_pch_globals (gt_ggc_rtab, &state);

  /* Pad the PCH file so that the mmapped area starts on an allocation
     granularity (usually page) boundary.  */
  {
    long o;
    o = ftell (state.f) + sizeof (mmi);
    if (o == -1)
      fatal_error (input_location, "cannot get position in PCH file: %m");
    mmi.offset = mmap_offset_alignment - o % mmap_offset_alignment;
    if (mmi.offset == mmap_offset_alignment)
      mmi.offset = 0;
    mmi.offset += o;
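    /* For example (hypothetical numbers), with a 4096-byte granularity and
       o == 10000 this computes mmi.offset == 4096 - 1808 + 10000 == 12288,
       the next granularity boundary at or after o; when o is already
       aligned the result is simply o.  */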
  }
  if (fwrite (&mmi, sizeof (mmi), 1, state.f) != 1)
    fatal_error (input_location, "cannot write PCH file: %m");
  if (mmi.offset != 0
      && fseek (state.f, mmi.offset, SEEK_SET) != 0)
    fatal_error (input_location, "cannot write padding to PCH file: %m");

  ggc_pch_prepare_write (state.d, state.f);

#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
  vec<char> vbits = vNULL;
#endif

  /* Actually write out the objects.  */
  for (i = 0; i < state.count; i++)
    {
      if (this_object_size < state.ptrs[i]->size)
        {
          this_object_size = state.ptrs[i]->size;
          this_object = XRESIZEVAR (char, this_object, this_object_size);
        }
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
      /* obj might contain uninitialized bytes, e.g. in the trailing
         padding of the object.  Avoid warnings by making the memory
         temporarily defined and then restoring previous state.  */
      int get_vbits = 0;
      size_t valid_size = state.ptrs[i]->size;
      if (__builtin_expect (RUNNING_ON_VALGRIND, 0))
        {
          if (vbits.length () < valid_size)
            vbits.safe_grow (valid_size);
          get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
                                          vbits.address (), valid_size);
          if (get_vbits == 3)
            {
              /* We assume that the first part of obj is addressable, and
                 the rest is unaddressable.  Find out where the boundary is
                 using binary search.  */
              size_t lo = 0, hi = valid_size;
              while (hi > lo)
                {
                  size_t mid = (lo + hi) / 2;
                  get_vbits = VALGRIND_GET_VBITS ((char *) state.ptrs[i]->obj
                                                  + mid, vbits.address (),
                                                  1);
                  if (get_vbits == 3)
                    hi = mid;
                  else if (get_vbits == 1)
                    lo = mid + 1;
                  else
                    break;
                }
              if (get_vbits == 1 || get_vbits == 3)
                {
                  valid_size = lo;
                  get_vbits = VALGRIND_GET_VBITS (state.ptrs[i]->obj,
                                                  vbits.address (),
                                                  valid_size);
                }
            }
          if (get_vbits == 1)
            VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (state.ptrs[i]->obj,
                                                         state.ptrs[i]->size));
        }
#endif
      memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
      if (state.ptrs[i]->reorder_fn != NULL)
        state.ptrs[i]->reorder_fn (state.ptrs[i]->obj,
                                   state.ptrs[i]->note_ptr_cookie,
                                   relocate_ptrs, &state);
      state.ptrs[i]->note_ptr_fn (state.ptrs[i]->obj,
                                  state.ptrs[i]->note_ptr_cookie,
                                  relocate_ptrs, &state);
      ggc_pch_write_object (state.d, state.f, state.ptrs[i]->obj,
                            state.ptrs[i]->new_addr, state.ptrs[i]->size,
                            state.ptrs[i]->note_ptr_fn == gt_pch_p_S);
      if (state.ptrs[i]->note_ptr_fn != gt_pch_p_S)
        memcpy (state.ptrs[i]->obj, this_object, state.ptrs[i]->size);
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
      if (__builtin_expect (get_vbits == 1, 0))
        {
          (void) VALGRIND_SET_VBITS (state.ptrs[i]->obj, vbits.address (),
                                     valid_size);
          if (valid_size != state.ptrs[i]->size)
            VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *)
                                                          state.ptrs[i]->obj
                                                          + valid_size,
                                                          state.ptrs[i]->size
                                                          - valid_size));
        }
#endif
    }
#if defined ENABLE_VALGRIND_ANNOTATIONS && defined VALGRIND_GET_VBITS
  vbits.release ();
#endif

  ggc_pch_finish (state.d, state.f);
  gt_pch_fixup_stringpool ();

  XDELETE (state.ptrs);
  XDELETE (this_object);
  delete saving_htab;
  saving_htab = NULL;
}

/* Read the state of the compiler back in from F.  */

void
gt_pch_restore (FILE *f)
{
  const struct ggc_root_tab *const *rt;
  const struct ggc_root_tab *rti;
  size_t i;
  struct mmap_info mmi;
  int result;
  struct line_maps * old_line_table = line_table;
  location_t old_input_loc = input_location;

  /* Delete any deletable objects.  This makes ggc_pch_read much
     faster, as it can be sure that no GCable objects remain other
     than the ones just read in.  */
  for (rt = gt_ggc_deletable_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      memset (rti->base, 0, rti->stride);

  /* Read in all the scalar variables.  */
  for (rt = gt_pch_scalar_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      if (fread (rti->base, rti->stride, 1, f) != 1)
        {
          line_table = old_line_table;
          input_location = old_input_loc;
          fatal_error (input_location, "cannot read PCH file: %m");
        }

  /* Read in all the global pointers, in 6 easy loops.  */
  for (rt = gt_ggc_rtab; *rt; rt++)
    for (rti = *rt; rti->base != NULL; rti++)
      for (i = 0; i < rti->nelt; i++)
        if (fread ((char *)rti->base + rti->stride * i,
                   sizeof (void *), 1, f) != 1)
          {
            line_table = old_line_table;
            input_location = old_input_loc;
            fatal_error (input_location, "cannot read PCH file: %m");
          }

  if (fread (&mmi, sizeof (mmi), 1, f) != 1)
    {
      line_table = old_line_table;
      input_location = old_input_loc;
      fatal_error (input_location, "cannot read PCH file: %m");
    }

  result = host_hooks.gt_pch_use_address (mmi.preferred_base, mmi.size,
                                          fileno (f), mmi.offset);
  if (result < 0)
    {
      line_table = old_line_table;
      input_location = old_input_loc;
      fatal_error (input_location, "had to relocate PCH");
    }
  if (result == 0)
    {
      if (fseek (f, mmi.offset, SEEK_SET) != 0
          || fread (mmi.preferred_base, mmi.size, 1, f) != 1)
        {
          line_table = old_line_table;
          input_location = old_input_loc;
          fatal_error (input_location, "cannot read PCH file: %m");
        }
    }
  else if (fseek (f, mmi.offset + mmi.size, SEEK_SET) != 0)
    {
      line_table = old_line_table;
      input_location = old_input_loc;
      fatal_error (input_location, "cannot read PCH file: %m");
    }

  ggc_pch_read (f, mmi.preferred_base);

  gt_pch_restore_stringpool ();
}

/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is not present.
   Select no address whatsoever, and let gt_pch_save choose what it will with
   malloc, presumably.  */

void *
default_gt_pch_get_address (size_t size ATTRIBUTE_UNUSED,
                            int fd ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is not present.
   Allocate SIZE bytes with malloc.  Return 0 if the address we got is the
   same as base, indicating that the memory has been allocated but needs to
   be read in from the file.  Return -1 if the address differs, so that
   relocation of the PCH file would be required.  */

int
default_gt_pch_use_address (void *base, size_t size, int fd ATTRIBUTE_UNUSED,
                            size_t offset ATTRIBUTE_UNUSED)
{
  void *addr = xmalloc (size);
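
  /* (addr == base) evaluates to 1 when malloc happened to return exactly
     BASE and to 0 otherwise, so this returns 0 on success and -1 when
     relocation would be required.  */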
  return (addr == base) - 1;
}

/* Default version of HOST_HOOKS_GT_PCH_ALLOC_GRANULARITY.  Return the
   alignment required for allocating virtual memory.  Usually this is the
   same as pagesize.  */

size_t
default_gt_pch_alloc_granularity (void)
{
  return getpagesize ();
}

#if HAVE_MMAP_FILE
/* Default version of HOST_HOOKS_GT_PCH_GET_ADDRESS when mmap is present.
   We temporarily allocate SIZE bytes, and let the kernel place the data
   wherever it will.  If it worked, that's our spot, if not we're likely
   to be in trouble.  */

void *
mmap_gt_pch_get_address (size_t size, int fd)
{
  void *ret;

  ret = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  if (ret == (void *) MAP_FAILED)
    ret = NULL;
  else
    munmap ((caddr_t) ret, size);

  return ret;
}

/* Default version of HOST_HOOKS_GT_PCH_USE_ADDRESS when mmap is present.
   Map SIZE bytes of FD+OFFSET at BASE.  Return 1 if we succeeded at
   mapping the data at BASE, -1 if we couldn't.

   This version assumes that the kernel honors the START operand of mmap
   even without MAP_FIXED if START through START+SIZE are not currently
   mapped with something.  */

int
mmap_gt_pch_use_address (void *base, size_t size, int fd, size_t offset)
{
  void *addr;

  /* We're called with size == 0 if we're not planning to load a PCH
     file at all.  This allows the hook to free any static space that
     we might have allocated at link time.  */
  if (size == 0)
    return -1;

  addr = mmap ((caddr_t) base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
               fd, offset);

  return addr == base ? 1 : -1;
}
#endif /* HAVE_MMAP_FILE */

#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT

/* Modify the bound based on rlimits.  */
static double
ggc_rlimit_bound (double limit)
{
#if defined(HAVE_GETRLIMIT)
  struct rlimit rlim;
# if defined (RLIMIT_AS)
  /* RLIMIT_AS is what POSIX says is the limit on mmap.  Presumably
     any OS which has RLIMIT_AS also has a working mmap that GCC will use.  */
  if (getrlimit (RLIMIT_AS, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit)
    limit = rlim.rlim_cur;
# elif defined (RLIMIT_DATA)
  /* ... but some older OSs bound mmap based on RLIMIT_DATA, or we
     might be on an OS that has a broken mmap.  (Others don't bound
     mmap at all, apparently.)  */
  if (getrlimit (RLIMIT_DATA, &rlim) == 0
      && rlim.rlim_cur != (rlim_t) RLIM_INFINITY
      && rlim.rlim_cur < limit
      /* Darwin has this horribly bogus default setting of
         RLIMIT_DATA, to 6144Kb.  No-one notices because RLIMIT_DATA
         appears to be ignored.  Ignore such silliness.  If a limit
         this small was actually effective for mmap, GCC wouldn't even
         start up.  */
      && rlim.rlim_cur >= 8 * 1024 * 1024)
    limit = rlim.rlim_cur;
# endif /* RLIMIT_AS or RLIMIT_DATA */
#endif /* HAVE_GETRLIMIT */

  return limit;
}

/* Heuristic to set a default for GGC_MIN_EXPAND.  */
static int
ggc_min_expand_heuristic (void)
{
  double min_expand = physmem_total ();

  /* Adjust for rlimits.  */
  min_expand = ggc_rlimit_bound (min_expand);

  /* The heuristic is a percentage equal to 30% + 70%*(RAM/1GB), yielding
     a lower bound of 30% and an upper bound of 100% (when RAM >= 1GB).  */
  min_expand /= 1024*1024*1024;
  min_expand *= 70;
  min_expand = MIN (min_expand, 70);
  min_expand += 30;
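
  /* For example, on a host reporting 512MB of physical memory this yields
     30 + 70 * 0.5 = 65 (percent); with 1GB or more it saturates at 100.  */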

  return min_expand;
}

/* Heuristic to set a default for GGC_MIN_HEAPSIZE.  */
static int
ggc_min_heapsize_heuristic (void)
{
  double phys_kbytes = physmem_total ();
  double limit_kbytes = ggc_rlimit_bound (phys_kbytes * 2);

  phys_kbytes /= 1024; /* Convert to Kbytes.  */
  limit_kbytes /= 1024;

  /* The heuristic is RAM/8, with a lower bound of 4M and an upper
     bound of 128M (when RAM >= 1GB).  */
  phys_kbytes /= 8;
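
  /* For example, 2GB of RAM gives 2097152 kB / 8 = 262144 kB here, which
     the clamps below then reduce to the 128MB (131072 kB) upper bound.  */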

#if defined(HAVE_GETRLIMIT) && defined (RLIMIT_RSS)
  /* Try not to overrun the RSS limit while doing garbage collection.
     The RSS limit is only advisory, so no margin is subtracted.  */
  {
    struct rlimit rlim;
    if (getrlimit (RLIMIT_RSS, &rlim) == 0
        && rlim.rlim_cur != (rlim_t) RLIM_INFINITY)
      phys_kbytes = MIN (phys_kbytes, rlim.rlim_cur / 1024);
  }
# endif

  /* Don't blindly run over our data limit; do GC at least when the
     *next* GC would be within 20Mb of the limit or within a quarter of
     the limit, whichever is larger.  If GCC does hit the data limit,
     compilation will fail, so this tries to be conservative.  */
  limit_kbytes = MAX (0, limit_kbytes - MAX (limit_kbytes / 4, 20 * 1024));
  limit_kbytes = (limit_kbytes * 100) / (110 + ggc_min_expand_heuristic ());
  phys_kbytes = MIN (phys_kbytes, limit_kbytes);

  phys_kbytes = MAX (phys_kbytes, 4 * 1024);
  phys_kbytes = MIN (phys_kbytes, 128 * 1024);

  return phys_kbytes;
}
#endif

void
init_ggc_heuristics (void)
{
#if !defined ENABLE_GC_CHECKING && !defined ENABLE_GC_ALWAYS_COLLECT
  param_ggc_min_expand = ggc_min_expand_heuristic ();
  param_ggc_min_heapsize = ggc_min_heapsize_heuristic ();
#endif
}

/* GGC memory usage.  */
class ggc_usage: public mem_usage
{
public:
  /* Default constructor.  */
  ggc_usage (): m_freed (0), m_collected (0), m_overhead (0) {}
  /* Constructor.  */
  ggc_usage (size_t allocated, size_t times, size_t peak,
             size_t freed, size_t collected, size_t overhead)
    : mem_usage (allocated, times, peak),
      m_freed (freed), m_collected (collected), m_overhead (overhead) {}

  /* Equality operator.  */
  inline bool
  operator== (const ggc_usage &second) const
  {
    return (get_balance () == second.get_balance ()
            && m_peak == second.m_peak
            && m_times == second.m_times);
  }

  /* Comparison operator.  */
  inline bool
  operator< (const ggc_usage &second) const
  {
    if (*this == second)
      return false;

    return (get_balance () == second.get_balance () ?
            (m_peak == second.m_peak ? m_times < second.m_times
             : m_peak < second.m_peak)
            : get_balance () < second.get_balance ());
  }

  /* Register overhead of ALLOCATED and OVERHEAD bytes.  */
  inline void
  register_overhead (size_t allocated, size_t overhead)
  {
    m_allocated += allocated;
    m_overhead += overhead;
    m_times++;
  }

  /* Release overhead of SIZE bytes.  */
  inline void
  release_overhead (size_t size)
  {
    m_freed += size;
  }

  /* Sum the usage with SECOND usage.  */
  ggc_usage
  operator+ (const ggc_usage &second)
  {
    return ggc_usage (m_allocated + second.m_allocated,
                      m_times + second.m_times,
                      m_peak + second.m_peak,
                      m_freed + second.m_freed,
                      m_collected + second.m_collected,
                      m_overhead + second.m_overhead);
  }

  /* Dump usage with PREFIX, where TOTAL is sum of all rows.  */
  inline void
  dump (const char *prefix, ggc_usage &total) const
  {
    size_t balance = get_balance ();
    fprintf (stderr,
             "%-48s " PRsa (9) ":%5.1f%%" PRsa (9) ":%5.1f%%"
             PRsa (9) ":%5.1f%%" PRsa (9) ":%5.1f%%" PRsa (9) "\n",
             prefix,
             SIZE_AMOUNT (balance), get_percent (balance, total.get_balance ()),
             SIZE_AMOUNT (m_collected),
             get_percent (m_collected, total.m_collected),
             SIZE_AMOUNT (m_freed), get_percent (m_freed, total.m_freed),
             SIZE_AMOUNT (m_overhead),
             get_percent (m_overhead, total.m_overhead),
             SIZE_AMOUNT (m_times));
  }

  /* Dump usage coupled to LOC location, where TOTAL is sum of all rows.  */
  inline void
  dump (mem_location *loc, ggc_usage &total) const
  {
    char *location_string = loc->to_string ();

    dump (location_string, total);

    free (location_string);
  }

  /* Dump footer.  */
  inline void
  dump_footer ()
  {
    dump ("Total", *this);
  }

  /* Get the balance, i.e. the GGC allocation leak: bytes that have been
     neither collected nor freed.  */
  inline size_t
  get_balance () const
  {
    return m_allocated + m_overhead - m_collected - m_freed;
  }

  typedef std::pair<mem_location *, ggc_usage *> mem_pair_t;

  /* Compare wrapper used by qsort method.  */
  static int
  compare (const void *first, const void *second)
  {
    const mem_pair_t mem1 = *(const mem_pair_t *) first;
    const mem_pair_t mem2 = *(const mem_pair_t *) second;

    size_t balance1 = mem1.second->get_balance ();
    size_t balance2 = mem2.second->get_balance ();

    return balance1 == balance2 ? 0 : (balance1 < balance2 ? 1 : -1);
  }

  /* Dump header with NAME.  */
  static inline void
  dump_header (const char *name)
  {
    fprintf (stderr, "%-48s %11s%17s%17s%16s%17s\n", name, "Leak", "Garbage",
             "Freed", "Overhead", "Times");
  }

  /* Freed memory in bytes.  */
  size_t m_freed;
  /* Collected memory in bytes.  */
  size_t m_collected;
  /* Overhead memory in bytes.  */
  size_t m_overhead;
};

/* GCC memory description.  */
static mem_alloc_description<ggc_usage> ggc_mem_desc;

/* Dump per-site memory statistics.  */

void
dump_ggc_loc_statistics ()
{
  if (! GATHER_STATISTICS)
    return;

  ggc_force_collect = true;
  ggc_collect ();

  ggc_mem_desc.dump (GGC_ORIGIN);

  ggc_force_collect = false;
}

/* Record ALLOCATED and OVERHEAD bytes to descriptor NAME:LINE (FUNCTION).  */
void
ggc_record_overhead (size_t allocated, size_t overhead, void *ptr MEM_STAT_DECL)
{
  ggc_usage *usage = ggc_mem_desc.register_descriptor (ptr, GGC_ORIGIN, false
                                                       FINAL_PASS_MEM_STAT);

  ggc_mem_desc.register_object_overhead (usage, allocated + overhead, ptr);
  usage->register_overhead (allocated, overhead);
}

/* Notice that the pointer has been freed.  */
void
ggc_free_overhead (void *ptr)
{
  ggc_mem_desc.release_object_overhead (ptr);
}

/* After live values have been marked, walk all recorded pointers and see if
   they are still live.  */
void
ggc_prune_overhead_list (void)
{
  typedef hash_map<const void *, std::pair<ggc_usage *, size_t > > map_t;

  map_t::iterator it = ggc_mem_desc.m_reverse_object_map->begin ();

  for (; it != ggc_mem_desc.m_reverse_object_map->end (); ++it)
    if (!ggc_marked_p ((*it).first))
      {
        (*it).second.first->m_collected += (*it).second.second;
        ggc_mem_desc.m_reverse_object_map->remove ((*it).first);
      }
}

/* Report the memory used by the malloc heap, in kilobytes, when that
   information is available.  */

void
report_heap_memory_use ()
{
#ifdef HAVE_MALLINFO
  if (!quiet_flag)
    fprintf (stderr, " {heap %luk}", (unsigned long)(mallinfo ().arena / 1024));
#endif
}