/* "Bag-of-pages" garbage collector for the GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009,
   2010, 2011 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "ggc.h"
#include "ggc-internal.h"
#include "timevar.h"
#include "params.h"
#include "tree-flow.h"
#include "cfgloop.h"
#include "plugin.h"

/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
   file open.  Prefer either to valloc.  */
#ifdef HAVE_MMAP_ANON
# undef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifdef HAVE_MMAP_DEV_ZERO
# define USING_MMAP
#endif

#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#endif

#if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
    && defined(USING_MMAP)
# define USING_MADVISE
#endif

/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at eight bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */
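
/* For illustration (editorial note, not part of the original source):
   with only the power-of-two orders, a 20-byte request would round up
   to the 32-byte bucket (order 5).  The extra-order machinery below
   lets such a request be served from a tighter 24-byte bucket instead
   (MAX_ALIGNMENT * 3 on a host with 8-byte alignment), via a lookup
   table adjusted at initialization.  */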

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR  HOST_BITS_PER_LONG
#endif


/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

				   HOST_PAGE_SIZE_BITS
			   32		|      |
       msb +----------------+----+------+------+ lsb
			    |    |      |
			 PAGE_L1_BITS   |
				 |      |
			       PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((size_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
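
/* Worked example (editorial, assuming 4K pages, so G.lg_pagesize == 12):
   PAGE_L2_BITS is 32 - 8 - 12 == 12, and for p == 0x12345678
     LOOKUP_L1 (p) == 0x12   (the top 8 bits)
     LOOKUP_L2 (p) == 0x345  ((0x12345678 >> 12) & 0xfff).  */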

/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
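
/* Worked example (editorial): for an object size Z == 24 (odd part 3,
   three factors of two) on a host with 32-bit size_t, one pair that
   satisfies the property above is mult == 0xaaaaaaab and shift == 3,
   since 0xaaaaaaab is the inverse of 3 mod 2^32
   (3 * 0xaaaaaaab == 0x200000001).  For OFFSET == 48:
     (48 * 0xaaaaaaab) mod 2^32 == 16, and 16 >> 3 == 2 == 48 / 24.  */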

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.
   We do not care about alignment for floating-point types.  */

struct max_alignment {
  char c;
  union {
    HOST_WIDEST_INT i;
    void *p;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
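
/* For example (editorial): on a typical LP64 host, both HOST_WIDEST_INT
   and void * are 8 bytes with 8-byte alignment, so the union u lands at
   offset 8 in struct max_alignment and MAX_ALIGNMENT evaluates to 8.  */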


/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     There are a lot of structures with these sizes and explicitly
     listing them risks orders being dropped because they changed size.  */
  MAX_ALIGNMENT * 3,
  MAX_ALIGNMENT * 5,
  MAX_ALIGNMENT * 6,
  MAX_ALIGNMENT * 7,
  MAX_ALIGNMENT * 9,
  MAX_ALIGNMENT * 10,
  MAX_ALIGNMENT * 11,
  MAX_ALIGNMENT * 12,
  MAX_ALIGNMENT * 13,
  MAX_ALIGNMENT * 14,
  MAX_ALIGNMENT * 15,
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_type_non_common),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (struct cgraph_node),
  sizeof (struct loop),
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Compute the smallest multiple of F that is >= X.  */

#define ROUND_UP(x, f) (CEIL (x, f) * (f))

/* Round X up to the next multiple of the page size.  */

#define PAGE_ALIGN(x) (((x) + G.pagesize - 1) & ~(G.pagesize - 1))
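
/* Worked arithmetic (editorial, assuming G.pagesize == 4096):
     ROUND_UP_VALUE (20, 8) == 4      (20 + 4 is the next multiple of 8)
     ROUND_UP (20, 8)       == 24
     PAGE_ALIGN (5000)      == 8192.  */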

/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];

/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
typedef struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.   The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* Discarded page? */
  bool discarded;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
} page_entry;

#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
typedef struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
} page_group;
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
   next collection.  */
struct free_object
{
  void *object;
  struct free_object *next;
};
#endif

/* The rest of the global variables.  */
static struct globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This array is indexed by the depth we are
     interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object *free_object_list;
#endif

#ifdef GATHER_STATISTICS
  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
#endif
} G;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof(long))
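
/* Example (editorial, assuming 64-bit longs and 4K pages): a page of
   order 3 holds 512 eight-byte objects, and alloc_page below sizes the
   bitmap for 512 + 1 bits (the extra bit is a sentry), so
   BITMAP_SIZE (513) == CEIL (513, 64) * 8 == 72 bytes.  */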

/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128

static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, bool check);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);

/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
				  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))

/* Returns nonzero if P was allocated in GC'able memory.  */

static inline int
ggc_allocated_p (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return 0;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1] && base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

DEBUG_FUNCTION void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
	  (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      if (!check)
        return NULL;
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;
  entry_size = PAGE_ALIGN (entry_size);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      if (p->discarded)
        G.bytes_mapped += p->bytes;
      p->discarded = false;

      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i, entries = GGC_QUIRE_SIZE;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
      if (page == NULL)
	{
	  page = alloc_anon (NULL, G.pagesize, true);
	  entries = 1;
	}

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = entries - 1; i >= 1; i--)
	{
	  e = XCNEWVAR (struct page_entry, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size, true);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
	 pages therein.  This results in much less memory wastage
	 than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
      allocation = XNEWVEC (char, alloc_size);

      page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
	 us with N-1 usable pages.  We plan to place the page_group
	 structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
	group = (page_group *)page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      enda -= G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  gcc_assert (tail_slop >= sizeof (page_group));
	  group = (page_group *)enda;
	  tail_slop -= sizeof (page_group);
	}

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = G.free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
	      e = XCNEWVAR (struct page_entry, page_entry_size);
	      e->order = order;
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  G.free_pages = f;
	}
    }
#endif

  if (entry == NULL)
    entry = XCNEWVAR (struct page_entry, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%lu, data %p-%p\n",
	     (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
	     page + entry_size - 1);

  return entry;
}

/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
	 as new elements are added to by_depth, we note the indices
	 of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
	--G.depth_in_use;
    }
}

/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
	     entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
	 one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}

978e4b17023SJohn Marino /* Release the free page cache to the system.  */
979e4b17023SJohn Marino 
980e4b17023SJohn Marino static void
release_pages(void)981e4b17023SJohn Marino release_pages (void)
982e4b17023SJohn Marino {
983e4b17023SJohn Marino #ifdef USING_MADVISE
984e4b17023SJohn Marino   page_entry *p, *start_p;
985e4b17023SJohn Marino   char *start;
986e4b17023SJohn Marino   size_t len;
987e4b17023SJohn Marino   size_t mapped_len;
988e4b17023SJohn Marino   page_entry *next, *prev, *newprev;
989e4b17023SJohn Marino   size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
990e4b17023SJohn Marino 
991e4b17023SJohn Marino   /* First free larger continuous areas to the OS.
992e4b17023SJohn Marino      This allows other allocators to grab these areas if needed.
993e4b17023SJohn Marino      This is only done on larger chunks to avoid fragmentation.
994e4b17023SJohn Marino      This does not always work because the free_pages list is only
995e4b17023SJohn Marino      approximately sorted. */
996e4b17023SJohn Marino 
997e4b17023SJohn Marino   p = G.free_pages;
998e4b17023SJohn Marino   prev = NULL;
999e4b17023SJohn Marino   while (p)
1000e4b17023SJohn Marino     {
1001e4b17023SJohn Marino       start = p->page;
1002e4b17023SJohn Marino       start_p = p;
1003e4b17023SJohn Marino       len = 0;
1004e4b17023SJohn Marino       mapped_len = 0;
1005e4b17023SJohn Marino       newprev = prev;
1006e4b17023SJohn Marino       while (p && p->page == start + len)
1007e4b17023SJohn Marino         {
1008e4b17023SJohn Marino           len += p->bytes;
1009e4b17023SJohn Marino 	  if (!p->discarded)
1010e4b17023SJohn Marino 	      mapped_len += p->bytes;
1011e4b17023SJohn Marino 	  newprev = p;
1012e4b17023SJohn Marino           p = p->next;
1013e4b17023SJohn Marino         }
1014e4b17023SJohn Marino       if (len >= free_unit)
1015e4b17023SJohn Marino         {
1016e4b17023SJohn Marino           while (start_p != p)
1017e4b17023SJohn Marino             {
1018e4b17023SJohn Marino               next = start_p->next;
1019e4b17023SJohn Marino               free (start_p);
1020e4b17023SJohn Marino               start_p = next;
1021e4b17023SJohn Marino             }
1022e4b17023SJohn Marino           munmap (start, len);
1023e4b17023SJohn Marino 	  if (prev)
1024e4b17023SJohn Marino 	    prev->next = p;
1025e4b17023SJohn Marino           else
1026e4b17023SJohn Marino             G.free_pages = p;
1027e4b17023SJohn Marino           G.bytes_mapped -= mapped_len;
1028e4b17023SJohn Marino 	  continue;
1029e4b17023SJohn Marino         }
1030e4b17023SJohn Marino       prev = newprev;
1031e4b17023SJohn Marino    }
1032e4b17023SJohn Marino 
1033e4b17023SJohn Marino   /* Now give back the fragmented pages to the OS, but keep the address
1034e4b17023SJohn Marino      space to reuse it next time. */
1035e4b17023SJohn Marino 
1036e4b17023SJohn Marino   for (p = G.free_pages; p; )
1037e4b17023SJohn Marino     {
1038e4b17023SJohn Marino       if (p->discarded)
1039e4b17023SJohn Marino         {
1040e4b17023SJohn Marino           p = p->next;
1041e4b17023SJohn Marino           continue;
1042e4b17023SJohn Marino         }
1043e4b17023SJohn Marino       start = p->page;
1044e4b17023SJohn Marino       len = p->bytes;
1045e4b17023SJohn Marino       start_p = p;
1046e4b17023SJohn Marino       p = p->next;
1047e4b17023SJohn Marino       while (p && p->page == start + len)
1048e4b17023SJohn Marino         {
1049e4b17023SJohn Marino           len += p->bytes;
1050e4b17023SJohn Marino           p = p->next;
1051e4b17023SJohn Marino         }
1052e4b17023SJohn Marino       /* Give the page back to the kernel, but don't free the mapping.
1053e4b17023SJohn Marino          This avoids fragmentation in the virtual memory map of the
1054e4b17023SJohn Marino  	 process. Next time we can reuse it by just touching it. */
1055e4b17023SJohn Marino       madvise (start, len, MADV_DONTNEED);
1056e4b17023SJohn Marino       /* Don't count those pages as mapped to not touch the garbage collector
1057e4b17023SJohn Marino          unnecessarily. */
1058e4b17023SJohn Marino       G.bytes_mapped -= len;
1059e4b17023SJohn Marino       while (start_p != p)
1060e4b17023SJohn Marino         {
1061e4b17023SJohn Marino           start_p->discarded = true;
1062e4b17023SJohn Marino           start_p = start_p->next;
1063e4b17023SJohn Marino         }
1064e4b17023SJohn Marino     }
1065e4b17023SJohn Marino #endif
1066e4b17023SJohn Marino #if defined(USING_MMAP) && !defined(USING_MADVISE)
1067e4b17023SJohn Marino   page_entry *p, *next;
1068e4b17023SJohn Marino   char *start;
1069e4b17023SJohn Marino   size_t len;
1070e4b17023SJohn Marino 
1071e4b17023SJohn Marino   /* Gather up adjacent pages so they are unmapped together.  */
1072e4b17023SJohn Marino   p = G.free_pages;
1073e4b17023SJohn Marino 
1074e4b17023SJohn Marino   while (p)
1075e4b17023SJohn Marino     {
1076e4b17023SJohn Marino       start = p->page;
1077e4b17023SJohn Marino       next = p->next;
1078e4b17023SJohn Marino       len = p->bytes;
1079e4b17023SJohn Marino       free (p);
1080e4b17023SJohn Marino       p = next;
1081e4b17023SJohn Marino 
1082e4b17023SJohn Marino       while (p && p->page == start + len)
1083e4b17023SJohn Marino 	{
1084e4b17023SJohn Marino 	  next = p->next;
1085e4b17023SJohn Marino 	  len += p->bytes;
1086e4b17023SJohn Marino 	  free (p);
1087e4b17023SJohn Marino 	  p = next;
1088e4b17023SJohn Marino 	}
1089e4b17023SJohn Marino 
1090e4b17023SJohn Marino       munmap (start, len);
1091e4b17023SJohn Marino       G.bytes_mapped -= len;
1092e4b17023SJohn Marino     }
1093e4b17023SJohn Marino 
1094e4b17023SJohn Marino   G.free_pages = NULL;
1095e4b17023SJohn Marino #endif
1096e4b17023SJohn Marino #ifdef USING_MALLOC_PAGE_GROUPS
1097e4b17023SJohn Marino   page_entry **pp, *p;
1098e4b17023SJohn Marino   page_group **gp, *g;
1099e4b17023SJohn Marino 
1100e4b17023SJohn Marino   /* Remove all pages from free page groups from the list.  */
1101e4b17023SJohn Marino   pp = &G.free_pages;
1102e4b17023SJohn Marino   while ((p = *pp) != NULL)
1103e4b17023SJohn Marino     if (p->group->in_use == 0)
1104e4b17023SJohn Marino       {
1105e4b17023SJohn Marino 	*pp = p->next;
1106e4b17023SJohn Marino 	free (p);
1107e4b17023SJohn Marino       }
1108e4b17023SJohn Marino     else
1109e4b17023SJohn Marino       pp = &p->next;
1110e4b17023SJohn Marino 
1111e4b17023SJohn Marino   /* Remove all free page groups, and release the storage.  */
1112e4b17023SJohn Marino   gp = &G.page_groups;
1113e4b17023SJohn Marino   while ((g = *gp) != NULL)
1114e4b17023SJohn Marino     if (g->in_use == 0)
1115e4b17023SJohn Marino       {
1116e4b17023SJohn Marino 	*gp = g->next;
1117e4b17023SJohn Marino 	G.bytes_mapped -= g->alloc_size;
1118e4b17023SJohn Marino 	free (g->allocation);
1119e4b17023SJohn Marino       }
1120e4b17023SJohn Marino     else
1121e4b17023SJohn Marino       gp = &g->next;
1122e4b17023SJohn Marino #endif
1123e4b17023SJohn Marino }
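
/* A minimal standalone sketch of the run-coalescing idea used by the
   loops above: walk a list that is sorted by address and merge entries
   whose pages are physically contiguous, so that a single munmap can
   cover the whole run.  The struct and function are hypothetical --
   the real code walks page_entry lists and must also maintain the
   prev pointers, the discarded flags and G.bytes_mapped -- and the
   sketch deliberately ignores freeing the list nodes themselves.  */
#if 0
struct run { char *page; size_t bytes; struct run *next; };

static void
coalesce_and_unmap (struct run *r)
{
  while (r)
    {
      char *start = r->page;
      size_t len = 0;

      /* Absorb this entry and any physically adjacent successors.  */
      while (r && r->page == start + len)
	{
	  len += r->bytes;
	  r = r->next;
	}

      /* One munmap releases the whole contiguous run.  */
      munmap (start, len);
    }
}
#endif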
1124e4b17023SJohn Marino 
1125e4b17023SJohn Marino /* This table provides a fast way to determine ceil(log_2(size)) for
1126e4b17023SJohn Marino    allocation requests.  The minimum allocation size is eight bytes.  */
1127e4b17023SJohn Marino #define NUM_SIZE_LOOKUP 512
1128e4b17023SJohn Marino static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1129e4b17023SJohn Marino {
1130e4b17023SJohn Marino   3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1131e4b17023SJohn Marino   4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1132e4b17023SJohn Marino   5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1133e4b17023SJohn Marino   6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1134e4b17023SJohn Marino   6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1135e4b17023SJohn Marino   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1136e4b17023SJohn Marino   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1137e4b17023SJohn Marino   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1138e4b17023SJohn Marino   7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1139e4b17023SJohn Marino   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1140e4b17023SJohn Marino   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1141e4b17023SJohn Marino   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1142e4b17023SJohn Marino   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1143e4b17023SJohn Marino   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1144e4b17023SJohn Marino   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1145e4b17023SJohn Marino   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1146e4b17023SJohn Marino   8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1147e4b17023SJohn Marino   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1148e4b17023SJohn Marino   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1149e4b17023SJohn Marino   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1150e4b17023SJohn Marino   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1151e4b17023SJohn Marino   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1152e4b17023SJohn Marino   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1153e4b17023SJohn Marino   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1154e4b17023SJohn Marino   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1155e4b17023SJohn Marino   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1156e4b17023SJohn Marino   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1157e4b17023SJohn Marino   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1158e4b17023SJohn Marino   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1159e4b17023SJohn Marino   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1160e4b17023SJohn Marino   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1161e4b17023SJohn Marino   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
1162e4b17023SJohn Marino };
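
/* Illustrative consistency check for the table above (a hypothetical
   helper, not part of the collector): entry I must name the smallest
   order whose power-of-two object size can hold I bytes, floored at
   order 3 (eight bytes).  This describes only the table's initial
   contents; init_ggc later retargets some entries at the extra
   orders.  */
#if 0
static void
check_size_lookup (void)
{
  size_t i;

  for (i = 0; i < NUM_SIZE_LOOKUP; i++)
    {
      unsigned int order = 3;

      while (((size_t) 1 << order) < i)
	order++;
      gcc_assert (size_lookup[i] == order);
    }
}
#endif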
1163e4b17023SJohn Marino 
1164e4b17023SJohn Marino /* For a given size of memory requested for allocation, return the
1165e4b17023SJohn Marino    actual size that is going to be allocated, as well as the size
1166e4b17023SJohn Marino    order.  */
1167e4b17023SJohn Marino 
1168e4b17023SJohn Marino static void
1169e4b17023SJohn Marino ggc_round_alloc_size_1 (size_t requested_size,
1170e4b17023SJohn Marino 			size_t *size_order,
1171e4b17023SJohn Marino 			size_t *alloced_size)
1172e4b17023SJohn Marino {
1173e4b17023SJohn Marino   size_t order, object_size;
1174e4b17023SJohn Marino 
1175e4b17023SJohn Marino   if (requested_size < NUM_SIZE_LOOKUP)
1176e4b17023SJohn Marino     {
1177e4b17023SJohn Marino       order = size_lookup[requested_size];
1178e4b17023SJohn Marino       object_size = OBJECT_SIZE (order);
1179e4b17023SJohn Marino     }
1180e4b17023SJohn Marino   else
1181e4b17023SJohn Marino     {
1182e4b17023SJohn Marino       order = 10;
1183e4b17023SJohn Marino       while (requested_size > (object_size = OBJECT_SIZE (order)))
1184e4b17023SJohn Marino         order++;
1185e4b17023SJohn Marino     }
1186e4b17023SJohn Marino 
1187e4b17023SJohn Marino   if (size_order)
1188e4b17023SJohn Marino     *size_order = order;
1189e4b17023SJohn Marino   if (alloced_size)
1190e4b17023SJohn Marino     *alloced_size = object_size;
1191e4b17023SJohn Marino }
1192e4b17023SJohn Marino 
1193e4b17023SJohn Marino /* For a given size of memory requested for allocation, return the
1194e4b17023SJohn Marino    actual size that is going to be allocated.  */
1195e4b17023SJohn Marino 
1196e4b17023SJohn Marino size_t
1197e4b17023SJohn Marino ggc_round_alloc_size (size_t requested_size)
1198e4b17023SJohn Marino {
1199e4b17023SJohn Marino   size_t size = 0;
1200e4b17023SJohn Marino 
1201e4b17023SJohn Marino   ggc_round_alloc_size_1 (requested_size, NULL, &size);
1202e4b17023SJohn Marino   return size;
1203e4b17023SJohn Marino }
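
/* Usage sketch for the function above: a 20-byte request is padded to
   the next available object size.  With the table's initial contents
   that is 32 bytes, but a target's extra_order_size_table entries may
   claim a snugger size in that range, so the concrete number here is
   an assumption, not a guarantee.  */
#if 0
static void
example_round_alloc_size (void)
{
  gcc_assert (ggc_round_alloc_size (20) == 32);
}
#endif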
1204e4b17023SJohn Marino 
1205e4b17023SJohn Marino /* Typed allocation function.  Does nothing special in this collector.  */
1206e4b17023SJohn Marino 
1207e4b17023SJohn Marino void *
1208e4b17023SJohn Marino ggc_alloc_typed_stat (enum gt_types_enum type ATTRIBUTE_UNUSED, size_t size
1209e4b17023SJohn Marino 		      MEM_STAT_DECL)
1210e4b17023SJohn Marino {
1211e4b17023SJohn Marino   return ggc_internal_alloc_stat (size PASS_MEM_STAT);
1212e4b17023SJohn Marino }
1213e4b17023SJohn Marino 
1214e4b17023SJohn Marino /* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */
1215e4b17023SJohn Marino 
1216e4b17023SJohn Marino void *
1217e4b17023SJohn Marino ggc_internal_alloc_stat (size_t size MEM_STAT_DECL)
1218e4b17023SJohn Marino {
1219e4b17023SJohn Marino   size_t order, word, bit, object_offset, object_size;
1220e4b17023SJohn Marino   struct page_entry *entry;
1221e4b17023SJohn Marino   void *result;
1222e4b17023SJohn Marino 
1223e4b17023SJohn Marino   ggc_round_alloc_size_1 (size, &order, &object_size);
1224e4b17023SJohn Marino 
1225e4b17023SJohn Marino   /* If there are non-full pages for this size allocation, they are at
1226e4b17023SJohn Marino      the head of the list.  */
1227e4b17023SJohn Marino   entry = G.pages[order];
1228e4b17023SJohn Marino 
1229e4b17023SJohn Marino   /* If there is no page for this object size, or all pages in this
1230e4b17023SJohn Marino      context are full, allocate a new page.  */
1231e4b17023SJohn Marino   if (entry == NULL || entry->num_free_objects == 0)
1232e4b17023SJohn Marino     {
1233e4b17023SJohn Marino       struct page_entry *new_entry;
1234e4b17023SJohn Marino       new_entry = alloc_page (order);
1235e4b17023SJohn Marino 
1236e4b17023SJohn Marino       new_entry->index_by_depth = G.by_depth_in_use;
1237e4b17023SJohn Marino       push_by_depth (new_entry, 0);
1238e4b17023SJohn Marino 
1239e4b17023SJohn Marino       /* We can skip context depths; if we do, make sure we go all the
1240e4b17023SJohn Marino 	 way to the new depth.  */
1241e4b17023SJohn Marino       while (new_entry->context_depth >= G.depth_in_use)
1242e4b17023SJohn Marino 	push_depth (G.by_depth_in_use-1);
1243e4b17023SJohn Marino 
1244e4b17023SJohn Marino       /* If this is the only entry, it's also the tail.  If it is not
1245e4b17023SJohn Marino 	 the only entry, then we must update the PREV pointer of the
1246e4b17023SJohn Marino 	 ENTRY (G.pages[order]) to point to our new page entry.  */
1247e4b17023SJohn Marino       if (entry == NULL)
1248e4b17023SJohn Marino 	G.page_tails[order] = new_entry;
1249e4b17023SJohn Marino       else
1250e4b17023SJohn Marino 	entry->prev = new_entry;
1251e4b17023SJohn Marino 
1252e4b17023SJohn Marino       /* Put new pages at the head of the page list.  By definition the
1253e4b17023SJohn Marino 	 entry at the head of the list always has a NULL PREV pointer.  */
1254e4b17023SJohn Marino       new_entry->next = entry;
1255e4b17023SJohn Marino       new_entry->prev = NULL;
1256e4b17023SJohn Marino       entry = new_entry;
1257e4b17023SJohn Marino       G.pages[order] = new_entry;
1258e4b17023SJohn Marino 
1259e4b17023SJohn Marino       /* For a new page, we know the word and bit positions (in the
1260e4b17023SJohn Marino 	 in_use bitmap) of the first available object -- they're zero.  */
1261e4b17023SJohn Marino       new_entry->next_bit_hint = 1;
1262e4b17023SJohn Marino       word = 0;
1263e4b17023SJohn Marino       bit = 0;
1264e4b17023SJohn Marino       object_offset = 0;
1265e4b17023SJohn Marino     }
1266e4b17023SJohn Marino   else
1267e4b17023SJohn Marino     {
1268e4b17023SJohn Marino       /* First try to use the hint left from the previous allocation
1269e4b17023SJohn Marino 	 to locate a clear bit in the in-use bitmap.  We've made sure
1270e4b17023SJohn Marino 	 that the one-past-the-end bit is always set, so if the hint
1271e4b17023SJohn Marino 	 has run over, this test will fail.  */
1272e4b17023SJohn Marino       unsigned hint = entry->next_bit_hint;
1273e4b17023SJohn Marino       word = hint / HOST_BITS_PER_LONG;
1274e4b17023SJohn Marino       bit = hint % HOST_BITS_PER_LONG;
1275e4b17023SJohn Marino 
1276e4b17023SJohn Marino       /* If the hint didn't work, scan the bitmap from the beginning.  */
1277e4b17023SJohn Marino       if ((entry->in_use_p[word] >> bit) & 1)
1278e4b17023SJohn Marino 	{
1279e4b17023SJohn Marino 	  word = bit = 0;
1280e4b17023SJohn Marino 	  while (~entry->in_use_p[word] == 0)
1281e4b17023SJohn Marino 	    ++word;
1282e4b17023SJohn Marino 
1283e4b17023SJohn Marino #if GCC_VERSION >= 3004
1284e4b17023SJohn Marino 	  bit = __builtin_ctzl (~entry->in_use_p[word]);
1285e4b17023SJohn Marino #else
1286e4b17023SJohn Marino 	  while ((entry->in_use_p[word] >> bit) & 1)
1287e4b17023SJohn Marino 	    ++bit;
1288e4b17023SJohn Marino #endif
1289e4b17023SJohn Marino 
1290e4b17023SJohn Marino 	  hint = word * HOST_BITS_PER_LONG + bit;
1291e4b17023SJohn Marino 	}
1292e4b17023SJohn Marino 
1293e4b17023SJohn Marino       /* Next time, try the next bit.  */
1294e4b17023SJohn Marino       entry->next_bit_hint = hint + 1;
1295e4b17023SJohn Marino 
1296e4b17023SJohn Marino       object_offset = hint * object_size;
1297e4b17023SJohn Marino     }
1298e4b17023SJohn Marino 
1299e4b17023SJohn Marino   /* Set the in-use bit.  */
1300e4b17023SJohn Marino   entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1301e4b17023SJohn Marino 
1302e4b17023SJohn Marino   /* Keep a running total of the number of free objects.  If this page
1303e4b17023SJohn Marino      fills up, we may have to move it to the end of the list if the
1304e4b17023SJohn Marino      next page isn't full.  If the next page is full, all subsequent
1305e4b17023SJohn Marino      pages are full, so there's no need to move it.  */
1306e4b17023SJohn Marino   if (--entry->num_free_objects == 0
1307e4b17023SJohn Marino       && entry->next != NULL
1308e4b17023SJohn Marino       && entry->next->num_free_objects > 0)
1309e4b17023SJohn Marino     {
1310e4b17023SJohn Marino       /* We have a new head for the list.  */
1311e4b17023SJohn Marino       G.pages[order] = entry->next;
1312e4b17023SJohn Marino 
1313e4b17023SJohn Marino       /* We are moving ENTRY to the end of the page table list.
1314e4b17023SJohn Marino 	 The new page at the head of the list will have NULL in
1315e4b17023SJohn Marino 	 its PREV field and ENTRY will have NULL in its NEXT field.  */
1316e4b17023SJohn Marino       entry->next->prev = NULL;
1317e4b17023SJohn Marino       entry->next = NULL;
1318e4b17023SJohn Marino 
1319e4b17023SJohn Marino       /* Append ENTRY to the tail of the list.  */
1320e4b17023SJohn Marino       entry->prev = G.page_tails[order];
1321e4b17023SJohn Marino       G.page_tails[order]->next = entry;
1322e4b17023SJohn Marino       G.page_tails[order] = entry;
1323e4b17023SJohn Marino     }
1324e4b17023SJohn Marino 
1325e4b17023SJohn Marino   /* Calculate the object's address.  */
1326e4b17023SJohn Marino   result = entry->page + object_offset;
1327e4b17023SJohn Marino #ifdef GATHER_STATISTICS
1328e4b17023SJohn Marino   ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1329e4b17023SJohn Marino 		       result PASS_MEM_STAT);
1330e4b17023SJohn Marino #endif
1331e4b17023SJohn Marino 
1332e4b17023SJohn Marino #ifdef ENABLE_GC_CHECKING
1333e4b17023SJohn Marino   /* Keep poisoning the object by writing 0xaf, so that we keep exactly
1334e4b17023SJohn Marino      the same semantics in the presence of memory bugs, regardless of
1335e4b17023SJohn Marino      ENABLE_VALGRIND_CHECKING.  We override this request below.  Discard
1336e4b17023SJohn Marino      the handle to avoid a handle leak.  */
1337e4b17023SJohn Marino   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
1338e4b17023SJohn Marino 
1339e4b17023SJohn Marino   /* `Poison' the entire allocated object, including any padding at
1340e4b17023SJohn Marino      the end.  */
1341e4b17023SJohn Marino   memset (result, 0xaf, object_size);
1342e4b17023SJohn Marino 
1343e4b17023SJohn Marino   /* Make the bytes after the end of the object inaccessible.  Discard
1344e4b17023SJohn Marino      the handle to avoid a handle leak.  */
1345e4b17023SJohn Marino   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
1346e4b17023SJohn Marino 						object_size - size));
1347e4b17023SJohn Marino #endif
1348e4b17023SJohn Marino 
1349e4b17023SJohn Marino   /* Tell Valgrind that the memory is there, but its content isn't
1350e4b17023SJohn Marino      defined.  The bytes at the end of the object are still marked
1351e4b17023SJohn Marino      inaccessible.  */
1352e4b17023SJohn Marino   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1353e4b17023SJohn Marino 
1354e4b17023SJohn Marino   /* Keep track of how many bytes are being allocated.  This
1355e4b17023SJohn Marino      information is used in deciding when to collect.  */
1356e4b17023SJohn Marino   G.allocated += object_size;
1357e4b17023SJohn Marino 
1358e4b17023SJohn Marino   /* For timevar statistics.  */
1359e4b17023SJohn Marino   timevar_ggc_mem_total += object_size;
1360e4b17023SJohn Marino 
1361e4b17023SJohn Marino #ifdef GATHER_STATISTICS
1362e4b17023SJohn Marino   {
1363e4b17023SJohn Marino     size_t overhead = object_size - size;
1364e4b17023SJohn Marino 
1365e4b17023SJohn Marino     G.stats.total_overhead += overhead;
1366e4b17023SJohn Marino     G.stats.total_allocated += object_size;
1367e4b17023SJohn Marino     G.stats.total_overhead_per_order[order] += overhead;
1368e4b17023SJohn Marino     G.stats.total_allocated_per_order[order] += object_size;
1369e4b17023SJohn Marino 
1370e4b17023SJohn Marino     if (size <= 32)
1371e4b17023SJohn Marino       {
1372e4b17023SJohn Marino 	G.stats.total_overhead_under32 += overhead;
1373e4b17023SJohn Marino 	G.stats.total_allocated_under32 += object_size;
1374e4b17023SJohn Marino       }
1375e4b17023SJohn Marino     if (size <= 64)
1376e4b17023SJohn Marino       {
1377e4b17023SJohn Marino 	G.stats.total_overhead_under64 += overhead;
1378e4b17023SJohn Marino 	G.stats.total_allocated_under64 += object_size;
1379e4b17023SJohn Marino       }
1380e4b17023SJohn Marino     if (size <= 128)
1381e4b17023SJohn Marino       {
1382e4b17023SJohn Marino 	G.stats.total_overhead_under128 += overhead;
1383e4b17023SJohn Marino 	G.stats.total_allocated_under128 += object_size;
1384e4b17023SJohn Marino       }
1385e4b17023SJohn Marino   }
1386e4b17023SJohn Marino #endif
1387e4b17023SJohn Marino 
1388e4b17023SJohn Marino   if (GGC_DEBUG_LEVEL >= 3)
1389e4b17023SJohn Marino     fprintf (G.debug_file,
1390e4b17023SJohn Marino 	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1391e4b17023SJohn Marino 	     (unsigned long) size, (unsigned long) object_size, result,
1392e4b17023SJohn Marino 	     (void *) entry);
1393e4b17023SJohn Marino 
1394e4b17023SJohn Marino   return result;
1395e4b17023SJohn Marino }
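
/* Standalone sketch (hypothetical helper) of the slow-path bitmap scan
   used above when the allocation hint fails: skip fully allocated
   words, then isolate the lowest clear bit.  It assumes the caller
   guarantees a clear bit exists below the always-set one-past-the-end
   sentinel, which is exactly the invariant clear_marks maintains.  */
#if 0
static unsigned
find_first_clear_bit (const unsigned long *in_use)
{
  unsigned word = 0, bit = 0;

  /* Skip words that are completely allocated.  */
  while (~in_use[word] == 0)
    ++word;

  /* Find the lowest clear bit within that word.  */
#if GCC_VERSION >= 3004
  bit = __builtin_ctzl (~in_use[word]);
#else
  while ((in_use[word] >> bit) & 1)
    ++bit;
#endif

  return word * HOST_BITS_PER_LONG + bit;
}
#endif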
1396e4b17023SJohn Marino 
1397e4b17023SJohn Marino /* Mark function for strings.  */
1398e4b17023SJohn Marino 
1399e4b17023SJohn Marino void
1400e4b17023SJohn Marino gt_ggc_m_S (const void *p)
1401e4b17023SJohn Marino {
1402e4b17023SJohn Marino   page_entry *entry;
1403e4b17023SJohn Marino   unsigned bit, word;
1404e4b17023SJohn Marino   unsigned long mask;
1405e4b17023SJohn Marino   unsigned long offset;
1406e4b17023SJohn Marino 
1407e4b17023SJohn Marino   if (!p || !ggc_allocated_p (p))
1408e4b17023SJohn Marino     return;
1409e4b17023SJohn Marino 
1410e4b17023SJohn Marino   /* Look up the page on which the object is allocated.  */
1411e4b17023SJohn Marino   entry = lookup_page_table_entry (p);
1412e4b17023SJohn Marino   gcc_assert (entry);
1413e4b17023SJohn Marino 
1414e4b17023SJohn Marino   /* Calculate the index of the object on the page; this is its bit
1415e4b17023SJohn Marino      position in the in_use_p bitmap.  Note that because a char* might
1416e4b17023SJohn Marino      point to the middle of an object, we need special code here to
1417e4b17023SJohn Marino      make sure P points to the start of an object.  */
1418e4b17023SJohn Marino   offset = ((const char *) p - entry->page) % object_size_table[entry->order];
1419e4b17023SJohn Marino   if (offset)
1420e4b17023SJohn Marino     {
1421e4b17023SJohn Marino       /* Here we've seen a char* which does not point to the beginning
1422e4b17023SJohn Marino 	 of an allocated object.  We assume it points to the middle of
1423e4b17023SJohn Marino 	 a STRING_CST.  */
1424e4b17023SJohn Marino       gcc_assert (offset == offsetof (struct tree_string, str));
1425e4b17023SJohn Marino       p = ((const char *) p) - offset;
1426e4b17023SJohn Marino       gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
1427e4b17023SJohn Marino       return;
1428e4b17023SJohn Marino     }
1429e4b17023SJohn Marino 
1430e4b17023SJohn Marino   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1431e4b17023SJohn Marino   word = bit / HOST_BITS_PER_LONG;
1432e4b17023SJohn Marino   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1433e4b17023SJohn Marino 
1434e4b17023SJohn Marino   /* If the bit was previously set, skip it.  */
1435e4b17023SJohn Marino   if (entry->in_use_p[word] & mask)
1436e4b17023SJohn Marino     return;
1437e4b17023SJohn Marino 
1438e4b17023SJohn Marino   /* Otherwise set it, and decrement the free object count.  */
1439e4b17023SJohn Marino   entry->in_use_p[word] |= mask;
1440e4b17023SJohn Marino   entry->num_free_objects -= 1;
1441e4b17023SJohn Marino 
1442e4b17023SJohn Marino   if (GGC_DEBUG_LEVEL >= 4)
1443e4b17023SJohn Marino     fprintf (G.debug_file, "Marking %p\n", p);
1444e4b17023SJohn Marino 
1445e4b17023SJohn Marino   return;
1446e4b17023SJohn Marino }
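
/* Standalone sketch of the interior-pointer normalization above:
   given a page's base address and the object size for its order, any
   pointer into an object can be pulled back to the object's start.
   The helper is hypothetical, and unlike gt_ggc_m_S it accepts any
   interior offset rather than only the STRING_CST string field; the
   real code also performs this modulo via the precomputed inverse
   tables rather than an actual division.  */
#if 0
static const char *
object_start (const char *p, const char *page_base, size_t object_size)
{
  size_t offset = (size_t) (p - page_base) % object_size;

  return p - offset;
}
#endif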
1447e4b17023SJohn Marino 
1448e4b17023SJohn Marino /* If P is not marked, mark it and return false.  Otherwise return true.
1449e4b17023SJohn Marino    P must have been allocated by the GC allocator; it mustn't point to
1450e4b17023SJohn Marino    static objects, stack variables, or memory allocated with malloc.  */
1451e4b17023SJohn Marino 
1452e4b17023SJohn Marino int
1453e4b17023SJohn Marino ggc_set_mark (const void *p)
1454e4b17023SJohn Marino {
1455e4b17023SJohn Marino   page_entry *entry;
1456e4b17023SJohn Marino   unsigned bit, word;
1457e4b17023SJohn Marino   unsigned long mask;
1458e4b17023SJohn Marino 
1459e4b17023SJohn Marino   /* Look up the page on which the object is allocated.  If the object
1460e4b17023SJohn Marino      wasn't allocated by the collector, we'll probably die.  */
1461e4b17023SJohn Marino   entry = lookup_page_table_entry (p);
1462e4b17023SJohn Marino   gcc_assert (entry);
1463e4b17023SJohn Marino 
1464e4b17023SJohn Marino   /* Calculate the index of the object on the page; this is its bit
1465e4b17023SJohn Marino      position in the in_use_p bitmap.  */
1466e4b17023SJohn Marino   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1467e4b17023SJohn Marino   word = bit / HOST_BITS_PER_LONG;
1468e4b17023SJohn Marino   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1469e4b17023SJohn Marino 
1470e4b17023SJohn Marino   /* If the bit was previously set, skip it.  */
1471e4b17023SJohn Marino   if (entry->in_use_p[word] & mask)
1472e4b17023SJohn Marino     return 1;
1473e4b17023SJohn Marino 
1474e4b17023SJohn Marino   /* Otherwise set it, and decrement the free object count.  */
1475e4b17023SJohn Marino   entry->in_use_p[word] |= mask;
1476e4b17023SJohn Marino   entry->num_free_objects -= 1;
1477e4b17023SJohn Marino 
1478e4b17023SJohn Marino   if (GGC_DEBUG_LEVEL >= 4)
1479e4b17023SJohn Marino     fprintf (G.debug_file, "Marking %p\n", p);
1480e4b17023SJohn Marino 
1481e4b17023SJohn Marino   return 0;
1482e4b17023SJohn Marino }
1483e4b17023SJohn Marino 
1484e4b17023SJohn Marino /* Return 1 if P has been marked, zero otherwise.
1485e4b17023SJohn Marino    P must have been allocated by the GC allocator; it mustn't point to
1486e4b17023SJohn Marino    static objects, stack variables, or memory allocated with malloc.  */
1487e4b17023SJohn Marino 
1488e4b17023SJohn Marino int
1489e4b17023SJohn Marino ggc_marked_p (const void *p)
1490e4b17023SJohn Marino {
1491e4b17023SJohn Marino   page_entry *entry;
1492e4b17023SJohn Marino   unsigned bit, word;
1493e4b17023SJohn Marino   unsigned long mask;
1494e4b17023SJohn Marino 
1495e4b17023SJohn Marino   /* Look up the page on which the object is allocated.  If the object
1496e4b17023SJohn Marino      wasn't allocated by the collector, we'll probably die.  */
1497e4b17023SJohn Marino   entry = lookup_page_table_entry (p);
1498e4b17023SJohn Marino   gcc_assert (entry);
1499e4b17023SJohn Marino 
1500e4b17023SJohn Marino   /* Calculate the index of the object on the page; this is its bit
1501e4b17023SJohn Marino      position in the in_use_p bitmap.  */
1502e4b17023SJohn Marino   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1503e4b17023SJohn Marino   word = bit / HOST_BITS_PER_LONG;
1504e4b17023SJohn Marino   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1505e4b17023SJohn Marino 
1506e4b17023SJohn Marino   return (entry->in_use_p[word] & mask) != 0;
1507e4b17023SJohn Marino }
1508e4b17023SJohn Marino 
1509e4b17023SJohn Marino /* Return the size of the gc-able object P.  */
1510e4b17023SJohn Marino 
1511e4b17023SJohn Marino size_t
1512e4b17023SJohn Marino ggc_get_size (const void *p)
1513e4b17023SJohn Marino {
1514e4b17023SJohn Marino   page_entry *pe = lookup_page_table_entry (p);
1515e4b17023SJohn Marino   return OBJECT_SIZE (pe->order);
1516e4b17023SJohn Marino }
1517e4b17023SJohn Marino 
1518e4b17023SJohn Marino /* Release the memory for object P.  */
1519e4b17023SJohn Marino 
1520e4b17023SJohn Marino void
1521e4b17023SJohn Marino ggc_free (void *p)
1522e4b17023SJohn Marino {
1523e4b17023SJohn Marino   page_entry *pe = lookup_page_table_entry (p);
1524e4b17023SJohn Marino   size_t order = pe->order;
1525e4b17023SJohn Marino   size_t size = OBJECT_SIZE (order);
1526e4b17023SJohn Marino 
1527e4b17023SJohn Marino #ifdef GATHER_STATISTICS
1528e4b17023SJohn Marino   ggc_free_overhead (p);
1529e4b17023SJohn Marino #endif
1530e4b17023SJohn Marino 
1531e4b17023SJohn Marino   if (GGC_DEBUG_LEVEL >= 3)
1532e4b17023SJohn Marino     fprintf (G.debug_file,
1533e4b17023SJohn Marino 	     "Freeing object, actual size=%lu, at %p on %p\n",
1534e4b17023SJohn Marino 	     (unsigned long) size, p, (void *) pe);
1535e4b17023SJohn Marino 
1536e4b17023SJohn Marino #ifdef ENABLE_GC_CHECKING
1537e4b17023SJohn Marino   /* Poison the data to indicate that it is garbage.  */
1538e4b17023SJohn Marino   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
1539e4b17023SJohn Marino   memset (p, 0xa5, size);
1540e4b17023SJohn Marino #endif
1541e4b17023SJohn Marino   /* Let valgrind know the object is free.  */
1542e4b17023SJohn Marino   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
1543e4b17023SJohn Marino 
1544e4b17023SJohn Marino #ifdef ENABLE_GC_ALWAYS_COLLECT
1545e4b17023SJohn Marino   /* In the completely-anal-checking mode, we do *not* immediately free
1546e4b17023SJohn Marino      the data, but instead verify that the data is *actually* not
1547e4b17023SJohn Marino      reachable the next time we collect.  */
1548e4b17023SJohn Marino   {
1549e4b17023SJohn Marino     struct free_object *fo = XNEW (struct free_object);
1550e4b17023SJohn Marino     fo->object = p;
1551e4b17023SJohn Marino     fo->next = G.free_object_list;
1552e4b17023SJohn Marino     G.free_object_list = fo;
1553e4b17023SJohn Marino   }
1554e4b17023SJohn Marino #else
1555e4b17023SJohn Marino   {
1556e4b17023SJohn Marino     unsigned int bit_offset, word, bit;
1557e4b17023SJohn Marino 
1558e4b17023SJohn Marino     G.allocated -= size;
1559e4b17023SJohn Marino 
1560e4b17023SJohn Marino     /* Mark the object not-in-use.  */
1561e4b17023SJohn Marino     bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1562e4b17023SJohn Marino     word = bit_offset / HOST_BITS_PER_LONG;
1563e4b17023SJohn Marino     bit = bit_offset % HOST_BITS_PER_LONG;
1564e4b17023SJohn Marino     pe->in_use_p[word] &= ~(1UL << bit);
1565e4b17023SJohn Marino 
1566e4b17023SJohn Marino     if (pe->num_free_objects++ == 0)
1567e4b17023SJohn Marino       {
1568e4b17023SJohn Marino 	page_entry *p, *q;
1569e4b17023SJohn Marino 
1570e4b17023SJohn Marino 	/* If the page is completely full, then it's supposed to
1571e4b17023SJohn Marino 	   be after all pages that aren't.  Since we've freed one
1572e4b17023SJohn Marino 	   object from a page that was full, we need to move the
1573e4b17023SJohn Marino 	   page to the head of the list.
1574e4b17023SJohn Marino 
1575e4b17023SJohn Marino 	   PE is the node we want to move.  Q is the previous node
1576e4b17023SJohn Marino 	   and P is the next node in the list.  */
1577e4b17023SJohn Marino 	q = pe->prev;
1578e4b17023SJohn Marino 	if (q && q->num_free_objects == 0)
1579e4b17023SJohn Marino 	  {
1580e4b17023SJohn Marino 	    p = pe->next;
1581e4b17023SJohn Marino 
1582e4b17023SJohn Marino 	    q->next = p;
1583e4b17023SJohn Marino 
1584e4b17023SJohn Marino 	    /* If PE was at the end of the list, then Q becomes the
1585e4b17023SJohn Marino 	       new end of the list.  If PE was not the end of the
1586e4b17023SJohn Marino 	       list, then we need to update the PREV field for P.  */
1587e4b17023SJohn Marino 	    if (!p)
1588e4b17023SJohn Marino 	      G.page_tails[order] = q;
1589e4b17023SJohn Marino 	    else
1590e4b17023SJohn Marino 	      p->prev = q;
1591e4b17023SJohn Marino 
1592e4b17023SJohn Marino 	    /* Move PE to the head of the list.  */
1593e4b17023SJohn Marino 	    pe->next = G.pages[order];
1594e4b17023SJohn Marino 	    pe->prev = NULL;
1595e4b17023SJohn Marino 	    G.pages[order]->prev = pe;
1596e4b17023SJohn Marino 	    G.pages[order] = pe;
1597e4b17023SJohn Marino 	  }
1598e4b17023SJohn Marino 
1599e4b17023SJohn Marino 	/* Reset the hint bit to point to the only free object.  */
1600e4b17023SJohn Marino 	pe->next_bit_hint = bit_offset;
1601e4b17023SJohn Marino       }
1602e4b17023SJohn Marino   }
1603e4b17023SJohn Marino #endif
1604e4b17023SJohn Marino }
1605e4b17023SJohn Marino 
1606e4b17023SJohn Marino /* Subroutine of init_ggc which computes the pair of numbers used to
1607e4b17023SJohn Marino    perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1608e4b17023SJohn Marino 
1609e4b17023SJohn Marino    This algorithm is taken from Granlund and Montgomery's paper
1610e4b17023SJohn Marino    "Division by Invariant Integers using Multiplication"
1611e4b17023SJohn Marino    (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1612e4b17023SJohn Marino    constants).  */
1613e4b17023SJohn Marino 
1614e4b17023SJohn Marino static void
1615e4b17023SJohn Marino compute_inverse (unsigned order)
1616e4b17023SJohn Marino {
1617e4b17023SJohn Marino   size_t size, inv;
1618e4b17023SJohn Marino   unsigned int e;
1619e4b17023SJohn Marino 
1620e4b17023SJohn Marino   size = OBJECT_SIZE (order);
1621e4b17023SJohn Marino   e = 0;
1622e4b17023SJohn Marino   while (size % 2 == 0)
1623e4b17023SJohn Marino     {
1624e4b17023SJohn Marino       e++;
1625e4b17023SJohn Marino       size >>= 1;
1626e4b17023SJohn Marino     }
1627e4b17023SJohn Marino 
1628e4b17023SJohn Marino   inv = size;
1629e4b17023SJohn Marino   while (inv * size != 1)
1630e4b17023SJohn Marino     inv = inv * (2 - inv*size);
1631e4b17023SJohn Marino 
1632e4b17023SJohn Marino   DIV_MULT (order) = inv;
1633e4b17023SJohn Marino   DIV_SHIFT (order) = e;
1634e4b17023SJohn Marino }
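
/* A standalone worked example of the exact-division scheme above,
   using plain 64-bit arithmetic instead of DIV_MULT/DIV_SHIFT; the
   helper and its numbers are illustrative only.  For an object size
   of 24 == 3 << 3 we get e == 3, and INV converges to the
   multiplicative inverse of 3 mod 2^64, so dividing a multiple of 24
   costs one shift and one multiply.  */
#if 0
#include <stdint.h>

static void
example_exact_division (void)
{
  uint64_t odd = 3, inv = odd;
  uint64_t offset = 24 * 1000;

  /* Each iteration doubles the number of correct low-order bits,
     exactly as in compute_inverse above.  */
  while (inv * odd != 1)
    inv = inv * (2 - inv * odd);

  gcc_assert ((offset >> 3) * inv == 1000);
}
#endif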
1635e4b17023SJohn Marino 
1636e4b17023SJohn Marino /* Initialize the ggc-mmap allocator.  */
1637e4b17023SJohn Marino void
1638e4b17023SJohn Marino init_ggc (void)
1639e4b17023SJohn Marino {
1640e4b17023SJohn Marino   unsigned order;
1641e4b17023SJohn Marino 
1642e4b17023SJohn Marino   G.pagesize = getpagesize();
1643e4b17023SJohn Marino   G.lg_pagesize = exact_log2 (G.pagesize);
1644e4b17023SJohn Marino 
1645e4b17023SJohn Marino #ifdef HAVE_MMAP_DEV_ZERO
1646e4b17023SJohn Marino   G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1647e4b17023SJohn Marino   if (G.dev_zero_fd == -1)
1648e4b17023SJohn Marino     internal_error ("open /dev/zero: %m");
1649e4b17023SJohn Marino #endif
1650e4b17023SJohn Marino 
1651e4b17023SJohn Marino #if 0
1652e4b17023SJohn Marino   G.debug_file = fopen ("ggc-mmap.debug", "w");
1653e4b17023SJohn Marino #else
1654e4b17023SJohn Marino   G.debug_file = stdout;
1655e4b17023SJohn Marino #endif
1656e4b17023SJohn Marino 
1657e4b17023SJohn Marino #ifdef USING_MMAP
1658e4b17023SJohn Marino   /* StunOS has an amazing off-by-one error for the first mmap allocation
1659e4b17023SJohn Marino      after fiddling with RLIMIT_STACK.  The result, as hard as it is to
1660e4b17023SJohn Marino      believe, is an unaligned page allocation, which would cause us to
1661e4b17023SJohn Marino      hork badly if we tried to use it.  */
1662e4b17023SJohn Marino   {
1663e4b17023SJohn Marino     char *p = alloc_anon (NULL, G.pagesize, true);
1664e4b17023SJohn Marino     struct page_entry *e;
1665e4b17023SJohn Marino     if ((size_t)p & (G.pagesize - 1))
1666e4b17023SJohn Marino       {
1667e4b17023SJohn Marino 	/* How losing.  Discard this one and try another.  If we still
1668e4b17023SJohn Marino 	   can't get something useful, give up.  */
1669e4b17023SJohn Marino 
1670e4b17023SJohn Marino 	p = alloc_anon (NULL, G.pagesize, true);
1671e4b17023SJohn Marino 	gcc_assert (!((size_t)p & (G.pagesize - 1)));
1672e4b17023SJohn Marino       }
1673e4b17023SJohn Marino 
1674e4b17023SJohn Marino     /* We have a good page, might as well hold onto it...  */
1675e4b17023SJohn Marino     e = XCNEW (struct page_entry);
1676e4b17023SJohn Marino     e->bytes = G.pagesize;
1677e4b17023SJohn Marino     e->page = p;
1678e4b17023SJohn Marino     e->next = G.free_pages;
1679e4b17023SJohn Marino     G.free_pages = e;
1680e4b17023SJohn Marino   }
1681e4b17023SJohn Marino #endif
1682e4b17023SJohn Marino 
1683e4b17023SJohn Marino   /* Initialize the object size table.  */
1684e4b17023SJohn Marino   for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1685e4b17023SJohn Marino     object_size_table[order] = (size_t) 1 << order;
1686e4b17023SJohn Marino   for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1687e4b17023SJohn Marino     {
1688e4b17023SJohn Marino       size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1689e4b17023SJohn Marino 
1690e4b17023SJohn Marino       /* If S is not a multiple of MAX_ALIGNMENT, then round it up
1691e4b17023SJohn Marino 	 so that we're sure of getting aligned memory.  */
1692e4b17023SJohn Marino       s = ROUND_UP (s, MAX_ALIGNMENT);
1693e4b17023SJohn Marino       object_size_table[order] = s;
1694e4b17023SJohn Marino     }
1695e4b17023SJohn Marino 
1696e4b17023SJohn Marino   /* Initialize the objects-per-page and inverse tables.  */
1697e4b17023SJohn Marino   for (order = 0; order < NUM_ORDERS; ++order)
1698e4b17023SJohn Marino     {
1699e4b17023SJohn Marino       objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1700e4b17023SJohn Marino       if (objects_per_page_table[order] == 0)
1701e4b17023SJohn Marino 	objects_per_page_table[order] = 1;
1702e4b17023SJohn Marino       compute_inverse (order);
1703e4b17023SJohn Marino     }
1704e4b17023SJohn Marino 
1705e4b17023SJohn Marino   /* Reset the size_lookup array to put appropriately sized objects in
1706e4b17023SJohn Marino      the special orders.  All objects bigger than the previous power
1707e4b17023SJohn Marino      of two, but no greater than the special size, should go in the
1708e4b17023SJohn Marino      new order.  */
1709e4b17023SJohn Marino   for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1710e4b17023SJohn Marino     {
1711e4b17023SJohn Marino       int o;
1712e4b17023SJohn Marino       int i;
1713e4b17023SJohn Marino 
1714e4b17023SJohn Marino       i = OBJECT_SIZE (order);
1715e4b17023SJohn Marino       if (i >= NUM_SIZE_LOOKUP)
1716e4b17023SJohn Marino 	continue;
1717e4b17023SJohn Marino 
1718e4b17023SJohn Marino       for (o = size_lookup[i]; o == size_lookup [i]; --i)
1719e4b17023SJohn Marino 	size_lookup[i] = order;
1720e4b17023SJohn Marino     }
1721e4b17023SJohn Marino 
1722e4b17023SJohn Marino   G.depth_in_use = 0;
1723e4b17023SJohn Marino   G.depth_max = 10;
1724e4b17023SJohn Marino   G.depth = XNEWVEC (unsigned int, G.depth_max);
1725e4b17023SJohn Marino 
1726e4b17023SJohn Marino   G.by_depth_in_use = 0;
1727e4b17023SJohn Marino   G.by_depth_max = INITIAL_PTE_COUNT;
1728e4b17023SJohn Marino   G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1729e4b17023SJohn Marino   G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1730e4b17023SJohn Marino }
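
/* Worked example, with hypothetical numbers, of the size_lookup
   retargeting loop at the end of init_ggc: suppose an extra order ORD
   has a 24-byte object size.  The initial table maps sizes 17..32 to
   order 5 (32 bytes); starting at index 24 and walking downward while
   the entry still reads 5, the loop rewrites entries 17..24, so those
   requests use snug 24-byte objects while 25..32 keep using 32-byte
   ones.  */
#if 0
static void
example_retarget (void)
{
  unsigned char lookup[33];
  unsigned char ord = 99;	/* Stand-in for the extra order.  */
  int o, i;

  for (i = 0; i <= 32; i++)	/* The initial ceil(log2) contents.  */
    lookup[i] = i <= 8 ? 3 : i <= 16 ? 4 : 5;

  i = 24;			/* OBJECT_SIZE (ord) == 24.  */
  for (o = lookup[i]; o == lookup[i]; --i)
    lookup[i] = ord;

  gcc_assert (lookup[17] == ord && lookup[24] == ord && lookup[25] == 5);
}
#endif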
1731e4b17023SJohn Marino 
1732e4b17023SJohn Marino /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1733e4b17023SJohn Marino    reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */
1734e4b17023SJohn Marino 
1735e4b17023SJohn Marino static void
1736e4b17023SJohn Marino ggc_recalculate_in_use_p (page_entry *p)
1737e4b17023SJohn Marino {
1738e4b17023SJohn Marino   unsigned int i;
1739e4b17023SJohn Marino   size_t num_objects;
1740e4b17023SJohn Marino 
1741e4b17023SJohn Marino   /* Because the past-the-end bit in in_use_p is always set, we
1742e4b17023SJohn Marino      pretend there is one additional object.  */
1743e4b17023SJohn Marino   num_objects = OBJECTS_IN_PAGE (p) + 1;
1744e4b17023SJohn Marino 
1745e4b17023SJohn Marino   /* Reset the free object count.  */
1746e4b17023SJohn Marino   p->num_free_objects = num_objects;
1747e4b17023SJohn Marino 
1748e4b17023SJohn Marino   /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
1749e4b17023SJohn Marino   for (i = 0;
1750e4b17023SJohn Marino        i < CEIL (BITMAP_SIZE (num_objects),
1751e4b17023SJohn Marino 		 sizeof (*p->in_use_p));
1752e4b17023SJohn Marino        ++i)
1753e4b17023SJohn Marino     {
1754e4b17023SJohn Marino       unsigned long j;
1755e4b17023SJohn Marino 
1756e4b17023SJohn Marino       /* Something is in use if it is marked, or if it was in use in a
1757e4b17023SJohn Marino 	 context further down the context stack.  */
1758e4b17023SJohn Marino       p->in_use_p[i] |= save_in_use_p (p)[i];
1759e4b17023SJohn Marino 
1760e4b17023SJohn Marino       /* Decrement the free object count for every object allocated.  */
1761e4b17023SJohn Marino       for (j = p->in_use_p[i]; j; j >>= 1)
1762e4b17023SJohn Marino 	p->num_free_objects -= (j & 1);
1763e4b17023SJohn Marino     }
1764e4b17023SJohn Marino 
1765e4b17023SJohn Marino   gcc_assert (p->num_free_objects < num_objects);
1766e4b17023SJohn Marino }
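
/* Standalone sketch of the free-count recomputation above: every set
   bit in the in-use bitmap is one allocated object, so subtracting
   the population count of each word from the object total leaves the
   number of free objects.  The helper is hypothetical;
   __builtin_popcountl would do the same work per word.  */
#if 0
static size_t
count_free_objects (const unsigned long *in_use, size_t nwords,
		    size_t num_objects)
{
  size_t i, free_count = num_objects;

  for (i = 0; i < nwords; i++)
    {
      unsigned long j;

      for (j = in_use[i]; j; j >>= 1)
	free_count -= j & 1;
    }

  return free_count;
}
#endif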
1767e4b17023SJohn Marino 
1768e4b17023SJohn Marino /* Unmark all objects.  */
1769e4b17023SJohn Marino 
1770e4b17023SJohn Marino static void
1771e4b17023SJohn Marino clear_marks (void)
1772e4b17023SJohn Marino {
1773e4b17023SJohn Marino   unsigned order;
1774e4b17023SJohn Marino 
1775e4b17023SJohn Marino   for (order = 2; order < NUM_ORDERS; order++)
1776e4b17023SJohn Marino     {
1777e4b17023SJohn Marino       page_entry *p;
1778e4b17023SJohn Marino 
1779e4b17023SJohn Marino       for (p = G.pages[order]; p != NULL; p = p->next)
1780e4b17023SJohn Marino 	{
1781e4b17023SJohn Marino 	  size_t num_objects = OBJECTS_IN_PAGE (p);
1782e4b17023SJohn Marino 	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1783e4b17023SJohn Marino 
1784e4b17023SJohn Marino 	  /* The data should be page-aligned.  */
1785e4b17023SJohn Marino 	  gcc_assert (!((size_t) p->page & (G.pagesize - 1)));
1786e4b17023SJohn Marino 
1787e4b17023SJohn Marino 	  /* Pages that aren't in the topmost context are not collected;
1788e4b17023SJohn Marino 	     nevertheless, we need their in-use bit vectors to store GC
1789e4b17023SJohn Marino 	     marks.  So, back them up first.  */
1790e4b17023SJohn Marino 	  if (p->context_depth < G.context_depth)
1791e4b17023SJohn Marino 	    {
1792e4b17023SJohn Marino 	      if (! save_in_use_p (p))
1793e4b17023SJohn Marino 		save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
1794e4b17023SJohn Marino 	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1795e4b17023SJohn Marino 	    }
1796e4b17023SJohn Marino 
1797e4b17023SJohn Marino 	  /* Reset the number of free objects and clear the in-use
1798e4b17023SJohn Marino 	     bits.  These will be adjusted by ggc_set_mark.  */
1799e4b17023SJohn Marino 	  p->num_free_objects = num_objects;
1800e4b17023SJohn Marino 	  memset (p->in_use_p, 0, bitmap_size);
1801e4b17023SJohn Marino 
1802e4b17023SJohn Marino 	  /* Make sure the one-past-the-end bit is always set.  */
1803e4b17023SJohn Marino 	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1804e4b17023SJohn Marino 	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1805e4b17023SJohn Marino 	}
1806e4b17023SJohn Marino     }
1807e4b17023SJohn Marino }
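
/* Worked example of the one-past-the-end sentinel above, assuming
   hypothetical numbers and 64-bit longs: a page holding 100 objects
   uses bits 0..99, and bit 100 -- word 1, bit 36 -- is forced on.
   When the allocation hint in ggc_internal_alloc_stat runs past the
   real objects it therefore reads a set bit and falls back to the
   full scan instead of handing out a nonexistent object.  */
#if 0
static void
example_sentinel_bit (void)
{
  unsigned long in_use[2] = { 0, 0 };
  size_t num_objects = 100;

  in_use[num_objects / HOST_BITS_PER_LONG]
    |= (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  gcc_assert (in_use[1] == (unsigned long) 1 << 36);
}
#endif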
1808e4b17023SJohn Marino 
1809e4b17023SJohn Marino /* Free all empty pages.  Partially empty pages need no attention
1810e4b17023SJohn Marino    because the `mark' bit doubles as an `unused' bit.  */
1811e4b17023SJohn Marino 
1812e4b17023SJohn Marino static void
1813e4b17023SJohn Marino sweep_pages (void)
1814e4b17023SJohn Marino {
1815e4b17023SJohn Marino   unsigned order;
1816e4b17023SJohn Marino 
1817e4b17023SJohn Marino   for (order = 2; order < NUM_ORDERS; order++)
1818e4b17023SJohn Marino     {
1819e4b17023SJohn Marino       /* The last page-entry to consider, regardless of entries
1820e4b17023SJohn Marino 	 placed at the end of the list.  */
1821e4b17023SJohn Marino       page_entry * const last = G.page_tails[order];
1822e4b17023SJohn Marino 
1823e4b17023SJohn Marino       size_t num_objects;
1824e4b17023SJohn Marino       size_t live_objects;
1825e4b17023SJohn Marino       page_entry *p, *previous;
1826e4b17023SJohn Marino       int done;
1827e4b17023SJohn Marino 
1828e4b17023SJohn Marino       p = G.pages[order];
1829e4b17023SJohn Marino       if (p == NULL)
1830e4b17023SJohn Marino 	continue;
1831e4b17023SJohn Marino 
1832e4b17023SJohn Marino       previous = NULL;
1833e4b17023SJohn Marino       do
1834e4b17023SJohn Marino 	{
1835e4b17023SJohn Marino 	  page_entry *next = p->next;
1836e4b17023SJohn Marino 
1837e4b17023SJohn Marino 	  /* Loop until all entries have been examined.  */
1838e4b17023SJohn Marino 	  done = (p == last);
1839e4b17023SJohn Marino 
1840e4b17023SJohn Marino 	  num_objects = OBJECTS_IN_PAGE (p);
1841e4b17023SJohn Marino 
1842e4b17023SJohn Marino 	  /* Add all live objects on this page to the count of
1843e4b17023SJohn Marino              allocated memory.  */
1844e4b17023SJohn Marino 	  live_objects = num_objects - p->num_free_objects;
1845e4b17023SJohn Marino 
1846e4b17023SJohn Marino 	  G.allocated += OBJECT_SIZE (order) * live_objects;
1847e4b17023SJohn Marino 
1848e4b17023SJohn Marino 	  /* Only objects on pages in the topmost context should get
1849e4b17023SJohn Marino 	     collected.  */
1850e4b17023SJohn Marino 	  if (p->context_depth < G.context_depth)
1851e4b17023SJohn Marino 	    ;
1852e4b17023SJohn Marino 
1853e4b17023SJohn Marino 	  /* Remove the page if it's empty.  */
1854e4b17023SJohn Marino 	  else if (live_objects == 0)
1855e4b17023SJohn Marino 	    {
1856e4b17023SJohn Marino 	      /* If P was the first page in the list, then NEXT
1857e4b17023SJohn Marino 		 becomes the new first page in the list, otherwise
1858e4b17023SJohn Marino 		 splice P out of the forward pointers.  */
1859e4b17023SJohn Marino 	      if (! previous)
1860e4b17023SJohn Marino 		G.pages[order] = next;
1861e4b17023SJohn Marino 	      else
1862e4b17023SJohn Marino 		previous->next = next;
1863e4b17023SJohn Marino 
1864e4b17023SJohn Marino 	      /* Splice P out of the back pointers too.  */
1865e4b17023SJohn Marino 	      if (next)
1866e4b17023SJohn Marino 		next->prev = previous;
1867e4b17023SJohn Marino 
1868e4b17023SJohn Marino 	      /* Are we removing the last element?  */
1869e4b17023SJohn Marino 	      if (p == G.page_tails[order])
1870e4b17023SJohn Marino 		G.page_tails[order] = previous;
1871e4b17023SJohn Marino 	      free_page (p);
1872e4b17023SJohn Marino 	      p = previous;
1873e4b17023SJohn Marino 	    }
1874e4b17023SJohn Marino 
1875e4b17023SJohn Marino 	  /* If the page is full, move it to the end.  */
1876e4b17023SJohn Marino 	  else if (p->num_free_objects == 0)
1877e4b17023SJohn Marino 	    {
1878e4b17023SJohn Marino 	      /* Don't move it if it's already at the end.  */
1879e4b17023SJohn Marino 	      if (p != G.page_tails[order])
1880e4b17023SJohn Marino 		{
1881e4b17023SJohn Marino 		  /* Move p to the end of the list.  */
1882e4b17023SJohn Marino 		  p->next = NULL;
1883e4b17023SJohn Marino 		  p->prev = G.page_tails[order];
1884e4b17023SJohn Marino 		  G.page_tails[order]->next = p;
1885e4b17023SJohn Marino 
1886e4b17023SJohn Marino 		  /* Update the tail pointer...  */
1887e4b17023SJohn Marino 		  G.page_tails[order] = p;
1888e4b17023SJohn Marino 
1889e4b17023SJohn Marino 		  /* ... and the head pointer, if necessary.  */
1890e4b17023SJohn Marino 		  if (! previous)
1891e4b17023SJohn Marino 		    G.pages[order] = next;
1892e4b17023SJohn Marino 		  else
1893e4b17023SJohn Marino 		    previous->next = next;
1894e4b17023SJohn Marino 
1895e4b17023SJohn Marino 		  /* And update the backpointer in NEXT if necessary.  */
1896e4b17023SJohn Marino 		  if (next)
1897e4b17023SJohn Marino 		    next->prev = previous;
1898e4b17023SJohn Marino 
1899e4b17023SJohn Marino 		  p = previous;
1900e4b17023SJohn Marino 		}
1901e4b17023SJohn Marino 	    }
1902e4b17023SJohn Marino 
1903e4b17023SJohn Marino 	  /* If we've fallen through to here, it's a page in the
1904e4b17023SJohn Marino 	     topmost context that is neither full nor empty.  Such a
1905e4b17023SJohn Marino 	     page must precede pages at lesser context depth in the
1906e4b17023SJohn Marino 	     list, so move it to the head.  */
1907e4b17023SJohn Marino 	  else if (p != G.pages[order])
1908e4b17023SJohn Marino 	    {
1909e4b17023SJohn Marino 	      previous->next = p->next;
1910e4b17023SJohn Marino 
1911e4b17023SJohn Marino 	      /* Update the backchain in the next node if it exists.  */
1912e4b17023SJohn Marino 	      if (p->next)
1913e4b17023SJohn Marino 		p->next->prev = previous;
1914e4b17023SJohn Marino 
1915e4b17023SJohn Marino 	      /* Move P to the head of the list.  */
1916e4b17023SJohn Marino 	      p->next = G.pages[order];
1917e4b17023SJohn Marino 	      p->prev = NULL;
1918e4b17023SJohn Marino 	      G.pages[order]->prev = p;
1919e4b17023SJohn Marino 
1920e4b17023SJohn Marino 	      /* Update the head pointer.  */
1921e4b17023SJohn Marino 	      G.pages[order] = p;
1922e4b17023SJohn Marino 
1923e4b17023SJohn Marino 	      /* Are we moving the last element?  */
1924e4b17023SJohn Marino 	      if (G.page_tails[order] == p)
1925e4b17023SJohn Marino 	        G.page_tails[order] = previous;
1926e4b17023SJohn Marino 	      p = previous;
1927e4b17023SJohn Marino 	    }
1928e4b17023SJohn Marino 
1929e4b17023SJohn Marino 	  previous = p;
1930e4b17023SJohn Marino 	  p = next;
1931e4b17023SJohn Marino 	}
1932e4b17023SJohn Marino       while (! done);
1933e4b17023SJohn Marino 
1934e4b17023SJohn Marino       /* Now, restore the in_use_p vectors for any pages from contexts
1935e4b17023SJohn Marino          other than the current one.  */
1936e4b17023SJohn Marino       for (p = G.pages[order]; p; p = p->next)
1937e4b17023SJohn Marino 	if (p->context_depth != G.context_depth)
1938e4b17023SJohn Marino 	  ggc_recalculate_in_use_p (p);
1939e4b17023SJohn Marino     }
1940e4b17023SJohn Marino }
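
/* Illustrative invariant checker (hypothetical, not in the real
   code): the list surgery above, and in ggc_internal_alloc_stat and
   ggc_free, all serves one ordering rule per order -- every page that
   still has free objects precedes every full page -- so a sweep could
   be audited like this.  */
#if 0
static void
check_page_list_invariant (unsigned order)
{
  page_entry *p;
  int seen_full = 0;

  for (p = G.pages[order]; p; p = p->next)
    {
      if (p->num_free_objects == 0)
	seen_full = 1;
      else
	gcc_assert (!seen_full);
    }
}
#endif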
1941e4b17023SJohn Marino 
1942e4b17023SJohn Marino #ifdef ENABLE_GC_CHECKING
1943e4b17023SJohn Marino /* Clobber all free objects.  */
1944e4b17023SJohn Marino 
1945e4b17023SJohn Marino static void
1946e4b17023SJohn Marino poison_pages (void)
1947e4b17023SJohn Marino {
1948e4b17023SJohn Marino   unsigned order;
1949e4b17023SJohn Marino 
1950e4b17023SJohn Marino   for (order = 2; order < NUM_ORDERS; order++)
1951e4b17023SJohn Marino     {
1952e4b17023SJohn Marino       size_t size = OBJECT_SIZE (order);
1953e4b17023SJohn Marino       page_entry *p;
1954e4b17023SJohn Marino 
1955e4b17023SJohn Marino       for (p = G.pages[order]; p != NULL; p = p->next)
1956e4b17023SJohn Marino 	{
1957e4b17023SJohn Marino 	  size_t num_objects;
1958e4b17023SJohn Marino 	  size_t i;
1959e4b17023SJohn Marino 
1960e4b17023SJohn Marino 	  if (p->context_depth != G.context_depth)
1961e4b17023SJohn Marino 	    /* Since we don't do any collection for pages in pushed
1962e4b17023SJohn Marino 	       contexts, there's no need to do any poisoning.  And
1963e4b17023SJohn Marino 	       besides, the IN_USE_P array isn't valid until we pop
1964e4b17023SJohn Marino 	       contexts.  */
1965e4b17023SJohn Marino 	    continue;
1966e4b17023SJohn Marino 
1967e4b17023SJohn Marino 	  num_objects = OBJECTS_IN_PAGE (p);
1968e4b17023SJohn Marino 	  for (i = 0; i < num_objects; i++)
1969e4b17023SJohn Marino 	    {
1970e4b17023SJohn Marino 	      size_t word, bit;
1971e4b17023SJohn Marino 	      word = i / HOST_BITS_PER_LONG;
1972e4b17023SJohn Marino 	      bit = i % HOST_BITS_PER_LONG;
1973e4b17023SJohn Marino 	      if (((p->in_use_p[word] >> bit) & 1) == 0)
1974e4b17023SJohn Marino 		{
1975e4b17023SJohn Marino 		  char *object = p->page + i * size;
1976e4b17023SJohn Marino 
1977e4b17023SJohn Marino 		  /* Keep the poison-by-write even when we expect to use
1978e4b17023SJohn Marino 		     Valgrind, so that exactly the same memory semantics
1979e4b17023SJohn Marino 		     are kept in case there are memory errors.  We
1980e4b17023SJohn Marino 		     override this request below.  */
1981e4b17023SJohn Marino 		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
1982e4b17023SJohn Marino 								 size));
1983e4b17023SJohn Marino 		  memset (object, 0xa5, size);
1984e4b17023SJohn Marino 
1985e4b17023SJohn Marino 		  /* Drop the handle to avoid handle leak.  */
1986e4b17023SJohn Marino 		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
1987e4b17023SJohn Marino 		}
1988e4b17023SJohn Marino 	    }
1989e4b17023SJohn Marino 	}
1990e4b17023SJohn Marino     }
1991e4b17023SJohn Marino }
1992e4b17023SJohn Marino #else
1993e4b17023SJohn Marino #define poison_pages()
1994e4b17023SJohn Marino #endif
1995e4b17023SJohn Marino 
1996e4b17023SJohn Marino #ifdef ENABLE_GC_ALWAYS_COLLECT
1997e4b17023SJohn Marino /* Validate that the reportedly free objects actually are.  */
1998e4b17023SJohn Marino 
1999e4b17023SJohn Marino static void
2000e4b17023SJohn Marino validate_free_objects (void)
2001e4b17023SJohn Marino {
2002e4b17023SJohn Marino   struct free_object *f, *next, *still_free = NULL;
2003e4b17023SJohn Marino 
2004e4b17023SJohn Marino   for (f = G.free_object_list; f ; f = next)
2005e4b17023SJohn Marino     {
2006e4b17023SJohn Marino       page_entry *pe = lookup_page_table_entry (f->object);
2007e4b17023SJohn Marino       size_t bit, word;
2008e4b17023SJohn Marino 
2009e4b17023SJohn Marino       bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
2010e4b17023SJohn Marino       word = bit / HOST_BITS_PER_LONG;
2011e4b17023SJohn Marino       bit = bit % HOST_BITS_PER_LONG;
2012e4b17023SJohn Marino       next = f->next;
2013e4b17023SJohn Marino 
2014e4b17023SJohn Marino       /* Make certain it isn't visible from any root.  Notice that we
2015e4b17023SJohn Marino 	 do this check before sweep_pages merges save_in_use_p.  */
2016e4b17023SJohn Marino       gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
2017e4b17023SJohn Marino 
2018e4b17023SJohn Marino       /* If the object comes from an outer context, then retain the
2019e4b17023SJohn Marino 	 free_object entry, so that we can verify that the address
2020e4b17023SJohn Marino 	 isn't live on the stack in some outer context.  */
2021e4b17023SJohn Marino       if (pe->context_depth != G.context_depth)
2022e4b17023SJohn Marino 	{
2023e4b17023SJohn Marino 	  f->next = still_free;
2024e4b17023SJohn Marino 	  still_free = f;
2025e4b17023SJohn Marino 	}
2026e4b17023SJohn Marino       else
2027e4b17023SJohn Marino 	free (f);
2028e4b17023SJohn Marino     }
2029e4b17023SJohn Marino 
2030e4b17023SJohn Marino   G.free_object_list = still_free;
2031e4b17023SJohn Marino }
2032e4b17023SJohn Marino #else
2033e4b17023SJohn Marino #define validate_free_objects()
2034e4b17023SJohn Marino #endif
2035e4b17023SJohn Marino 
2036e4b17023SJohn Marino /* Top level mark-and-sweep routine.  */
2037e4b17023SJohn Marino 
2038e4b17023SJohn Marino void
2039e4b17023SJohn Marino ggc_collect (void)
2040e4b17023SJohn Marino {
2041e4b17023SJohn Marino   /* Avoid frequent unnecessary work by skipping collection if the
2042e4b17023SJohn Marino      total allocations haven't expanded much since the last
2043e4b17023SJohn Marino      collection.  */
2044e4b17023SJohn Marino   float allocated_last_gc =
2045e4b17023SJohn Marino     MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
2046e4b17023SJohn Marino 
2047e4b17023SJohn Marino   float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
2048e4b17023SJohn Marino 
2049e4b17023SJohn Marino   if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
2050e4b17023SJohn Marino     return;
2051e4b17023SJohn Marino 
2052e4b17023SJohn Marino   timevar_push (TV_GC);
2053e4b17023SJohn Marino   if (!quiet_flag)
2054e4b17023SJohn Marino     fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
2055e4b17023SJohn Marino   if (GGC_DEBUG_LEVEL >= 2)
2056e4b17023SJohn Marino     fprintf (G.debug_file, "BEGIN COLLECTING\n");
2057e4b17023SJohn Marino 
2058e4b17023SJohn Marino   /* Zero the total allocated bytes.  This will be recalculated in the
2059e4b17023SJohn Marino      sweep phase.  */
2060e4b17023SJohn Marino   G.allocated = 0;
2061e4b17023SJohn Marino 
2062e4b17023SJohn Marino   /* Release the pages we freed the last time we collected, but didn't
2063e4b17023SJohn Marino      reuse in the interim.  */
2064e4b17023SJohn Marino   release_pages ();
2065e4b17023SJohn Marino 
2066e4b17023SJohn Marino   /* Indicate that we've seen collections at this context depth.  */
2067e4b17023SJohn Marino   G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
2068e4b17023SJohn Marino 
2069e4b17023SJohn Marino   invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2070e4b17023SJohn Marino 
2071e4b17023SJohn Marino   clear_marks ();
2072e4b17023SJohn Marino   ggc_mark_roots ();
2073e4b17023SJohn Marino #ifdef GATHER_STATISTICS
2074e4b17023SJohn Marino   ggc_prune_overhead_list ();
2075e4b17023SJohn Marino #endif
2076e4b17023SJohn Marino   poison_pages ();
2077e4b17023SJohn Marino   validate_free_objects ();
2078e4b17023SJohn Marino   sweep_pages ();
2079e4b17023SJohn Marino 
2080e4b17023SJohn Marino   G.allocated_last_gc = G.allocated;
2081e4b17023SJohn Marino 
2082e4b17023SJohn Marino   invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2083e4b17023SJohn Marino 
2084e4b17023SJohn Marino   timevar_pop (TV_GC);
2085e4b17023SJohn Marino 
2086e4b17023SJohn Marino   if (!quiet_flag)
2087e4b17023SJohn Marino     fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
2088e4b17023SJohn Marino   if (GGC_DEBUG_LEVEL >= 2)
2089e4b17023SJohn Marino     fprintf (G.debug_file, "END COLLECTING\n");
2090e4b17023SJohn Marino }
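
/* A sketch of the trigger arithmetic above, with the documented
   default parameter values hard-wired (ggc-min-heapsize == 4096 KB,
   ggc-min-expand == 30%; real builds may scale both up based on
   available RAM) and ggc_force_collect ignored.  With 8 MB live after
   the previous collection, the heap-size floor is irrelevant and
   min_expand is 2.4 MB, so nothing happens until allocation first
   exceeds roughly 10.4 MB.  The helper itself is hypothetical.  */
#if 0
static int
would_collect (size_t allocated, size_t allocated_last_gc)
{
  size_t last = allocated_last_gc > 4096 * 1024
		? allocated_last_gc : 4096 * 1024;

  return allocated >= last + last * 30 / 100;
}
#endif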
2091e4b17023SJohn Marino 
2092e4b17023SJohn Marino /* Print allocation statistics.  */
2093e4b17023SJohn Marino #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
2094e4b17023SJohn Marino 		  ? (x) \
2095e4b17023SJohn Marino 		  : ((x) < 1024*1024*10 \
2096e4b17023SJohn Marino 		     ? (x) / 1024 \
2097e4b17023SJohn Marino 		     : (x) / (1024*1024))))
2098e4b17023SJohn Marino #define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
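
/* Example of the two scaling macros above: 5 MB is at least 10*1024
   but below 10*1024*1024, so it prints as "5120k", 64 MB prints as
   "64M", and 4 KB is small enough to print unscaled as "4096 ".  */
#if 0
static void
example_scale (void)
{
  unsigned long x = 5UL * 1024 * 1024;

  fprintf (stderr, "%lu%c\n", SCALE (x), STAT_LABEL (x));	/* 5120k */
  x = 64UL * 1024 * 1024;
  fprintf (stderr, "%lu%c\n", SCALE (x), STAT_LABEL (x));	/* 64M */
}
#endif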
2099e4b17023SJohn Marino 
2100e4b17023SJohn Marino void
2101e4b17023SJohn Marino ggc_print_statistics (void)
2102e4b17023SJohn Marino {
2103e4b17023SJohn Marino   struct ggc_statistics stats;
2104e4b17023SJohn Marino   unsigned int i;
2105e4b17023SJohn Marino   size_t total_overhead = 0;
2106e4b17023SJohn Marino 
2107e4b17023SJohn Marino   /* Clear the statistics.  */
2108e4b17023SJohn Marino   memset (&stats, 0, sizeof (stats));
2109e4b17023SJohn Marino 
2110e4b17023SJohn Marino   /* Make sure collection will really occur.  */
2111e4b17023SJohn Marino   G.allocated_last_gc = 0;
2112e4b17023SJohn Marino 
2113e4b17023SJohn Marino   /* Collect and print the statistics common across collectors.  */
2114e4b17023SJohn Marino   ggc_print_common_statistics (stderr, &stats);
2115e4b17023SJohn Marino 
2116e4b17023SJohn Marino   /* Release free pages so that we will not count the bytes allocated
2117e4b17023SJohn Marino      there as part of the total allocated memory.  */
2118e4b17023SJohn Marino   release_pages ();
2119e4b17023SJohn Marino 
2120e4b17023SJohn Marino   /* Collect some information about the various sizes of
2121e4b17023SJohn Marino      allocation.  */
2122e4b17023SJohn Marino   fprintf (stderr,
2123e4b17023SJohn Marino            "Memory still allocated at the end of the compilation process\n");
2124e4b17023SJohn Marino   fprintf (stderr, "%-5s %10s  %10s  %10s\n",
2125e4b17023SJohn Marino 	   "Size", "Allocated", "Used", "Overhead");
2126e4b17023SJohn Marino   for (i = 0; i < NUM_ORDERS; ++i)
2127e4b17023SJohn Marino     {
2128e4b17023SJohn Marino       page_entry *p;
2129e4b17023SJohn Marino       size_t allocated;
2130e4b17023SJohn Marino       size_t in_use;
2131e4b17023SJohn Marino       size_t overhead;
2132e4b17023SJohn Marino 
2133e4b17023SJohn Marino       /* Skip empty entries.  */
2134e4b17023SJohn Marino       if (!G.pages[i])
2135e4b17023SJohn Marino 	continue;
2136e4b17023SJohn Marino 
2137e4b17023SJohn Marino       overhead = allocated = in_use = 0;
2138e4b17023SJohn Marino 
2139e4b17023SJohn Marino       /* Figure out the total number of bytes allocated for objects of
2140e4b17023SJohn Marino 	 this size, and how many of them are actually in use.  Also figure
2141e4b17023SJohn Marino 	 out how much memory the page table is using.  */
2142e4b17023SJohn Marino       for (p = G.pages[i]; p; p = p->next)
2143e4b17023SJohn Marino 	{
2144e4b17023SJohn Marino 	  allocated += p->bytes;
2145e4b17023SJohn Marino 	  in_use +=
2146e4b17023SJohn Marino 	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2147e4b17023SJohn Marino 
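	  /* Each page entry embeds its in-use bitmap as a one-element
	     trailing array, so its true size is sizeof (page_entry)
	     minus that placeholder long plus the bitmap itself (one bit
	     per object, plus one extra trailing bit).  */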
	  overhead += (sizeof (page_entry) - sizeof (long)
		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
	}
      fprintf (stderr, "%-5lu %10lu%c %10lu%c %10lu%c\n",
	       (unsigned long) OBJECT_SIZE (i),
	       SCALE (allocated), STAT_LABEL (allocated),
	       SCALE (in_use), STAT_LABEL (in_use),
	       SCALE (overhead), STAT_LABEL (overhead));
      total_overhead += overhead;
    }
  fprintf (stderr, "%-5s %10lu%c %10lu%c %10lu%c\n", "Total",
	   SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
	   SCALE (G.allocated), STAT_LABEL (G.allocated),
	   SCALE (total_overhead), STAT_LABEL (total_overhead));

#ifdef GATHER_STATISTICS
  {
    fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");

    fprintf (stderr, "Total Overhead:                        %10" HOST_LONG_LONG_FORMAT "d\n",
             G.stats.total_overhead);
    fprintf (stderr, "Total Allocated:                       %10" HOST_LONG_LONG_FORMAT "d\n",
             G.stats.total_allocated);

    fprintf (stderr, "Total Overhead  under  32B:            %10" HOST_LONG_LONG_FORMAT "d\n",
             G.stats.total_overhead_under32);
    fprintf (stderr, "Total Allocated under  32B:            %10" HOST_LONG_LONG_FORMAT "d\n",
             G.stats.total_allocated_under32);
    fprintf (stderr, "Total Overhead  under  64B:            %10" HOST_LONG_LONG_FORMAT "d\n",
             G.stats.total_overhead_under64);
    fprintf (stderr, "Total Allocated under  64B:            %10" HOST_LONG_LONG_FORMAT "d\n",
             G.stats.total_allocated_under64);
    fprintf (stderr, "Total Overhead  under 128B:            %10" HOST_LONG_LONG_FORMAT "d\n",
             G.stats.total_overhead_under128);
    fprintf (stderr, "Total Allocated under 128B:            %10" HOST_LONG_LONG_FORMAT "d\n",
             G.stats.total_allocated_under128);

    for (i = 0; i < NUM_ORDERS; i++)
      if (G.stats.total_allocated_per_order[i])
        {
          fprintf (stderr, "Total Overhead  page size %7lu:     %10" HOST_LONG_LONG_FORMAT "d\n",
                   (unsigned long) OBJECT_SIZE (i),
		   G.stats.total_overhead_per_order[i]);
          fprintf (stderr, "Total Allocated page size %7lu:     %10" HOST_LONG_LONG_FORMAT "d\n",
                   (unsigned long) OBJECT_SIZE (i),
		   G.stats.total_allocated_per_order[i]);
        }
  }
#endif
}

struct ggc_pch_ondisk
{
  unsigned totals[NUM_ORDERS];
};

struct ggc_pch_data
{
  struct ggc_pch_ondisk d;
  size_t base[NUM_ORDERS];
  size_t written[NUM_ORDERS];
};
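
/* In the PCH write sequence below, d.totals[i] counts the objects of
   order I (ggc_pch_count_object), base[i] walks through the address
   range reserved for order I as objects are assigned their future
   locations (ggc_pch_this_base / ggc_pch_alloc_object), and written[i]
   tracks how many order-I objects have been emitted to the file
   (ggc_pch_write_object).  */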

struct ggc_pch_data *
init_ggc_pch (void)
{
  return XCNEW (struct ggc_pch_data);
}

void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED,
		      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  d->d.totals[order]++;
}
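
/* The size-to-order computation above is repeated verbatim in
   ggc_pch_alloc_object and ggc_pch_write_object below.  A sketch of the
   shared logic as a helper (illustrative only; not used by this file):  */
#if 0
static unsigned
pch_size_to_order (size_t size)
{
  unsigned order;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      /* Sizes beyond the lookup table start the upward scan at order
	 10; the code assumes size_lookup covers every smaller order.  */
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }
  return order;
}
#endif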

size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
  size_t a = 0;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
  return a;
}

void
ggc_pch_this_base (struct ggc_pch_data *d, void *base)
{
  size_t a = (size_t) base;
  unsigned i;

  for (i = 0; i < NUM_ORDERS; i++)
    {
      d->base[i] = a;
      a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
    }
}
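
/* The two loops above use the same per-order region size, so the PCH
   image is laid out as one contiguous, page-aligned region per order:
   base[0] starts at BASE, base[1] at BASE plus the aligned size of the
   order-0 region, and so on, summing to ggc_pch_total_size.  */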

char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED,
		      enum gt_types_enum type ATTRIBUTE_UNUSED)
{
  unsigned order;
  char *result;

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  result = (char *) d->base[order];
  d->base[order] += OBJECT_SIZE (order);
  return result;
}
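
/* Note that ggc_pch_alloc_object only hands out the object's future
   address within the mapped PCH region; the object's bytes are copied
   into the file later, by ggc_pch_write_object.  */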

void
ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		       FILE *f ATTRIBUTE_UNUSED)
{
  /* Nothing to do.  */
}

void
ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
		      size_t size, bool is_string ATTRIBUTE_UNUSED)
{
  unsigned order;
  static const char emptyBytes[256] = { 0 };

  if (size < NUM_SIZE_LOOKUP)
    order = size_lookup[size];
  else
    {
      order = 10;
      while (size > OBJECT_SIZE (order))
	order++;
    }

  if (fwrite (x, size, 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");

  /* If SIZE is not the same as OBJECT_SIZE (order), then we need to pad
     the object out to OBJECT_SIZE (order).  This happens for strings.  */

  if (size != OBJECT_SIZE (order))
    {
      unsigned padding = OBJECT_SIZE (order) - size;

      /* To speed small writes, we use a nulled-out array that's larger
         than most padding requests as the source for our null bytes.  This
         permits us to do the padding with fwrite() rather than fseek(), and
         limits the chance the OS may try to flush any outstanding writes.  */
      if (padding <= sizeof (emptyBytes))
        {
          if (fwrite (emptyBytes, 1, padding, f) != padding)
            fatal_error ("can%'t write PCH file");
        }
      else
        {
          /* Larger than our buffer?  Just default to fseek.  */
          if (fseek (f, padding, SEEK_CUR) != 0)
            fatal_error ("can%'t write PCH file");
        }
    }

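  /* Once the last object of this order has been written, seek past the
     page-alignment padding so that the next order's region starts on a
     page boundary, mirroring the PAGE_ALIGN rounding used in
     ggc_pch_total_size and ggc_pch_this_base.  */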
  d->written[order]++;
  if (d->written[order] == d->d.totals[order]
      && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
				   G.pagesize),
		SEEK_CUR) != 0)
    fatal_error ("can%'t write PCH file: %m");
}

void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
  if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    fatal_error ("can%'t write PCH file: %m");
  free (d);
}

/* Move the PCH PTE entries, which were just added at the end of
   by_depth, to the front.  */

static void
move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
{
  unsigned i;

  /* First, we swap the new entries to the front of the varrays.  */
  page_entry **new_by_depth;
  unsigned long **new_save_in_use;

  new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
  new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);

  memcpy (&new_by_depth[0],
	  &G.by_depth[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_by_depth[count_new_page_tables],
	  &G.by_depth[0],
	  count_old_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[0],
	  &G.save_in_use[count_old_page_tables],
	  count_new_page_tables * sizeof (void *));
  memcpy (&new_save_in_use[count_new_page_tables],
	  &G.save_in_use[0],
	  count_old_page_tables * sizeof (void *));

  free (G.by_depth);
  free (G.save_in_use);

  G.by_depth = new_by_depth;
  G.save_in_use = new_save_in_use;

  /* Now update all the index_by_depth fields.  */
  for (i = G.by_depth_in_use; i > 0; --i)
    {
      page_entry *p = G.by_depth[i-1];
      p->index_by_depth = i-1;
    }

  /* And last, we update the depth pointers in G.depth.  The first
     entry is already 0, and context 0 entries always start at index 0,
     so there is nothing to update in the first slot.  We need a second
     slot only if we have old PTEs, and if we do, they start at index
     count_new_page_tables.  */
  if (count_old_page_tables)
    push_depth (count_new_page_tables);
}
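
/* For example, with three old entries O0 O1 O2 and two new PCH entries
   N0 N1, by_depth goes from [O0 O1 O2 N0 N1] to [N0 N1 O0 O1 O2], and
   push_depth (2) records that the depth-1 (non-PCH) entries now start
   at index 2.  */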

void
ggc_pch_read (FILE *f, void *addr)
{
  struct ggc_pch_ondisk d;
  unsigned i;
  char *offs = (char *) addr;
  unsigned long count_old_page_tables;
  unsigned long count_new_page_tables;

  count_old_page_tables = G.by_depth_in_use;

  /* We've just read in a PCH file.  So, every object that used to be
     allocated is now free.  */
  clear_marks ();
#ifdef ENABLE_GC_CHECKING
  poison_pages ();
#endif
  /* Since we free all the allocated objects, the free list becomes
     useless.  Validate it now, which will also clear it.  */
  validate_free_objects ();

  /* No object read from a PCH file should ever be freed.  So, set the
     context depth to 1, and set the depth of all the currently-allocated
     pages to be 1 too.  PCH pages will have depth 0.  */
  gcc_assert (!G.context_depth);
  G.context_depth = 1;
  for (i = 0; i < NUM_ORDERS; i++)
    {
      page_entry *p;
      for (p = G.pages[i]; p != NULL; p = p->next)
	p->context_depth = G.context_depth;
    }

  /* Allocate the appropriate page-table entries for the pages read from
     the PCH file.  */
  if (fread (&d, sizeof (d), 1, f) != 1)
    fatal_error ("can%'t read PCH file: %m");

  for (i = 0; i < NUM_ORDERS; i++)
    {
      struct page_entry *entry;
      char *pte;
      size_t bytes;
      size_t num_objs;
      size_t j;

      if (d.totals[i] == 0)
	continue;

      bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
      num_objs = bytes / OBJECT_SIZE (i);
      entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
					    - sizeof (long)
					    + BITMAP_SIZE (num_objs + 1)));
      entry->bytes = bytes;
      entry->page = offs;
      entry->context_depth = 0;
      offs += bytes;
      entry->num_free_objects = 0;
      entry->order = i;

      for (j = 0;
	   j + HOST_BITS_PER_LONG <= num_objs + 1;
	   j += HOST_BITS_PER_LONG)
	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
      for (; j < num_objs + 1; j++)
	entry->in_use_p[j / HOST_BITS_PER_LONG]
	  |= 1L << (j % HOST_BITS_PER_LONG);
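
      /* Every object in a PCH page is live, so the two loops above set
	 all num_objs in-use bits (whole longs at a time, then the
	 leftover bits one by one), plus the one extra trailing bit the
	 bitmap reserves past the last object.  */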

      for (pte = entry->page;
	   pte < entry->page + entry->bytes;
	   pte += G.pagesize)
	set_page_table_entry (pte, entry);

      if (G.page_tails[i] != NULL)
	G.page_tails[i]->next = entry;
      else
	G.pages[i] = entry;
      G.page_tails[i] = entry;

      /* We start off by just adding all the new information to the end
	 of the varrays; later, we will move the new information to the
	 front of the varrays, as the PCH page tables are at context 0.  */
      push_by_depth (entry, 0);
    }

  /* Now, we update the various data structures that speed page table
     handling.  */
  count_new_page_tables = G.by_depth_in_use - count_old_page_tables;

  move_ptes_to_front (count_old_page_tables, count_new_page_tables);

  /* Update the statistics.  */
  G.allocated = G.allocated_last_gc = offs - (char *) addr;
}

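/* Dummy zone objects.  This file implements the page collector; these
   appear to exist only so that code written against the zone
   collector's interface still links.  */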
struct alloc_zone
{
  int dummy;
};

struct alloc_zone rtl_zone;
struct alloc_zone tree_zone;
struct alloc_zone tree_id_zone;