1*38fd1498Szrj /* "Bag-of-pages" garbage collector for the GNU compiler.
2*38fd1498Szrj    Copyright (C) 1999-2018 Free Software Foundation, Inc.
3*38fd1498Szrj 
4*38fd1498Szrj This file is part of GCC.
5*38fd1498Szrj 
6*38fd1498Szrj GCC is free software; you can redistribute it and/or modify it under
7*38fd1498Szrj the terms of the GNU General Public License as published by the Free
8*38fd1498Szrj Software Foundation; either version 3, or (at your option) any later
9*38fd1498Szrj version.
10*38fd1498Szrj 
11*38fd1498Szrj GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12*38fd1498Szrj WARRANTY; without even the implied warranty of MERCHANTABILITY or
13*38fd1498Szrj FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14*38fd1498Szrj for more details.
15*38fd1498Szrj 
16*38fd1498Szrj You should have received a copy of the GNU General Public License
17*38fd1498Szrj along with GCC; see the file COPYING3.  If not see
18*38fd1498Szrj <http://www.gnu.org/licenses/>.  */
19*38fd1498Szrj 
20*38fd1498Szrj #include "config.h"
21*38fd1498Szrj #include "system.h"
22*38fd1498Szrj #include "coretypes.h"
23*38fd1498Szrj #include "backend.h"
24*38fd1498Szrj #include "alias.h"
25*38fd1498Szrj #include "tree.h"
26*38fd1498Szrj #include "rtl.h"
27*38fd1498Szrj #include "memmodel.h"
28*38fd1498Szrj #include "tm_p.h"
29*38fd1498Szrj #include "diagnostic-core.h"
30*38fd1498Szrj #include "flags.h"
31*38fd1498Szrj #include "ggc-internal.h"
32*38fd1498Szrj #include "timevar.h"
33*38fd1498Szrj #include "params.h"
34*38fd1498Szrj #include "cgraph.h"
35*38fd1498Szrj #include "cfgloop.h"
36*38fd1498Szrj #include "plugin.h"
37*38fd1498Szrj 
38*38fd1498Szrj /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
39*38fd1498Szrj    file open.  Prefer either to valloc.  */
40*38fd1498Szrj #ifdef HAVE_MMAP_ANON
41*38fd1498Szrj # undef HAVE_MMAP_DEV_ZERO
42*38fd1498Szrj # define USING_MMAP
43*38fd1498Szrj #endif
44*38fd1498Szrj 
45*38fd1498Szrj #ifdef HAVE_MMAP_DEV_ZERO
46*38fd1498Szrj # define USING_MMAP
47*38fd1498Szrj #endif
48*38fd1498Szrj 
49*38fd1498Szrj #ifndef USING_MMAP
50*38fd1498Szrj #define USING_MALLOC_PAGE_GROUPS
51*38fd1498Szrj #endif
52*38fd1498Szrj 
53*38fd1498Szrj #if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
54*38fd1498Szrj     && defined(USING_MMAP)
55*38fd1498Szrj # define USING_MADVISE
56*38fd1498Szrj #endif
57*38fd1498Szrj 
58*38fd1498Szrj /* Strategy:
59*38fd1498Szrj 
60*38fd1498Szrj    This garbage-collecting allocator allocates objects on one of a set
61*38fd1498Szrj    of pages.  Each page can allocate objects of a single size only;
62*38fd1498Szrj    available sizes are powers of two starting at four bytes.  The size
63*38fd1498Szrj    of an allocation request is rounded up to the next power of two
64*38fd1498Szrj    (`order'), and satisfied from the appropriate page.
65*38fd1498Szrj 
66*38fd1498Szrj    Each page is recorded in a page-entry, which also maintains an
67*38fd1498Szrj    in-use bitmap of object positions on the page.  This allows the
68*38fd1498Szrj    allocation state of a particular object to be flipped without
69*38fd1498Szrj    touching the page itself.
70*38fd1498Szrj 
71*38fd1498Szrj    Each page-entry also has a context depth, which is used to track
72*38fd1498Szrj    pushing and popping of allocation contexts.  Only objects allocated
73*38fd1498Szrj    in the current (highest-numbered) context may be collected.
74*38fd1498Szrj 
75*38fd1498Szrj    Page entries are arranged in an array of singly-linked lists.  The
76*38fd1498Szrj    array is indexed by the allocation size, in bits, of the pages on
77*38fd1498Szrj    it; i.e. all pages on a list allocate objects of the same size.
78*38fd1498Szrj    Pages are ordered on the list such that all non-full pages precede
79*38fd1498Szrj    all full pages, with non-full pages arranged in order of decreasing
80*38fd1498Szrj    context depth.
81*38fd1498Szrj 
82*38fd1498Szrj    Empty pages (of all orders) are kept on a single page cache list,
83*38fd1498Szrj    and are considered first when new pages are required; they are
84*38fd1498Szrj    deallocated at the start of the next collection if they haven't
85*38fd1498Szrj    been recycled by then.  */
86*38fd1498Szrj 
87*38fd1498Szrj /* Define GGC_DEBUG_LEVEL to print debugging information.
88*38fd1498Szrj      0: No debugging output.
89*38fd1498Szrj      1: GC statistics only.
90*38fd1498Szrj      2: Page-entry allocations/deallocations as well.
91*38fd1498Szrj      3: Object allocations as well.
92*38fd1498Szrj      4: Object marks as well.  */
93*38fd1498Szrj #define GGC_DEBUG_LEVEL (0)
94*38fd1498Szrj 
95*38fd1498Szrj /* A two-level tree is used to look up the page-entry for a given
96*38fd1498Szrj    pointer.  Two chunks of the pointer's bits are extracted to index
97*38fd1498Szrj    the first and second levels of the tree, as follows:
98*38fd1498Szrj 
99*38fd1498Szrj 				   HOST_PAGE_SIZE_BITS
100*38fd1498Szrj 			   32		|      |
101*38fd1498Szrj        msb +----------------+----+------+------+ lsb
102*38fd1498Szrj 			    |    |      |
103*38fd1498Szrj 			 PAGE_L1_BITS   |
104*38fd1498Szrj 				 |      |
105*38fd1498Szrj 			       PAGE_L2_BITS
106*38fd1498Szrj 
107*38fd1498Szrj    The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
108*38fd1498Szrj    pages are aligned on system page boundaries.  The next most
109*38fd1498Szrj    significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
110*38fd1498Szrj    index values in the lookup table, respectively.
111*38fd1498Szrj 
112*38fd1498Szrj    For 32-bit architectures and the settings below, there are no
113*38fd1498Szrj    leftover bits.  For architectures with wider pointers, the lookup
114*38fd1498Szrj    tree points to a list of pages, which must be scanned to find the
115*38fd1498Szrj    correct one.  */
116*38fd1498Szrj 
117*38fd1498Szrj #define PAGE_L1_BITS	(8)
118*38fd1498Szrj #define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
119*38fd1498Szrj #define PAGE_L1_SIZE	((uintptr_t) 1 << PAGE_L1_BITS)
120*38fd1498Szrj #define PAGE_L2_SIZE	((uintptr_t) 1 << PAGE_L2_BITS)
121*38fd1498Szrj 
122*38fd1498Szrj #define LOOKUP_L1(p) \
123*38fd1498Szrj   (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
124*38fd1498Szrj 
125*38fd1498Szrj #define LOOKUP_L2(p) \
126*38fd1498Szrj   (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
127*38fd1498Szrj 
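/* Worked example (for illustration only, not used by the code):
   assuming 4K host pages (G.lg_pagesize == 12), PAGE_L2_BITS is
   32 - 8 - 12 == 12.  For the address 0x12345678,
     LOOKUP_L1 gives (0x12345678 >> 24) & 0xff  == 0x12 and
     LOOKUP_L2 gives (0x12345678 >> 12) & 0xfff == 0x345;
   the low 12 bits (0x678) are the offset within the page and take no
   part in the lookup.  */
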
128*38fd1498Szrj /* The number of objects per allocation page, for objects on a page of
129*38fd1498Szrj    the indicated ORDER.  */
130*38fd1498Szrj #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
131*38fd1498Szrj 
132*38fd1498Szrj /* The number of objects in P.  */
133*38fd1498Szrj #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
134*38fd1498Szrj 
135*38fd1498Szrj /* The size of an object on a page of the indicated ORDER.  */
136*38fd1498Szrj #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
137*38fd1498Szrj 
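/* For illustration, assuming 4K pages: a page of order 5 holds
   32-byte objects, so OBJECTS_PER_PAGE (5) is 4096 / 32 == 128, and
   OBJECTS_IN_PAGE gives the same count for a single-page entry.  */
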
138*38fd1498Szrj /* For speed, we avoid doing a general integer divide to locate the
139*38fd1498Szrj    offset in the allocation bitmap, by precalculating numbers M, S
140*38fd1498Szrj    such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
141*38fd1498Szrj    within the page which is evenly divisible by the object size Z.  */
142*38fd1498Szrj #define DIV_MULT(ORDER) inverse_table[ORDER].mult
143*38fd1498Szrj #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
144*38fd1498Szrj #define OFFSET_TO_BIT(OFFSET, ORDER) \
145*38fd1498Szrj   (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
146*38fd1498Szrj 
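/* Worked example (illustrative only): for an order whose object size
   is 8, the pair mult == 1, shift == 3 satisfies the identity above,
   so OFFSET_TO_BIT (24, order) == (24 * 1) >> 3 == 3, i.e. the fourth
   object on the page.  compute_inverse below fills inverse_table with
   suitable pairs for every order, including the odd-sized ones.  */
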
147*38fd1498Szrj /* We use this structure to determine the alignment required for
148*38fd1498Szrj    allocations.  For power-of-two sized allocations, that's not a
149*38fd1498Szrj    problem, but it does matter for odd-sized allocations.
150*38fd1498Szrj    We do not care about alignment for floating-point types.  */
151*38fd1498Szrj 
152*38fd1498Szrj struct max_alignment {
153*38fd1498Szrj   char c;
154*38fd1498Szrj   union {
155*38fd1498Szrj     int64_t i;
156*38fd1498Szrj     void *p;
157*38fd1498Szrj   } u;
158*38fd1498Szrj };
159*38fd1498Szrj 
160*38fd1498Szrj /* The biggest alignment required.  */
161*38fd1498Szrj 
162*38fd1498Szrj #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
163*38fd1498Szrj 
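/* For illustration: on a typical LP64 host both int64_t and void *
   require 8-byte alignment, so offsetof (struct max_alignment, u),
   and therefore MAX_ALIGNMENT, evaluates to 8.  */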
164*38fd1498Szrj 
165*38fd1498Szrj /* The number of extra orders, not corresponding to power-of-two sized
166*38fd1498Szrj    objects.  */
167*38fd1498Szrj 
168*38fd1498Szrj #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
169*38fd1498Szrj 
170*38fd1498Szrj #define RTL_SIZE(NSLOTS) \
171*38fd1498Szrj   (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
172*38fd1498Szrj 
173*38fd1498Szrj #define TREE_EXP_SIZE(OPS) \
174*38fd1498Szrj   (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
175*38fd1498Szrj 
176*38fd1498Szrj /* The Ith entry is the maximum size of an object to be stored in the
177*38fd1498Szrj    Ith extra order.  Adding a new entry to this array is the *only*
178*38fd1498Szrj    thing you need to do to add a new special allocation size.  */
179*38fd1498Szrj 
180*38fd1498Szrj static const size_t extra_order_size_table[] = {
181*38fd1498Szrj   /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
182*38fd1498Szrj      There are a lot of structures with these sizes and explicitly
183*38fd1498Szrj      listing them risks orders being dropped because they changed size.  */
184*38fd1498Szrj   MAX_ALIGNMENT * 3,
185*38fd1498Szrj   MAX_ALIGNMENT * 5,
186*38fd1498Szrj   MAX_ALIGNMENT * 6,
187*38fd1498Szrj   MAX_ALIGNMENT * 7,
188*38fd1498Szrj   MAX_ALIGNMENT * 9,
189*38fd1498Szrj   MAX_ALIGNMENT * 10,
190*38fd1498Szrj   MAX_ALIGNMENT * 11,
191*38fd1498Szrj   MAX_ALIGNMENT * 12,
192*38fd1498Szrj   MAX_ALIGNMENT * 13,
193*38fd1498Szrj   MAX_ALIGNMENT * 14,
194*38fd1498Szrj   MAX_ALIGNMENT * 15,
195*38fd1498Szrj   sizeof (struct tree_decl_non_common),
196*38fd1498Szrj   sizeof (struct tree_field_decl),
197*38fd1498Szrj   sizeof (struct tree_parm_decl),
198*38fd1498Szrj   sizeof (struct tree_var_decl),
199*38fd1498Szrj   sizeof (struct tree_type_non_common),
200*38fd1498Szrj   sizeof (struct function),
201*38fd1498Szrj   sizeof (struct basic_block_def),
202*38fd1498Szrj   sizeof (struct cgraph_node),
203*38fd1498Szrj   sizeof (struct loop),
204*38fd1498Szrj };
205*38fd1498Szrj 
206*38fd1498Szrj /* The total number of orders.  */
207*38fd1498Szrj 
208*38fd1498Szrj #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
209*38fd1498Szrj 
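/* For example, on an LP64 host (HOST_BITS_PER_PTR == 64) with the
   twenty entries in extra_order_size_table above, NUM_ORDERS is
   64 + 20 == 84.  */
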
210*38fd1498Szrj /* Compute the smallest nonnegative number which when added to X gives
211*38fd1498Szrj    a multiple of F.  */
212*38fd1498Szrj 
213*38fd1498Szrj #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
214*38fd1498Szrj 
215*38fd1498Szrj /* Round X to next multiple of the page size */
216*38fd1498Szrj 
217*38fd1498Szrj #define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize)
218*38fd1498Szrj 
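/* Worked example: ROUND_UP_VALUE (10, 8) is 7 - ((7 + 10) % 8) == 6,
   and 10 + 6 == 16 is the next multiple of 8.  Likewise, with 4K
   pages PAGE_ALIGN (5000) yields 8192.  */
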
219*38fd1498Szrj /* The Ith entry is the number of objects on a page of order I.  */
220*38fd1498Szrj 
221*38fd1498Szrj static unsigned objects_per_page_table[NUM_ORDERS];
222*38fd1498Szrj 
223*38fd1498Szrj /* The Ith entry is the size of an object on a page of order I.  */
224*38fd1498Szrj 
225*38fd1498Szrj static size_t object_size_table[NUM_ORDERS];
226*38fd1498Szrj 
227*38fd1498Szrj /* The Ith entry is a pair of numbers (mult, shift) such that
228*38fd1498Szrj    ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
229*38fd1498Szrj    for all k evenly divisible by OBJECT_SIZE(I).  */
230*38fd1498Szrj 
231*38fd1498Szrj static struct
232*38fd1498Szrj {
233*38fd1498Szrj   size_t mult;
234*38fd1498Szrj   unsigned int shift;
235*38fd1498Szrj }
236*38fd1498Szrj inverse_table[NUM_ORDERS];
237*38fd1498Szrj 
238*38fd1498Szrj /* A page_entry records the status of an allocation page.  This
239*38fd1498Szrj    structure is dynamically sized to fit the bitmap in_use_p.  */
240*38fd1498Szrj struct page_entry
241*38fd1498Szrj {
242*38fd1498Szrj   /* The next page-entry with objects of the same size, or NULL if
243*38fd1498Szrj      this is the last page-entry.  */
244*38fd1498Szrj   struct page_entry *next;
245*38fd1498Szrj 
246*38fd1498Szrj   /* The previous page-entry with objects of the same size, or NULL if
247*38fd1498Szrj      this is the first page-entry.   The PREV pointer exists solely to
248*38fd1498Szrj      keep the cost of ggc_free manageable.  */
249*38fd1498Szrj   struct page_entry *prev;
250*38fd1498Szrj 
251*38fd1498Szrj   /* The number of bytes allocated.  (This will always be a multiple
252*38fd1498Szrj      of the host system page size.)  */
253*38fd1498Szrj   size_t bytes;
254*38fd1498Szrj 
255*38fd1498Szrj   /* The address at which the memory is allocated.  */
256*38fd1498Szrj   char *page;
257*38fd1498Szrj 
258*38fd1498Szrj #ifdef USING_MALLOC_PAGE_GROUPS
259*38fd1498Szrj   /* Back pointer to the page group this page came from.  */
260*38fd1498Szrj   struct page_group *group;
261*38fd1498Szrj #endif
262*38fd1498Szrj 
263*38fd1498Szrj   /* This is the index in the by_depth varray where this page table
264*38fd1498Szrj      can be found.  */
265*38fd1498Szrj   unsigned long index_by_depth;
266*38fd1498Szrj 
267*38fd1498Szrj   /* Context depth of this page.  */
268*38fd1498Szrj   unsigned short context_depth;
269*38fd1498Szrj 
270*38fd1498Szrj   /* The number of free objects remaining on this page.  */
271*38fd1498Szrj   unsigned short num_free_objects;
272*38fd1498Szrj 
273*38fd1498Szrj   /* A likely candidate for the bit position of a free object for the
274*38fd1498Szrj      next allocation from this page.  */
275*38fd1498Szrj   unsigned short next_bit_hint;
276*38fd1498Szrj 
277*38fd1498Szrj   /* The lg of size of objects allocated from this page.  */
278*38fd1498Szrj   unsigned char order;
279*38fd1498Szrj 
280*38fd1498Szrj   /* Discarded page? */
281*38fd1498Szrj   bool discarded;
282*38fd1498Szrj 
283*38fd1498Szrj   /* A bit vector indicating whether or not objects are in use.  The
284*38fd1498Szrj      Nth bit is one if the Nth object on this page is allocated.  This
285*38fd1498Szrj      array is dynamically sized.  */
286*38fd1498Szrj   unsigned long in_use_p[1];
287*38fd1498Szrj };
288*38fd1498Szrj 
289*38fd1498Szrj #ifdef USING_MALLOC_PAGE_GROUPS
290*38fd1498Szrj /* A page_group describes a large allocation from malloc, from which
291*38fd1498Szrj    we parcel out aligned pages.  */
292*38fd1498Szrj struct page_group
293*38fd1498Szrj {
294*38fd1498Szrj   /* A linked list of all extant page groups.  */
295*38fd1498Szrj   struct page_group *next;
296*38fd1498Szrj 
297*38fd1498Szrj   /* The address we received from malloc.  */
298*38fd1498Szrj   char *allocation;
299*38fd1498Szrj 
300*38fd1498Szrj   /* The size of the block.  */
301*38fd1498Szrj   size_t alloc_size;
302*38fd1498Szrj 
303*38fd1498Szrj   /* A bitmask of pages in use.  */
304*38fd1498Szrj   unsigned int in_use;
305*38fd1498Szrj };
306*38fd1498Szrj #endif
307*38fd1498Szrj 
308*38fd1498Szrj #if HOST_BITS_PER_PTR <= 32
309*38fd1498Szrj 
310*38fd1498Szrj /* On 32-bit hosts, we use a two level page table, as pictured above.  */
311*38fd1498Szrj typedef page_entry **page_table[PAGE_L1_SIZE];
312*38fd1498Szrj 
313*38fd1498Szrj #else
314*38fd1498Szrj 
315*38fd1498Szrj /* On 64-bit hosts, we use the same two level page tables plus a linked
316*38fd1498Szrj    list that disambiguates the top 32-bits.  There will almost always be
317*38fd1498Szrj    exactly one entry in the list.  */
318*38fd1498Szrj typedef struct page_table_chain
319*38fd1498Szrj {
320*38fd1498Szrj   struct page_table_chain *next;
321*38fd1498Szrj   size_t high_bits;
322*38fd1498Szrj   page_entry **table[PAGE_L1_SIZE];
323*38fd1498Szrj } *page_table;
324*38fd1498Szrj 
325*38fd1498Szrj #endif
326*38fd1498Szrj 
327*38fd1498Szrj class finalizer
328*38fd1498Szrj {
329*38fd1498Szrj public:
finalizer(void * addr,void (* f)(void *))330*38fd1498Szrj   finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}
331*38fd1498Szrj 
332*38fd1498Szrj   void *addr () const { return m_addr; }
333*38fd1498Szrj 
334*38fd1498Szrj   void call () const { m_function (m_addr); }
335*38fd1498Szrj 
336*38fd1498Szrj private:
337*38fd1498Szrj   void *m_addr;
338*38fd1498Szrj   void (*m_function)(void *);
339*38fd1498Szrj };
340*38fd1498Szrj 
341*38fd1498Szrj class vec_finalizer
342*38fd1498Szrj {
343*38fd1498Szrj public:
344*38fd1498Szrj   vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
345*38fd1498Szrj     m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}
346*38fd1498Szrj 
347*38fd1498Szrj   void call () const
348*38fd1498Szrj     {
349*38fd1498Szrj       for (size_t i = 0; i < m_n_objects; i++)
350*38fd1498Szrj 	m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
351*38fd1498Szrj     }
352*38fd1498Szrj 
353*38fd1498Szrj   void *addr () const { return reinterpret_cast<void *> (m_addr); }
354*38fd1498Szrj 
355*38fd1498Szrj private:
356*38fd1498Szrj   uintptr_t m_addr;
357*38fd1498Szrj   void (*m_function)(void *);
358*38fd1498Szrj   size_t m_object_size;
359*38fd1498Szrj   size_t m_n_objects;
360*38fd1498Szrj };
361*38fd1498Szrj 
362*38fd1498Szrj #ifdef ENABLE_GC_ALWAYS_COLLECT
363*38fd1498Szrj /* List of free objects to be verified as actually free on the
364*38fd1498Szrj    next collection.  */
365*38fd1498Szrj struct free_object
366*38fd1498Szrj {
367*38fd1498Szrj   void *object;
368*38fd1498Szrj   struct free_object *next;
369*38fd1498Szrj };
370*38fd1498Szrj #endif
371*38fd1498Szrj 
372*38fd1498Szrj /* The rest of the global variables.  */
373*38fd1498Szrj static struct ggc_globals
374*38fd1498Szrj {
375*38fd1498Szrj   /* The Nth element in this array is a page with objects of size 2^N.
376*38fd1498Szrj      If there are any pages with free objects, they will be at the
377*38fd1498Szrj      head of the list.  NULL if there are no page-entries for this
378*38fd1498Szrj      object size.  */
379*38fd1498Szrj   page_entry *pages[NUM_ORDERS];
380*38fd1498Szrj 
381*38fd1498Szrj   /* The Nth element in this array is the last page with objects of
382*38fd1498Szrj      size 2^N.  NULL if there are no page-entries for this object
383*38fd1498Szrj      size.  */
384*38fd1498Szrj   page_entry *page_tails[NUM_ORDERS];
385*38fd1498Szrj 
386*38fd1498Szrj   /* Lookup table for associating allocation pages with object addresses.  */
387*38fd1498Szrj   page_table lookup;
388*38fd1498Szrj 
389*38fd1498Szrj   /* The system's page size.  */
390*38fd1498Szrj   size_t pagesize;
391*38fd1498Szrj   size_t lg_pagesize;
392*38fd1498Szrj 
393*38fd1498Szrj   /* Bytes currently allocated.  */
394*38fd1498Szrj   size_t allocated;
395*38fd1498Szrj 
396*38fd1498Szrj   /* Bytes currently allocated at the end of the last collection.  */
397*38fd1498Szrj   size_t allocated_last_gc;
398*38fd1498Szrj 
399*38fd1498Szrj   /* Total amount of memory mapped.  */
400*38fd1498Szrj   size_t bytes_mapped;
401*38fd1498Szrj 
402*38fd1498Szrj   /* Bit N set if any allocations have been done at context depth N.  */
403*38fd1498Szrj   unsigned long context_depth_allocations;
404*38fd1498Szrj 
405*38fd1498Szrj   /* Bit N set if any collections have been done at context depth N.  */
406*38fd1498Szrj   unsigned long context_depth_collections;
407*38fd1498Szrj 
408*38fd1498Szrj   /* The current depth in the context stack.  */
409*38fd1498Szrj   unsigned short context_depth;
410*38fd1498Szrj 
411*38fd1498Szrj   /* A file descriptor open to /dev/zero for reading.  */
412*38fd1498Szrj #if defined (HAVE_MMAP_DEV_ZERO)
413*38fd1498Szrj   int dev_zero_fd;
414*38fd1498Szrj #endif
415*38fd1498Szrj 
416*38fd1498Szrj   /* A cache of free system pages.  */
417*38fd1498Szrj   page_entry *free_pages;
418*38fd1498Szrj 
419*38fd1498Szrj #ifdef USING_MALLOC_PAGE_GROUPS
420*38fd1498Szrj   page_group *page_groups;
421*38fd1498Szrj #endif
422*38fd1498Szrj 
423*38fd1498Szrj   /* The file descriptor for debugging output.  */
424*38fd1498Szrj   FILE *debug_file;
425*38fd1498Szrj 
426*38fd1498Szrj   /* Current number of elements in use in depth below.  */
427*38fd1498Szrj   unsigned int depth_in_use;
428*38fd1498Szrj 
429*38fd1498Szrj   /* Maximum number of elements that can be used before resizing.  */
430*38fd1498Szrj   unsigned int depth_max;
431*38fd1498Szrj 
432*38fd1498Szrj   /* Each element of this array is an index in by_depth where the given
433*38fd1498Szrj      depth starts.  This structure is indexed by the depth we are
434*38fd1498Szrj      interested in.  */
435*38fd1498Szrj   unsigned int *depth;
436*38fd1498Szrj 
437*38fd1498Szrj   /* Current number of elements in use in by_depth below.  */
438*38fd1498Szrj   unsigned int by_depth_in_use;
439*38fd1498Szrj 
440*38fd1498Szrj   /* Maximum number of elements that can be used before resizing.  */
441*38fd1498Szrj   unsigned int by_depth_max;
442*38fd1498Szrj 
443*38fd1498Szrj   /* Each element of this array is a pointer to a page_entry, all
444*38fd1498Szrj      page_entries can be found in here by increasing depth.
445*38fd1498Szrj      index_by_depth in the page_entry is the index into this data
446*38fd1498Szrj      structure where that page_entry can be found.  This is used to
447*38fd1498Szrj      speed up finding all page_entries at a particular depth.  */
448*38fd1498Szrj   page_entry **by_depth;
449*38fd1498Szrj 
450*38fd1498Szrj   /* Each element is a pointer to the saved in_use_p bits, if any,
451*38fd1498Szrj      zero otherwise.  We allocate them all together, to enable a
452*38fd1498Szrj      better runtime data access pattern.  */
453*38fd1498Szrj   unsigned long **save_in_use;
454*38fd1498Szrj 
455*38fd1498Szrj   /* Finalizers for single objects.  The first index is collection_depth.  */
456*38fd1498Szrj   vec<vec<finalizer> > finalizers;
457*38fd1498Szrj 
458*38fd1498Szrj   /* Finalizers for vectors of objects.  */
459*38fd1498Szrj   vec<vec<vec_finalizer> > vec_finalizers;
460*38fd1498Szrj 
461*38fd1498Szrj #ifdef ENABLE_GC_ALWAYS_COLLECT
462*38fd1498Szrj   /* List of free objects to be verified as actually free on the
463*38fd1498Szrj      next collection.  */
464*38fd1498Szrj   struct free_object *free_object_list;
465*38fd1498Szrj #endif
466*38fd1498Szrj 
467*38fd1498Szrj   struct
468*38fd1498Szrj   {
469*38fd1498Szrj     /* Total GC-allocated memory.  */
470*38fd1498Szrj     unsigned long long total_allocated;
471*38fd1498Szrj     /* Total overhead for GC-allocated memory.  */
472*38fd1498Szrj     unsigned long long total_overhead;
473*38fd1498Szrj 
474*38fd1498Szrj     /* Total allocations and overhead for sizes less than 32, 64 and 128.
475*38fd1498Szrj        These sizes are interesting because they are typical cache line
476*38fd1498Szrj        sizes.  */
477*38fd1498Szrj 
478*38fd1498Szrj     unsigned long long total_allocated_under32;
479*38fd1498Szrj     unsigned long long total_overhead_under32;
480*38fd1498Szrj 
481*38fd1498Szrj     unsigned long long total_allocated_under64;
482*38fd1498Szrj     unsigned long long total_overhead_under64;
483*38fd1498Szrj 
484*38fd1498Szrj     unsigned long long total_allocated_under128;
485*38fd1498Szrj     unsigned long long total_overhead_under128;
486*38fd1498Szrj 
487*38fd1498Szrj     /* The allocations for each of the allocation orders.  */
488*38fd1498Szrj     unsigned long long total_allocated_per_order[NUM_ORDERS];
489*38fd1498Szrj 
490*38fd1498Szrj     /* The overhead for each of the allocation orders.  */
491*38fd1498Szrj     unsigned long long total_overhead_per_order[NUM_ORDERS];
492*38fd1498Szrj   } stats;
493*38fd1498Szrj } G;
494*38fd1498Szrj 
495*38fd1498Szrj /* True if a gc is currently taking place.  */
496*38fd1498Szrj 
497*38fd1498Szrj static bool in_gc = false;
498*38fd1498Szrj 
499*38fd1498Szrj /* The size in bytes required to maintain a bitmap for the objects
500*38fd1498Szrj    on a page-entry.  */
501*38fd1498Szrj #define BITMAP_SIZE(Num_objects) \
502*38fd1498Szrj   (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
503*38fd1498Szrj 
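/* For illustration: with 64-bit longs, a page holding 100 objects
   needs CEIL (100, 64) == 2 longs, so BITMAP_SIZE (100) is 16 bytes.
   (alloc_page actually passes num_objects + 1, leaving room for the
   sentinel bit set just past the last object.)  */
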
504*38fd1498Szrj /* Allocate pages in chunks of this size, to throttle calls to memory
505*38fd1498Szrj    allocation routines.  The first page is used, the rest go onto the
506*38fd1498Szrj    free list.  This cannot be larger than HOST_BITS_PER_INT for the
507*38fd1498Szrj    in_use bitmask for page_group.  Hosts that need a different value
508*38fd1498Szrj    can override this by defining GGC_QUIRE_SIZE explicitly.  */
509*38fd1498Szrj #ifndef GGC_QUIRE_SIZE
510*38fd1498Szrj # ifdef USING_MMAP
511*38fd1498Szrj #  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
512*38fd1498Szrj # else
513*38fd1498Szrj #  define GGC_QUIRE_SIZE 16
514*38fd1498Szrj # endif
515*38fd1498Szrj #endif
516*38fd1498Szrj 
517*38fd1498Szrj /* Initial guess as to how many page table entries we might need.  */
518*38fd1498Szrj #define INITIAL_PTE_COUNT 128
519*38fd1498Szrj 
520*38fd1498Szrj static page_entry *lookup_page_table_entry (const void *);
521*38fd1498Szrj static void set_page_table_entry (void *, page_entry *);
522*38fd1498Szrj #ifdef USING_MMAP
523*38fd1498Szrj static char *alloc_anon (char *, size_t, bool check);
524*38fd1498Szrj #endif
525*38fd1498Szrj #ifdef USING_MALLOC_PAGE_GROUPS
526*38fd1498Szrj static size_t page_group_index (char *, char *);
527*38fd1498Szrj static void set_page_group_in_use (page_group *, char *);
528*38fd1498Szrj static void clear_page_group_in_use (page_group *, char *);
529*38fd1498Szrj #endif
530*38fd1498Szrj static struct page_entry * alloc_page (unsigned);
531*38fd1498Szrj static void free_page (struct page_entry *);
532*38fd1498Szrj static void release_pages (void);
533*38fd1498Szrj static void clear_marks (void);
534*38fd1498Szrj static void sweep_pages (void);
535*38fd1498Szrj static void ggc_recalculate_in_use_p (page_entry *);
536*38fd1498Szrj static void compute_inverse (unsigned);
537*38fd1498Szrj static inline void adjust_depth (void);
538*38fd1498Szrj static void move_ptes_to_front (int, int);
539*38fd1498Szrj 
540*38fd1498Szrj void debug_print_page_list (int);
541*38fd1498Szrj static void push_depth (unsigned int);
542*38fd1498Szrj static void push_by_depth (page_entry *, unsigned long *);
543*38fd1498Szrj 
544*38fd1498Szrj /* Push an entry onto G.depth.  */
545*38fd1498Szrj 
546*38fd1498Szrj inline static void
547*38fd1498Szrj push_depth (unsigned int i)
548*38fd1498Szrj {
549*38fd1498Szrj   if (G.depth_in_use >= G.depth_max)
550*38fd1498Szrj     {
551*38fd1498Szrj       G.depth_max *= 2;
552*38fd1498Szrj       G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
553*38fd1498Szrj     }
554*38fd1498Szrj   G.depth[G.depth_in_use++] = i;
555*38fd1498Szrj }
556*38fd1498Szrj 
557*38fd1498Szrj /* Push an entry onto G.by_depth and G.save_in_use.  */
558*38fd1498Szrj 
559*38fd1498Szrj inline static void
560*38fd1498Szrj push_by_depth (page_entry *p, unsigned long *s)
561*38fd1498Szrj {
562*38fd1498Szrj   if (G.by_depth_in_use >= G.by_depth_max)
563*38fd1498Szrj     {
564*38fd1498Szrj       G.by_depth_max *= 2;
565*38fd1498Szrj       G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
566*38fd1498Szrj       G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
567*38fd1498Szrj 				  G.by_depth_max);
568*38fd1498Szrj     }
569*38fd1498Szrj   G.by_depth[G.by_depth_in_use] = p;
570*38fd1498Szrj   G.save_in_use[G.by_depth_in_use++] = s;
571*38fd1498Szrj }
572*38fd1498Szrj 
573*38fd1498Szrj #if (GCC_VERSION < 3001)
574*38fd1498Szrj #define prefetch(X) ((void) X)
575*38fd1498Szrj #else
576*38fd1498Szrj #define prefetch(X) __builtin_prefetch (X)
577*38fd1498Szrj #endif
578*38fd1498Szrj 
579*38fd1498Szrj #define save_in_use_p_i(__i) \
580*38fd1498Szrj   (G.save_in_use[__i])
581*38fd1498Szrj #define save_in_use_p(__p) \
582*38fd1498Szrj   (save_in_use_p_i (__p->index_by_depth))
583*38fd1498Szrj 
584*38fd1498Szrj /* Traverse the page table and find the entry for a page.
585*38fd1498Szrj    If the object wasn't allocated by the GC, return NULL.  */
586*38fd1498Szrj 
587*38fd1498Szrj static inline page_entry *
588*38fd1498Szrj safe_lookup_page_table_entry (const void *p)
589*38fd1498Szrj {
590*38fd1498Szrj   page_entry ***base;
591*38fd1498Szrj   size_t L1, L2;
592*38fd1498Szrj 
593*38fd1498Szrj #if HOST_BITS_PER_PTR <= 32
594*38fd1498Szrj   base = &G.lookup[0];
595*38fd1498Szrj #else
596*38fd1498Szrj   page_table table = G.lookup;
597*38fd1498Szrj   uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
598*38fd1498Szrj   while (1)
599*38fd1498Szrj     {
600*38fd1498Szrj       if (table == NULL)
601*38fd1498Szrj 	return NULL;
602*38fd1498Szrj       if (table->high_bits == high_bits)
603*38fd1498Szrj 	break;
604*38fd1498Szrj       table = table->next;
605*38fd1498Szrj     }
606*38fd1498Szrj   base = &table->table[0];
607*38fd1498Szrj #endif
608*38fd1498Szrj 
609*38fd1498Szrj   /* Extract the level 1 and 2 indices.  */
610*38fd1498Szrj   L1 = LOOKUP_L1 (p);
611*38fd1498Szrj   L2 = LOOKUP_L2 (p);
612*38fd1498Szrj   if (! base[L1])
613*38fd1498Szrj     return NULL;
614*38fd1498Szrj 
615*38fd1498Szrj   return base[L1][L2];
616*38fd1498Szrj }
617*38fd1498Szrj 
618*38fd1498Szrj /* Traverse the page table and find the entry for a page.
619*38fd1498Szrj    Die (probably) if the object wasn't allocated via GC.  */
620*38fd1498Szrj 
621*38fd1498Szrj static inline page_entry *
622*38fd1498Szrj lookup_page_table_entry (const void *p)
623*38fd1498Szrj {
624*38fd1498Szrj   page_entry ***base;
625*38fd1498Szrj   size_t L1, L2;
626*38fd1498Szrj 
627*38fd1498Szrj #if HOST_BITS_PER_PTR <= 32
628*38fd1498Szrj   base = &G.lookup[0];
629*38fd1498Szrj #else
630*38fd1498Szrj   page_table table = G.lookup;
631*38fd1498Szrj   uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
632*38fd1498Szrj   while (table->high_bits != high_bits)
633*38fd1498Szrj     table = table->next;
634*38fd1498Szrj   base = &table->table[0];
635*38fd1498Szrj #endif
636*38fd1498Szrj 
637*38fd1498Szrj   /* Extract the level 1 and 2 indices.  */
638*38fd1498Szrj   L1 = LOOKUP_L1 (p);
639*38fd1498Szrj   L2 = LOOKUP_L2 (p);
640*38fd1498Szrj 
641*38fd1498Szrj   return base[L1][L2];
642*38fd1498Szrj }
643*38fd1498Szrj 
644*38fd1498Szrj /* Set the page table entry for a page.  */
645*38fd1498Szrj 
646*38fd1498Szrj static void
647*38fd1498Szrj set_page_table_entry (void *p, page_entry *entry)
648*38fd1498Szrj {
649*38fd1498Szrj   page_entry ***base;
650*38fd1498Szrj   size_t L1, L2;
651*38fd1498Szrj 
652*38fd1498Szrj #if HOST_BITS_PER_PTR <= 32
653*38fd1498Szrj   base = &G.lookup[0];
654*38fd1498Szrj #else
655*38fd1498Szrj   page_table table;
656*38fd1498Szrj   uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
657*38fd1498Szrj   for (table = G.lookup; table; table = table->next)
658*38fd1498Szrj     if (table->high_bits == high_bits)
659*38fd1498Szrj       goto found;
660*38fd1498Szrj 
661*38fd1498Szrj   /* Not found -- allocate a new table.  */
662*38fd1498Szrj   table = XCNEW (struct page_table_chain);
663*38fd1498Szrj   table->next = G.lookup;
664*38fd1498Szrj   table->high_bits = high_bits;
665*38fd1498Szrj   G.lookup = table;
666*38fd1498Szrj found:
667*38fd1498Szrj   base = &table->table[0];
668*38fd1498Szrj #endif
669*38fd1498Szrj 
670*38fd1498Szrj   /* Extract the level 1 and 2 indices.  */
671*38fd1498Szrj   L1 = LOOKUP_L1 (p);
672*38fd1498Szrj   L2 = LOOKUP_L2 (p);
673*38fd1498Szrj 
674*38fd1498Szrj   if (base[L1] == NULL)
675*38fd1498Szrj     base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
676*38fd1498Szrj 
677*38fd1498Szrj   base[L1][L2] = entry;
678*38fd1498Szrj }
679*38fd1498Szrj 
680*38fd1498Szrj /* Prints the page-entry for object size ORDER, for debugging.  */
681*38fd1498Szrj 
682*38fd1498Szrj DEBUG_FUNCTION void
683*38fd1498Szrj debug_print_page_list (int order)
684*38fd1498Szrj {
685*38fd1498Szrj   page_entry *p;
686*38fd1498Szrj   printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
687*38fd1498Szrj 	  (void *) G.page_tails[order]);
688*38fd1498Szrj   p = G.pages[order];
689*38fd1498Szrj   while (p != NULL)
690*38fd1498Szrj     {
691*38fd1498Szrj       printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
692*38fd1498Szrj 	      p->num_free_objects);
693*38fd1498Szrj       p = p->next;
694*38fd1498Szrj     }
695*38fd1498Szrj   printf ("NULL\n");
696*38fd1498Szrj   fflush (stdout);
697*38fd1498Szrj }
698*38fd1498Szrj 
699*38fd1498Szrj #ifdef USING_MMAP
700*38fd1498Szrj /* Allocate SIZE bytes of anonymous memory, preferably near PREF,
701*38fd1498Szrj    (if non-null).  The ifdef structure here is intended to cause a
702*38fd1498Szrj    compile error unless exactly one of the HAVE_* is defined.  */
703*38fd1498Szrj 
704*38fd1498Szrj static inline char *
705*38fd1498Szrj alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
706*38fd1498Szrj {
707*38fd1498Szrj #ifdef HAVE_MMAP_ANON
708*38fd1498Szrj   char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
709*38fd1498Szrj 			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
710*38fd1498Szrj #endif
711*38fd1498Szrj #ifdef HAVE_MMAP_DEV_ZERO
712*38fd1498Szrj   char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
713*38fd1498Szrj 			      MAP_PRIVATE, G.dev_zero_fd, 0);
714*38fd1498Szrj #endif
715*38fd1498Szrj 
716*38fd1498Szrj   if (page == (char *) MAP_FAILED)
717*38fd1498Szrj     {
718*38fd1498Szrj       if (!check)
719*38fd1498Szrj         return NULL;
720*38fd1498Szrj       perror ("virtual memory exhausted");
721*38fd1498Szrj       exit (FATAL_EXIT_CODE);
722*38fd1498Szrj     }
723*38fd1498Szrj 
724*38fd1498Szrj   /* Remember that we allocated this memory.  */
725*38fd1498Szrj   G.bytes_mapped += size;
726*38fd1498Szrj 
727*38fd1498Szrj   /* Pretend we don't have access to the allocated pages.  We'll enable
728*38fd1498Szrj      access to smaller pieces of the area in ggc_internal_alloc.  Discard the
729*38fd1498Szrj      handle to avoid handle leak.  */
730*38fd1498Szrj   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
731*38fd1498Szrj 
732*38fd1498Szrj   return page;
733*38fd1498Szrj }
734*38fd1498Szrj #endif
735*38fd1498Szrj #ifdef USING_MALLOC_PAGE_GROUPS
736*38fd1498Szrj /* Compute the index for this page into the page group.  */
737*38fd1498Szrj 
738*38fd1498Szrj static inline size_t
739*38fd1498Szrj page_group_index (char *allocation, char *page)
740*38fd1498Szrj {
741*38fd1498Szrj   return (size_t) (page - allocation) >> G.lg_pagesize;
742*38fd1498Szrj }
743*38fd1498Szrj 
744*38fd1498Szrj /* Set and clear the in_use bit for this page in the page group.  */
745*38fd1498Szrj 
746*38fd1498Szrj static inline void
747*38fd1498Szrj set_page_group_in_use (page_group *group, char *page)
748*38fd1498Szrj {
749*38fd1498Szrj   group->in_use |= 1 << page_group_index (group->allocation, page);
750*38fd1498Szrj }
751*38fd1498Szrj 
752*38fd1498Szrj static inline void
753*38fd1498Szrj clear_page_group_in_use (page_group *group, char *page)
754*38fd1498Szrj {
755*38fd1498Szrj   group->in_use &= ~(1 << page_group_index (group->allocation, page));
756*38fd1498Szrj }
757*38fd1498Szrj #endif
758*38fd1498Szrj 
759*38fd1498Szrj /* Allocate a new page for allocating objects of size 2^ORDER,
760*38fd1498Szrj    and return an entry for it.  The entry is not added to the
761*38fd1498Szrj    appropriate page_table list.  */
762*38fd1498Szrj 
763*38fd1498Szrj static inline struct page_entry *
764*38fd1498Szrj alloc_page (unsigned order)
765*38fd1498Szrj {
766*38fd1498Szrj   struct page_entry *entry, *p, **pp;
767*38fd1498Szrj   char *page;
768*38fd1498Szrj   size_t num_objects;
769*38fd1498Szrj   size_t bitmap_size;
770*38fd1498Szrj   size_t page_entry_size;
771*38fd1498Szrj   size_t entry_size;
772*38fd1498Szrj #ifdef USING_MALLOC_PAGE_GROUPS
773*38fd1498Szrj   page_group *group;
774*38fd1498Szrj #endif
775*38fd1498Szrj 
776*38fd1498Szrj   num_objects = OBJECTS_PER_PAGE (order);
777*38fd1498Szrj   bitmap_size = BITMAP_SIZE (num_objects + 1);
778*38fd1498Szrj   page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
779*38fd1498Szrj   entry_size = num_objects * OBJECT_SIZE (order);
780*38fd1498Szrj   if (entry_size < G.pagesize)
781*38fd1498Szrj     entry_size = G.pagesize;
782*38fd1498Szrj   entry_size = PAGE_ALIGN (entry_size);
783*38fd1498Szrj 
784*38fd1498Szrj   entry = NULL;
785*38fd1498Szrj   page = NULL;
786*38fd1498Szrj 
787*38fd1498Szrj   /* Check the list of free pages for one we can use.  */
788*38fd1498Szrj   for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
789*38fd1498Szrj     if (p->bytes == entry_size)
790*38fd1498Szrj       break;
791*38fd1498Szrj 
792*38fd1498Szrj   if (p != NULL)
793*38fd1498Szrj     {
794*38fd1498Szrj       if (p->discarded)
795*38fd1498Szrj         G.bytes_mapped += p->bytes;
796*38fd1498Szrj       p->discarded = false;
797*38fd1498Szrj 
798*38fd1498Szrj       /* Recycle the allocated memory from this page ...  */
799*38fd1498Szrj       *pp = p->next;
800*38fd1498Szrj       page = p->page;
801*38fd1498Szrj 
802*38fd1498Szrj #ifdef USING_MALLOC_PAGE_GROUPS
803*38fd1498Szrj       group = p->group;
804*38fd1498Szrj #endif
805*38fd1498Szrj 
806*38fd1498Szrj       /* ... and, if possible, the page entry itself.  */
807*38fd1498Szrj       if (p->order == order)
808*38fd1498Szrj 	{
809*38fd1498Szrj 	  entry = p;
810*38fd1498Szrj 	  memset (entry, 0, page_entry_size);
811*38fd1498Szrj 	}
812*38fd1498Szrj       else
813*38fd1498Szrj 	free (p);
814*38fd1498Szrj     }
815*38fd1498Szrj #ifdef USING_MMAP
816*38fd1498Szrj   else if (entry_size == G.pagesize)
817*38fd1498Szrj     {
818*38fd1498Szrj       /* We want just one page.  Allocate a bunch of them and put the
819*38fd1498Szrj 	 extras on the freelist.  (Can only do this optimization with
820*38fd1498Szrj 	 mmap for backing store.)  */
821*38fd1498Szrj       struct page_entry *e, *f = G.free_pages;
822*38fd1498Szrj       int i, entries = GGC_QUIRE_SIZE;
823*38fd1498Szrj 
824*38fd1498Szrj       page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
825*38fd1498Szrj       if (page == NULL)
826*38fd1498Szrj      	{
827*38fd1498Szrj 	  page = alloc_anon (NULL, G.pagesize, true);
828*38fd1498Szrj           entries = 1;
829*38fd1498Szrj 	}
830*38fd1498Szrj 
831*38fd1498Szrj       /* This loop counts down so that the chain will be in ascending
832*38fd1498Szrj 	 memory order.  */
833*38fd1498Szrj       for (i = entries - 1; i >= 1; i--)
834*38fd1498Szrj 	{
835*38fd1498Szrj 	  e = XCNEWVAR (struct page_entry, page_entry_size);
836*38fd1498Szrj 	  e->order = order;
837*38fd1498Szrj 	  e->bytes = G.pagesize;
838*38fd1498Szrj 	  e->page = page + (i << G.lg_pagesize);
839*38fd1498Szrj 	  e->next = f;
840*38fd1498Szrj 	  f = e;
841*38fd1498Szrj 	}
842*38fd1498Szrj 
843*38fd1498Szrj       G.free_pages = f;
844*38fd1498Szrj     }
845*38fd1498Szrj   else
846*38fd1498Szrj     page = alloc_anon (NULL, entry_size, true);
847*38fd1498Szrj #endif
848*38fd1498Szrj #ifdef USING_MALLOC_PAGE_GROUPS
849*38fd1498Szrj   else
850*38fd1498Szrj     {
851*38fd1498Szrj       /* Allocate a large block of memory and serve out the aligned
852*38fd1498Szrj 	 pages therein.  This results in much less memory wastage
853*38fd1498Szrj 	 than the traditional implementation of valloc.  */
854*38fd1498Szrj 
855*38fd1498Szrj       char *allocation, *a, *enda;
856*38fd1498Szrj       size_t alloc_size, head_slop, tail_slop;
857*38fd1498Szrj       int multiple_pages = (entry_size == G.pagesize);
858*38fd1498Szrj 
859*38fd1498Szrj       if (multiple_pages)
860*38fd1498Szrj 	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
861*38fd1498Szrj       else
862*38fd1498Szrj 	alloc_size = entry_size + G.pagesize - 1;
863*38fd1498Szrj       allocation = XNEWVEC (char, alloc_size);
864*38fd1498Szrj 
865*38fd1498Szrj       page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
866*38fd1498Szrj       head_slop = page - allocation;
867*38fd1498Szrj       if (multiple_pages)
868*38fd1498Szrj 	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
869*38fd1498Szrj       else
870*38fd1498Szrj 	tail_slop = alloc_size - entry_size - head_slop;
871*38fd1498Szrj       enda = allocation + alloc_size - tail_slop;
872*38fd1498Szrj 
873*38fd1498Szrj       /* We allocated N pages, which are likely not aligned, leaving
874*38fd1498Szrj 	 us with N-1 usable pages.  We plan to place the page_group
875*38fd1498Szrj 	 structure somewhere in the slop.  */
876*38fd1498Szrj       if (head_slop >= sizeof (page_group))
877*38fd1498Szrj 	group = (page_group *)page - 1;
878*38fd1498Szrj       else
879*38fd1498Szrj 	{
880*38fd1498Szrj 	  /* We magically got an aligned allocation.  Too bad, we have
881*38fd1498Szrj 	     to waste a page anyway.  */
882*38fd1498Szrj 	  if (tail_slop == 0)
883*38fd1498Szrj 	    {
884*38fd1498Szrj 	      enda -= G.pagesize;
885*38fd1498Szrj 	      tail_slop += G.pagesize;
886*38fd1498Szrj 	    }
887*38fd1498Szrj 	  gcc_assert (tail_slop >= sizeof (page_group));
888*38fd1498Szrj 	  group = (page_group *)enda;
889*38fd1498Szrj 	  tail_slop -= sizeof (page_group);
890*38fd1498Szrj 	}
891*38fd1498Szrj 
892*38fd1498Szrj       /* Remember that we allocated this memory.  */
893*38fd1498Szrj       group->next = G.page_groups;
894*38fd1498Szrj       group->allocation = allocation;
895*38fd1498Szrj       group->alloc_size = alloc_size;
896*38fd1498Szrj       group->in_use = 0;
897*38fd1498Szrj       G.page_groups = group;
898*38fd1498Szrj       G.bytes_mapped += alloc_size;
899*38fd1498Szrj 
900*38fd1498Szrj       /* If we allocated multiple pages, put the rest on the free list.  */
901*38fd1498Szrj       if (multiple_pages)
902*38fd1498Szrj 	{
903*38fd1498Szrj 	  struct page_entry *e, *f = G.free_pages;
904*38fd1498Szrj 	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
905*38fd1498Szrj 	    {
906*38fd1498Szrj 	      e = XCNEWVAR (struct page_entry, page_entry_size);
907*38fd1498Szrj 	      e->order = order;
908*38fd1498Szrj 	      e->bytes = G.pagesize;
909*38fd1498Szrj 	      e->page = a;
910*38fd1498Szrj 	      e->group = group;
911*38fd1498Szrj 	      e->next = f;
912*38fd1498Szrj 	      f = e;
913*38fd1498Szrj 	    }
914*38fd1498Szrj 	  G.free_pages = f;
915*38fd1498Szrj 	}
916*38fd1498Szrj     }
917*38fd1498Szrj #endif
918*38fd1498Szrj 
919*38fd1498Szrj   if (entry == NULL)
920*38fd1498Szrj     entry = XCNEWVAR (struct page_entry, page_entry_size);
921*38fd1498Szrj 
922*38fd1498Szrj   entry->bytes = entry_size;
923*38fd1498Szrj   entry->page = page;
924*38fd1498Szrj   entry->context_depth = G.context_depth;
925*38fd1498Szrj   entry->order = order;
926*38fd1498Szrj   entry->num_free_objects = num_objects;
927*38fd1498Szrj   entry->next_bit_hint = 1;
928*38fd1498Szrj 
929*38fd1498Szrj   G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
930*38fd1498Szrj 
931*38fd1498Szrj #ifdef USING_MALLOC_PAGE_GROUPS
932*38fd1498Szrj   entry->group = group;
933*38fd1498Szrj   set_page_group_in_use (group, page);
934*38fd1498Szrj #endif
935*38fd1498Szrj 
936*38fd1498Szrj   /* Set the one-past-the-end in-use bit.  This acts as a sentinel as we
937*38fd1498Szrj      increment the hint.  */
938*38fd1498Szrj   entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
939*38fd1498Szrj     = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
940*38fd1498Szrj 
941*38fd1498Szrj   set_page_table_entry (page, entry);
942*38fd1498Szrj 
943*38fd1498Szrj   if (GGC_DEBUG_LEVEL >= 2)
944*38fd1498Szrj     fprintf (G.debug_file,
945*38fd1498Szrj 	     "Allocating page at %p, object size=%lu, data %p-%p\n",
946*38fd1498Szrj 	     (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
947*38fd1498Szrj 	     page + entry_size - 1);
948*38fd1498Szrj 
949*38fd1498Szrj   return entry;
950*38fd1498Szrj }
951*38fd1498Szrj 
952*38fd1498Szrj /* Adjust the size of G.depth so that no index greater than the one
953*38fd1498Szrj    used by the top of the G.by_depth is used.  */
954*38fd1498Szrj 
955*38fd1498Szrj static inline void
956*38fd1498Szrj adjust_depth (void)
957*38fd1498Szrj {
958*38fd1498Szrj   page_entry *top;
959*38fd1498Szrj 
960*38fd1498Szrj   if (G.by_depth_in_use)
961*38fd1498Szrj     {
962*38fd1498Szrj       top = G.by_depth[G.by_depth_in_use-1];
963*38fd1498Szrj 
964*38fd1498Szrj       /* Peel back indices in depth that index into by_depth, so that
965*38fd1498Szrj 	 as new elements are added to by_depth, we note the indices
966*38fd1498Szrj 	 of those elements, if they are for new context depths.  */
967*38fd1498Szrj       while (G.depth_in_use > (size_t)top->context_depth+1)
968*38fd1498Szrj 	--G.depth_in_use;
969*38fd1498Szrj     }
970*38fd1498Szrj }
971*38fd1498Szrj 
972*38fd1498Szrj /* For a page that is no longer needed, put it on the free page list.  */
973*38fd1498Szrj 
974*38fd1498Szrj static void
975*38fd1498Szrj free_page (page_entry *entry)
976*38fd1498Szrj {
977*38fd1498Szrj   if (GGC_DEBUG_LEVEL >= 2)
978*38fd1498Szrj     fprintf (G.debug_file,
979*38fd1498Szrj 	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
980*38fd1498Szrj 	     entry->page, entry->page + entry->bytes - 1);
981*38fd1498Szrj 
982*38fd1498Szrj   /* Mark the page as inaccessible.  Discard the handle to avoid handle
983*38fd1498Szrj      leak.  */
984*38fd1498Szrj   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
985*38fd1498Szrj 
986*38fd1498Szrj   set_page_table_entry (entry->page, NULL);
987*38fd1498Szrj 
988*38fd1498Szrj #ifdef USING_MALLOC_PAGE_GROUPS
989*38fd1498Szrj   clear_page_group_in_use (entry->group, entry->page);
990*38fd1498Szrj #endif
991*38fd1498Szrj 
992*38fd1498Szrj   if (G.by_depth_in_use > 1)
993*38fd1498Szrj     {
994*38fd1498Szrj       page_entry *top = G.by_depth[G.by_depth_in_use-1];
995*38fd1498Szrj       int i = entry->index_by_depth;
996*38fd1498Szrj 
997*38fd1498Szrj       /* We cannot free a page from a context deeper than the current
998*38fd1498Szrj 	 one.  */
999*38fd1498Szrj       gcc_assert (entry->context_depth == top->context_depth);
1000*38fd1498Szrj 
1001*38fd1498Szrj       /* Put top element into freed slot.  */
1002*38fd1498Szrj       G.by_depth[i] = top;
1003*38fd1498Szrj       G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
1004*38fd1498Szrj       top->index_by_depth = i;
1005*38fd1498Szrj     }
1006*38fd1498Szrj   --G.by_depth_in_use;
1007*38fd1498Szrj 
1008*38fd1498Szrj   adjust_depth ();
1009*38fd1498Szrj 
1010*38fd1498Szrj   entry->next = G.free_pages;
1011*38fd1498Szrj   G.free_pages = entry;
1012*38fd1498Szrj }
1013*38fd1498Szrj 
1014*38fd1498Szrj /* Release the free page cache to the system.  */
1015*38fd1498Szrj 
1016*38fd1498Szrj static void
1017*38fd1498Szrj release_pages (void)
1018*38fd1498Szrj {
1019*38fd1498Szrj #ifdef USING_MADVISE
1020*38fd1498Szrj   page_entry *p, *start_p;
1021*38fd1498Szrj   char *start;
1022*38fd1498Szrj   size_t len;
1023*38fd1498Szrj   size_t mapped_len;
1024*38fd1498Szrj   page_entry *next, *prev, *newprev;
1025*38fd1498Szrj   size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
1026*38fd1498Szrj 
1027*38fd1498Szrj   /* First free larger contiguous areas to the OS.
1028*38fd1498Szrj      This allows other allocators to grab these areas if needed.
1029*38fd1498Szrj      This is only done on larger chunks to avoid fragmentation.
1030*38fd1498Szrj      This does not always work because the free_pages list is only
1031*38fd1498Szrj      approximately sorted. */
1032*38fd1498Szrj 
1033*38fd1498Szrj   p = G.free_pages;
1034*38fd1498Szrj   prev = NULL;
1035*38fd1498Szrj   while (p)
1036*38fd1498Szrj     {
1037*38fd1498Szrj       start = p->page;
1038*38fd1498Szrj       start_p = p;
1039*38fd1498Szrj       len = 0;
1040*38fd1498Szrj       mapped_len = 0;
1041*38fd1498Szrj       newprev = prev;
1042*38fd1498Szrj       while (p && p->page == start + len)
1043*38fd1498Szrj         {
1044*38fd1498Szrj           len += p->bytes;
1045*38fd1498Szrj 	  if (!p->discarded)
1046*38fd1498Szrj 	      mapped_len += p->bytes;
1047*38fd1498Szrj 	  newprev = p;
1048*38fd1498Szrj           p = p->next;
1049*38fd1498Szrj         }
1050*38fd1498Szrj       if (len >= free_unit)
1051*38fd1498Szrj         {
1052*38fd1498Szrj           while (start_p != p)
1053*38fd1498Szrj             {
1054*38fd1498Szrj               next = start_p->next;
1055*38fd1498Szrj               free (start_p);
1056*38fd1498Szrj               start_p = next;
1057*38fd1498Szrj             }
1058*38fd1498Szrj           munmap (start, len);
1059*38fd1498Szrj 	  if (prev)
1060*38fd1498Szrj 	    prev->next = p;
1061*38fd1498Szrj           else
1062*38fd1498Szrj             G.free_pages = p;
1063*38fd1498Szrj           G.bytes_mapped -= mapped_len;
1064*38fd1498Szrj 	  continue;
1065*38fd1498Szrj         }
1066*38fd1498Szrj       prev = newprev;
1067*38fd1498Szrj    }
1068*38fd1498Szrj 
1069*38fd1498Szrj   /* Now give back the fragmented pages to the OS, but keep the address
1070*38fd1498Szrj      space to reuse it next time. */
1071*38fd1498Szrj 
1072*38fd1498Szrj   for (p = G.free_pages; p; )
1073*38fd1498Szrj     {
1074*38fd1498Szrj       if (p->discarded)
1075*38fd1498Szrj         {
1076*38fd1498Szrj           p = p->next;
1077*38fd1498Szrj           continue;
1078*38fd1498Szrj         }
1079*38fd1498Szrj       start = p->page;
1080*38fd1498Szrj       len = p->bytes;
1081*38fd1498Szrj       start_p = p;
1082*38fd1498Szrj       p = p->next;
1083*38fd1498Szrj       while (p && p->page == start + len)
1084*38fd1498Szrj         {
1085*38fd1498Szrj           len += p->bytes;
1086*38fd1498Szrj           p = p->next;
1087*38fd1498Szrj         }
1088*38fd1498Szrj       /* Give the page back to the kernel, but don't free the mapping.
1089*38fd1498Szrj          This avoids fragmentation in the virtual memory map of the
1090*38fd1498Szrj  	 process. Next time we can reuse it by just touching it. */
1091*38fd1498Szrj       madvise (start, len, MADV_DONTNEED);
1092*38fd1498Szrj       /* Don't count those pages as mapped, so that the garbage collector
1093*38fd1498Szrj          is not triggered unnecessarily. */
1094*38fd1498Szrj       G.bytes_mapped -= len;
1095*38fd1498Szrj       while (start_p != p)
1096*38fd1498Szrj         {
1097*38fd1498Szrj           start_p->discarded = true;
1098*38fd1498Szrj           start_p = start_p->next;
1099*38fd1498Szrj         }
1100*38fd1498Szrj     }
1101*38fd1498Szrj #endif
1102*38fd1498Szrj #if defined(USING_MMAP) && !defined(USING_MADVISE)
1103*38fd1498Szrj   page_entry *p, *next;
1104*38fd1498Szrj   char *start;
1105*38fd1498Szrj   size_t len;
1106*38fd1498Szrj 
1107*38fd1498Szrj   /* Gather up adjacent pages so they are unmapped together.  */
1108*38fd1498Szrj   p = G.free_pages;
1109*38fd1498Szrj 
1110*38fd1498Szrj   while (p)
1111*38fd1498Szrj     {
1112*38fd1498Szrj       start = p->page;
1113*38fd1498Szrj       next = p->next;
1114*38fd1498Szrj       len = p->bytes;
1115*38fd1498Szrj       free (p);
1116*38fd1498Szrj       p = next;
1117*38fd1498Szrj 
1118*38fd1498Szrj       while (p && p->page == start + len)
1119*38fd1498Szrj 	{
1120*38fd1498Szrj 	  next = p->next;
1121*38fd1498Szrj 	  len += p->bytes;
1122*38fd1498Szrj 	  free (p);
1123*38fd1498Szrj 	  p = next;
1124*38fd1498Szrj 	}
1125*38fd1498Szrj 
1126*38fd1498Szrj       munmap (start, len);
1127*38fd1498Szrj       G.bytes_mapped -= len;
1128*38fd1498Szrj     }
1129*38fd1498Szrj 
1130*38fd1498Szrj   G.free_pages = NULL;
1131*38fd1498Szrj #endif
1132*38fd1498Szrj #ifdef USING_MALLOC_PAGE_GROUPS
1133*38fd1498Szrj   page_entry **pp, *p;
1134*38fd1498Szrj   page_group **gp, *g;
1135*38fd1498Szrj 
1136*38fd1498Szrj   /* Remove all pages from free page groups from the list.  */
1137*38fd1498Szrj   pp = &G.free_pages;
1138*38fd1498Szrj   while ((p = *pp) != NULL)
1139*38fd1498Szrj     if (p->group->in_use == 0)
1140*38fd1498Szrj       {
1141*38fd1498Szrj 	*pp = p->next;
1142*38fd1498Szrj 	free (p);
1143*38fd1498Szrj       }
1144*38fd1498Szrj     else
1145*38fd1498Szrj       pp = &p->next;
1146*38fd1498Szrj 
1147*38fd1498Szrj   /* Remove all free page groups, and release the storage.  */
1148*38fd1498Szrj   gp = &G.page_groups;
1149*38fd1498Szrj   while ((g = *gp) != NULL)
1150*38fd1498Szrj     if (g->in_use == 0)
1151*38fd1498Szrj       {
1152*38fd1498Szrj 	*gp = g->next;
1153*38fd1498Szrj 	G.bytes_mapped -= g->alloc_size;
1154*38fd1498Szrj 	free (g->allocation);
1155*38fd1498Szrj       }
1156*38fd1498Szrj     else
1157*38fd1498Szrj       gp = &g->next;
1158*38fd1498Szrj #endif
1159*38fd1498Szrj }
1160*38fd1498Szrj 
1161*38fd1498Szrj /* This table provides a fast way to determine ceil(log_2(size)) for
1162*38fd1498Szrj    allocation requests.  The minimum allocation size is eight bytes.  */
1163*38fd1498Szrj #define NUM_SIZE_LOOKUP 512
1164*38fd1498Szrj static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1165*38fd1498Szrj {
1166*38fd1498Szrj   3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1167*38fd1498Szrj   4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1168*38fd1498Szrj   5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1169*38fd1498Szrj   6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1170*38fd1498Szrj   6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1171*38fd1498Szrj   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1172*38fd1498Szrj   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1173*38fd1498Szrj   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1174*38fd1498Szrj   7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1175*38fd1498Szrj   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1176*38fd1498Szrj   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1177*38fd1498Szrj   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1178*38fd1498Szrj   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1179*38fd1498Szrj   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1180*38fd1498Szrj   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1181*38fd1498Szrj   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1182*38fd1498Szrj   8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1183*38fd1498Szrj   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1184*38fd1498Szrj   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1185*38fd1498Szrj   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1186*38fd1498Szrj   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1187*38fd1498Szrj   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1188*38fd1498Szrj   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1189*38fd1498Szrj   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1190*38fd1498Szrj   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1191*38fd1498Szrj   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1192*38fd1498Szrj   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1193*38fd1498Szrj   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1194*38fd1498Szrj   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1195*38fd1498Szrj   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1196*38fd1498Szrj   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1197*38fd1498Szrj   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
1198*38fd1498Szrj };
1199*38fd1498Szrj 
1200*38fd1498Szrj /* For a given size of memory requested for allocation, return the
1201*38fd1498Szrj    actual size that is going to be allocated, as well as the size
1202*38fd1498Szrj    order.  */
1203*38fd1498Szrj 
1204*38fd1498Szrj static void
1205*38fd1498Szrj ggc_round_alloc_size_1 (size_t requested_size,
1206*38fd1498Szrj 			size_t *size_order,
1207*38fd1498Szrj 			size_t *alloced_size)
1208*38fd1498Szrj {
1209*38fd1498Szrj   size_t order, object_size;
1210*38fd1498Szrj 
1211*38fd1498Szrj   if (requested_size < NUM_SIZE_LOOKUP)
1212*38fd1498Szrj     {
1213*38fd1498Szrj       order = size_lookup[requested_size];
1214*38fd1498Szrj       object_size = OBJECT_SIZE (order);
1215*38fd1498Szrj     }
1216*38fd1498Szrj   else
1217*38fd1498Szrj     {
1218*38fd1498Szrj       order = 10;
1219*38fd1498Szrj       while (requested_size > (object_size = OBJECT_SIZE (order)))
1220*38fd1498Szrj         order++;
1221*38fd1498Szrj     }
1222*38fd1498Szrj 
1223*38fd1498Szrj   if (size_order)
1224*38fd1498Szrj     *size_order = order;
1225*38fd1498Szrj   if (alloced_size)
1226*38fd1498Szrj     *alloced_size = object_size;
1227*38fd1498Szrj }
1228*38fd1498Szrj 
1229*38fd1498Szrj /* For a given size of memory requested for allocation, return the
1230*38fd1498Szrj    actual size that is going to be allocated.  */
1231*38fd1498Szrj 
1232*38fd1498Szrj size_t
1233*38fd1498Szrj ggc_round_alloc_size (size_t requested_size)
1234*38fd1498Szrj {
1235*38fd1498Szrj   size_t size = 0;
1236*38fd1498Szrj 
1237*38fd1498Szrj   ggc_round_alloc_size_1 (requested_size, NULL, &size);
1238*38fd1498Szrj   return size;
1239*38fd1498Szrj }
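
/* Editorial sketch (not part of the original source): a worked example of
   the rounding above, assuming the statically initialized size_lookup table
   (i.e. before init_ggc remaps entries to any special orders) and the
   default power-of-two object sizes.  A 10-byte request hits
   size_lookup[10] == 4, so OBJECT_SIZE (4) == 16 bytes is allocated; a
   600-byte request is past NUM_SIZE_LOOKUP, so the loop scans upward from
   order 10 and stops at the first order that fits.  Guarded out so it
   cannot affect a build.  */
#if 0
static void
round_alloc_size_sketch (void)
{
  size_t order, actual;

  ggc_round_alloc_size_1 (10, &order, &actual);
  /* order == 4, actual == 16 under the assumptions above.  */

  actual = ggc_round_alloc_size (600);
  /* actual == 1024: the loop starts at order 10 and 600 <= OBJECT_SIZE (10).  */
}
#endif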
1240*38fd1498Szrj 
1241*38fd1498Szrj /* Push a finalizer onto the appropriate vec.  */
1242*38fd1498Szrj 
1243*38fd1498Szrj static void
1244*38fd1498Szrj add_finalizer (void *result, void (*f)(void *), size_t s, size_t n)
1245*38fd1498Szrj {
1246*38fd1498Szrj   if (f == NULL)
1247*38fd1498Szrj     /* No finalizer.  */;
1248*38fd1498Szrj   else if (n == 1)
1249*38fd1498Szrj     {
1250*38fd1498Szrj       finalizer fin (result, f);
1251*38fd1498Szrj       G.finalizers[G.context_depth].safe_push (fin);
1252*38fd1498Szrj     }
1253*38fd1498Szrj   else
1254*38fd1498Szrj     {
1255*38fd1498Szrj       vec_finalizer fin (reinterpret_cast<uintptr_t> (result), f, s, n);
1256*38fd1498Szrj       G.vec_finalizers[G.context_depth].safe_push (fin);
1257*38fd1498Szrj     }
1258*38fd1498Szrj }
1259*38fd1498Szrj 
1260*38fd1498Szrj /* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */
1261*38fd1498Szrj 
1262*38fd1498Szrj void *
1263*38fd1498Szrj ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n
1264*38fd1498Szrj 		    MEM_STAT_DECL)
1265*38fd1498Szrj {
1266*38fd1498Szrj   size_t order, word, bit, object_offset, object_size;
1267*38fd1498Szrj   struct page_entry *entry;
1268*38fd1498Szrj   void *result;
1269*38fd1498Szrj 
1270*38fd1498Szrj   ggc_round_alloc_size_1 (size, &order, &object_size);
1271*38fd1498Szrj 
1272*38fd1498Szrj   /* If there are non-full pages for this size allocation, they are at
1273*38fd1498Szrj      the head of the list.  */
1274*38fd1498Szrj   entry = G.pages[order];
1275*38fd1498Szrj 
1276*38fd1498Szrj   /* If there is no page for this object size, or all pages in this
1277*38fd1498Szrj      context are full, allocate a new page.  */
1278*38fd1498Szrj   if (entry == NULL || entry->num_free_objects == 0)
1279*38fd1498Szrj     {
1280*38fd1498Szrj       struct page_entry *new_entry;
1281*38fd1498Szrj       new_entry = alloc_page (order);
1282*38fd1498Szrj 
1283*38fd1498Szrj       new_entry->index_by_depth = G.by_depth_in_use;
1284*38fd1498Szrj       push_by_depth (new_entry, 0);
1285*38fd1498Szrj 
1286*38fd1498Szrj       /* We can skip context depths; if we do, make sure we go all the
1287*38fd1498Szrj 	 way to the new depth.  */
1288*38fd1498Szrj       while (new_entry->context_depth >= G.depth_in_use)
1289*38fd1498Szrj 	push_depth (G.by_depth_in_use-1);
1290*38fd1498Szrj 
1291*38fd1498Szrj       /* If this is the only entry, it's also the tail.  If it is not
1292*38fd1498Szrj 	 the only entry, then we must update the PREV pointer of the
1293*38fd1498Szrj 	 ENTRY (G.pages[order]) to point to our new page entry.  */
1294*38fd1498Szrj       if (entry == NULL)
1295*38fd1498Szrj 	G.page_tails[order] = new_entry;
1296*38fd1498Szrj       else
1297*38fd1498Szrj 	entry->prev = new_entry;
1298*38fd1498Szrj 
1299*38fd1498Szrj       /* Put new pages at the head of the page list.  By definition the
1300*38fd1498Szrj 	 entry at the head of the list always has a NULL PREV pointer.  */
1301*38fd1498Szrj       new_entry->next = entry;
1302*38fd1498Szrj       new_entry->prev = NULL;
1303*38fd1498Szrj       entry = new_entry;
1304*38fd1498Szrj       G.pages[order] = new_entry;
1305*38fd1498Szrj 
1306*38fd1498Szrj       /* For a new page, we know the word and bit positions (in the
1307*38fd1498Szrj 	 in_use bitmap) of the first available object -- they're zero.  */
1308*38fd1498Szrj       new_entry->next_bit_hint = 1;
1309*38fd1498Szrj       word = 0;
1310*38fd1498Szrj       bit = 0;
1311*38fd1498Szrj       object_offset = 0;
1312*38fd1498Szrj     }
1313*38fd1498Szrj   else
1314*38fd1498Szrj     {
1315*38fd1498Szrj       /* First try to use the hint left from the previous allocation
1316*38fd1498Szrj 	 to locate a clear bit in the in-use bitmap.  We've made sure
1317*38fd1498Szrj 	 that the one-past-the-end bit is always set, so if the hint
1318*38fd1498Szrj 	 has run over, this test will fail.  */
1319*38fd1498Szrj       unsigned hint = entry->next_bit_hint;
1320*38fd1498Szrj       word = hint / HOST_BITS_PER_LONG;
1321*38fd1498Szrj       bit = hint % HOST_BITS_PER_LONG;
1322*38fd1498Szrj 
1323*38fd1498Szrj       /* If the hint didn't work, scan the bitmap from the beginning.  */
1324*38fd1498Szrj       if ((entry->in_use_p[word] >> bit) & 1)
1325*38fd1498Szrj 	{
1326*38fd1498Szrj 	  word = bit = 0;
1327*38fd1498Szrj 	  while (~entry->in_use_p[word] == 0)
1328*38fd1498Szrj 	    ++word;
1329*38fd1498Szrj 
1330*38fd1498Szrj #if GCC_VERSION >= 3004
1331*38fd1498Szrj 	  bit = __builtin_ctzl (~entry->in_use_p[word]);
1332*38fd1498Szrj #else
1333*38fd1498Szrj 	  while ((entry->in_use_p[word] >> bit) & 1)
1334*38fd1498Szrj 	    ++bit;
1335*38fd1498Szrj #endif
1336*38fd1498Szrj 
1337*38fd1498Szrj 	  hint = word * HOST_BITS_PER_LONG + bit;
1338*38fd1498Szrj 	}
1339*38fd1498Szrj 
1340*38fd1498Szrj       /* Next time, try the next bit.  */
1341*38fd1498Szrj       entry->next_bit_hint = hint + 1;
1342*38fd1498Szrj 
1343*38fd1498Szrj       object_offset = hint * object_size;
1344*38fd1498Szrj     }
1345*38fd1498Szrj 
1346*38fd1498Szrj   /* Set the in-use bit.  */
1347*38fd1498Szrj   entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1348*38fd1498Szrj 
1349*38fd1498Szrj   /* Keep a running total of the number of free objects.  If this page
1350*38fd1498Szrj      fills up, we may have to move it to the end of the list if the
1351*38fd1498Szrj      next page isn't full.  If the next page is full, all subsequent
1352*38fd1498Szrj      pages are full, so there's no need to move it.  */
1353*38fd1498Szrj   if (--entry->num_free_objects == 0
1354*38fd1498Szrj       && entry->next != NULL
1355*38fd1498Szrj       && entry->next->num_free_objects > 0)
1356*38fd1498Szrj     {
1357*38fd1498Szrj       /* We have a new head for the list.  */
1358*38fd1498Szrj       G.pages[order] = entry->next;
1359*38fd1498Szrj 
1360*38fd1498Szrj       /* We are moving ENTRY to the end of the page table list.
1361*38fd1498Szrj 	 The new page at the head of the list will have NULL in
1362*38fd1498Szrj 	 its PREV field and ENTRY will have NULL in its NEXT field.  */
1363*38fd1498Szrj       entry->next->prev = NULL;
1364*38fd1498Szrj       entry->next = NULL;
1365*38fd1498Szrj 
1366*38fd1498Szrj       /* Append ENTRY to the tail of the list.  */
1367*38fd1498Szrj       entry->prev = G.page_tails[order];
1368*38fd1498Szrj       G.page_tails[order]->next = entry;
1369*38fd1498Szrj       G.page_tails[order] = entry;
1370*38fd1498Szrj     }
1371*38fd1498Szrj 
1372*38fd1498Szrj   /* Calculate the object's address.  */
1373*38fd1498Szrj   result = entry->page + object_offset;
1374*38fd1498Szrj   if (GATHER_STATISTICS)
1375*38fd1498Szrj     ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1376*38fd1498Szrj 			 result FINAL_PASS_MEM_STAT);
1377*38fd1498Szrj 
1378*38fd1498Szrj #ifdef ENABLE_GC_CHECKING
1379*38fd1498Szrj   /* Keep poisoning the object by writing 0xaf, in an attempt to keep the
1380*38fd1498Szrj      exact same semantics in the presence of memory bugs, regardless of
1381*38fd1498Szrj      ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
1382*38fd1498Szrj      handle to avoid a handle leak.  */
1383*38fd1498Szrj   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
1384*38fd1498Szrj 
1385*38fd1498Szrj   /* `Poison' the entire allocated object, including any padding at
1386*38fd1498Szrj      the end.  */
1387*38fd1498Szrj   memset (result, 0xaf, object_size);
1388*38fd1498Szrj 
1389*38fd1498Szrj   /* Make the bytes after the end of the object inaccessible.  Discard the
1390*38fd1498Szrj      handle to avoid handle leak.  */
1391*38fd1498Szrj   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
1392*38fd1498Szrj 						object_size - size));
1393*38fd1498Szrj #endif
1394*38fd1498Szrj 
1395*38fd1498Szrj   /* Tell Valgrind that the memory is there, but its content isn't
1396*38fd1498Szrj      defined.  The bytes at the end of the object are still marked
1397*38fd1498Szrj      inaccessible.  */
1398*38fd1498Szrj   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1399*38fd1498Szrj 
1400*38fd1498Szrj   /* Keep track of how many bytes are being allocated.  This
1401*38fd1498Szrj      information is used in deciding when to collect.  */
1402*38fd1498Szrj   G.allocated += object_size;
1403*38fd1498Szrj 
1404*38fd1498Szrj   /* For timevar statistics.  */
1405*38fd1498Szrj   timevar_ggc_mem_total += object_size;
1406*38fd1498Szrj 
1407*38fd1498Szrj   if (f)
1408*38fd1498Szrj     add_finalizer (result, f, s, n);
1409*38fd1498Szrj 
1410*38fd1498Szrj   if (GATHER_STATISTICS)
1411*38fd1498Szrj     {
1412*38fd1498Szrj       size_t overhead = object_size - size;
1413*38fd1498Szrj 
1414*38fd1498Szrj       G.stats.total_overhead += overhead;
1415*38fd1498Szrj       G.stats.total_allocated += object_size;
1416*38fd1498Szrj       G.stats.total_overhead_per_order[order] += overhead;
1417*38fd1498Szrj       G.stats.total_allocated_per_order[order] += object_size;
1418*38fd1498Szrj 
1419*38fd1498Szrj       if (size <= 32)
1420*38fd1498Szrj 	{
1421*38fd1498Szrj 	  G.stats.total_overhead_under32 += overhead;
1422*38fd1498Szrj 	  G.stats.total_allocated_under32 += object_size;
1423*38fd1498Szrj 	}
1424*38fd1498Szrj       if (size <= 64)
1425*38fd1498Szrj 	{
1426*38fd1498Szrj 	  G.stats.total_overhead_under64 += overhead;
1427*38fd1498Szrj 	  G.stats.total_allocated_under64 += object_size;
1428*38fd1498Szrj 	}
1429*38fd1498Szrj       if (size <= 128)
1430*38fd1498Szrj 	{
1431*38fd1498Szrj 	  G.stats.total_overhead_under128 += overhead;
1432*38fd1498Szrj 	  G.stats.total_allocated_under128 += object_size;
1433*38fd1498Szrj 	}
1434*38fd1498Szrj     }
1435*38fd1498Szrj 
1436*38fd1498Szrj   if (GGC_DEBUG_LEVEL >= 3)
1437*38fd1498Szrj     fprintf (G.debug_file,
1438*38fd1498Szrj 	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1439*38fd1498Szrj 	     (unsigned long) size, (unsigned long) object_size, result,
1440*38fd1498Szrj 	     (void *) entry);
1441*38fd1498Szrj 
1442*38fd1498Szrj   return result;
1443*38fd1498Szrj }
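
/* Editorial note (not part of the original source): a worked example of the
   next_bit_hint arithmetic above, assuming HOST_BITS_PER_LONG == 64, a
   4096-byte page and 16-byte objects (256 objects per page).  If the hint
   is 37, then word == 37 / 64 == 0 and bit == 37 % 64 == 37; if that bit is
   clear, the object is handed out at offset 37 * 16 == 592 from the start
   of the page and the hint is bumped to 38 for the next allocation.  If
   bit 37 is already set, the bitmap is rescanned from the start: all-ones
   words are skipped and the lowest clear bit of the first non-full word is
   found (via __builtin_ctzl when available).  */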
1444*38fd1498Szrj 
1445*38fd1498Szrj /* Mark function for strings.  */
1446*38fd1498Szrj 
1447*38fd1498Szrj void
1448*38fd1498Szrj gt_ggc_m_S (const void *p)
1449*38fd1498Szrj {
1450*38fd1498Szrj   page_entry *entry;
1451*38fd1498Szrj   unsigned bit, word;
1452*38fd1498Szrj   unsigned long mask;
1453*38fd1498Szrj   unsigned long offset;
1454*38fd1498Szrj 
1455*38fd1498Szrj   if (!p)
1456*38fd1498Szrj     return;
1457*38fd1498Szrj 
1458*38fd1498Szrj   /* Look up the page on which the object is alloced.  If it was not
1459*38fd1498Szrj      GC allocated, gracefully bail out.  */
1460*38fd1498Szrj   entry = safe_lookup_page_table_entry (p);
1461*38fd1498Szrj   if (!entry)
1462*38fd1498Szrj     return;
1463*38fd1498Szrj 
1464*38fd1498Szrj   /* Calculate the index of the object on the page; this is its bit
1465*38fd1498Szrj      position in the in_use_p bitmap.  Note that because a char* might
1466*38fd1498Szrj      point to the middle of an object, we need special code here to
1467*38fd1498Szrj      make sure P points to the start of an object.  */
1468*38fd1498Szrj   offset = ((const char *) p - entry->page) % object_size_table[entry->order];
1469*38fd1498Szrj   if (offset)
1470*38fd1498Szrj     {
1471*38fd1498Szrj       /* Here we've seen a char* which does not point to the beginning
1472*38fd1498Szrj 	 of an allocated object.  We assume it points to the middle of
1473*38fd1498Szrj 	 a STRING_CST.  */
1474*38fd1498Szrj       gcc_assert (offset == offsetof (struct tree_string, str));
1475*38fd1498Szrj       p = ((const char *) p) - offset;
1476*38fd1498Szrj       gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
1477*38fd1498Szrj       return;
1478*38fd1498Szrj     }
1479*38fd1498Szrj 
1480*38fd1498Szrj   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1481*38fd1498Szrj   word = bit / HOST_BITS_PER_LONG;
1482*38fd1498Szrj   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1483*38fd1498Szrj 
1484*38fd1498Szrj   /* If the bit was previously set, skip it.  */
1485*38fd1498Szrj   if (entry->in_use_p[word] & mask)
1486*38fd1498Szrj     return;
1487*38fd1498Szrj 
1488*38fd1498Szrj   /* Otherwise set it, and decrement the free object count.  */
1489*38fd1498Szrj   entry->in_use_p[word] |= mask;
1490*38fd1498Szrj   entry->num_free_objects -= 1;
1491*38fd1498Szrj 
1492*38fd1498Szrj   if (GGC_DEBUG_LEVEL >= 4)
1493*38fd1498Szrj     fprintf (G.debug_file, "Marking %p\n", p);
1494*38fd1498Szrj 
1495*38fd1498Szrj   return;
1496*38fd1498Szrj }
1497*38fd1498Szrj 
1498*38fd1498Szrj 
1499*38fd1498Szrj /* User-callable entry points for marking string X.  */
1500*38fd1498Szrj 
1501*38fd1498Szrj void
1502*38fd1498Szrj gt_ggc_mx (const char *& x)
1503*38fd1498Szrj {
1504*38fd1498Szrj   gt_ggc_m_S (x);
1505*38fd1498Szrj }
1506*38fd1498Szrj 
1507*38fd1498Szrj void
1508*38fd1498Szrj gt_ggc_mx (unsigned char *& x)
1509*38fd1498Szrj {
1510*38fd1498Szrj   gt_ggc_m_S (x);
1511*38fd1498Szrj }
1512*38fd1498Szrj 
1513*38fd1498Szrj void
1514*38fd1498Szrj gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
1515*38fd1498Szrj {
1516*38fd1498Szrj }
1517*38fd1498Szrj 
1518*38fd1498Szrj /* If P is not marked, mark it and return false.  Otherwise return true.
1519*38fd1498Szrj    P must have been allocated by the GC allocator; it mustn't point to
1520*38fd1498Szrj    static objects, stack variables, or memory allocated with malloc.  */
1521*38fd1498Szrj 
1522*38fd1498Szrj int
1523*38fd1498Szrj ggc_set_mark (const void *p)
1524*38fd1498Szrj {
1525*38fd1498Szrj   page_entry *entry;
1526*38fd1498Szrj   unsigned bit, word;
1527*38fd1498Szrj   unsigned long mask;
1528*38fd1498Szrj 
1529*38fd1498Szrj   /* Look up the page on which the object is alloced.  If the object
1530*38fd1498Szrj      wasn't allocated by the collector, we'll probably die.  */
1531*38fd1498Szrj   entry = lookup_page_table_entry (p);
1532*38fd1498Szrj   gcc_assert (entry);
1533*38fd1498Szrj 
1534*38fd1498Szrj   /* Calculate the index of the object on the page; this is its bit
1535*38fd1498Szrj      position in the in_use_p bitmap.  */
1536*38fd1498Szrj   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1537*38fd1498Szrj   word = bit / HOST_BITS_PER_LONG;
1538*38fd1498Szrj   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1539*38fd1498Szrj 
1540*38fd1498Szrj   /* If the bit was previously set, skip it.  */
1541*38fd1498Szrj   if (entry->in_use_p[word] & mask)
1542*38fd1498Szrj     return 1;
1543*38fd1498Szrj 
1544*38fd1498Szrj   /* Otherwise set it, and decrement the free object count.  */
1545*38fd1498Szrj   entry->in_use_p[word] |= mask;
1546*38fd1498Szrj   entry->num_free_objects -= 1;
1547*38fd1498Szrj 
1548*38fd1498Szrj   if (GGC_DEBUG_LEVEL >= 4)
1549*38fd1498Szrj     fprintf (G.debug_file, "Marking %p\n", p);
1550*38fd1498Szrj 
1551*38fd1498Szrj   return 0;
1552*38fd1498Szrj }
1553*38fd1498Szrj 
1554*38fd1498Szrj /* Return 1 if P has been marked, zero otherwise.
1555*38fd1498Szrj    P must have been allocated by the GC allocator; it mustn't point to
1556*38fd1498Szrj    static objects, stack variables, or memory allocated with malloc.  */
1557*38fd1498Szrj 
1558*38fd1498Szrj int
1559*38fd1498Szrj ggc_marked_p (const void *p)
1560*38fd1498Szrj {
1561*38fd1498Szrj   page_entry *entry;
1562*38fd1498Szrj   unsigned bit, word;
1563*38fd1498Szrj   unsigned long mask;
1564*38fd1498Szrj 
1565*38fd1498Szrj   /* Look up the page on which the object is alloced.  If the object
1566*38fd1498Szrj      wasn't allocated by the collector, we'll probably die.  */
1567*38fd1498Szrj   entry = lookup_page_table_entry (p);
1568*38fd1498Szrj   gcc_assert (entry);
1569*38fd1498Szrj 
1570*38fd1498Szrj   /* Calculate the index of the object on the page; this is its bit
1571*38fd1498Szrj      position in the in_use_p bitmap.  */
1572*38fd1498Szrj   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1573*38fd1498Szrj   word = bit / HOST_BITS_PER_LONG;
1574*38fd1498Szrj   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1575*38fd1498Szrj 
1576*38fd1498Szrj   return (entry->in_use_p[word] & mask) != 0;
1577*38fd1498Szrj }
1578*38fd1498Szrj 
1579*38fd1498Szrj /* Return the size of the gc-able object P.  */
1580*38fd1498Szrj 
1581*38fd1498Szrj size_t
1582*38fd1498Szrj ggc_get_size (const void *p)
1583*38fd1498Szrj {
1584*38fd1498Szrj   page_entry *pe = lookup_page_table_entry (p);
1585*38fd1498Szrj   return OBJECT_SIZE (pe->order);
1586*38fd1498Szrj }
1587*38fd1498Szrj 
1588*38fd1498Szrj /* Release the memory for object P.  */
1589*38fd1498Szrj 
1590*38fd1498Szrj void
1591*38fd1498Szrj ggc_free (void *p)
1592*38fd1498Szrj {
1593*38fd1498Szrj   if (in_gc)
1594*38fd1498Szrj     return;
1595*38fd1498Szrj 
1596*38fd1498Szrj   page_entry *pe = lookup_page_table_entry (p);
1597*38fd1498Szrj   size_t order = pe->order;
1598*38fd1498Szrj   size_t size = OBJECT_SIZE (order);
1599*38fd1498Szrj 
1600*38fd1498Szrj   if (GATHER_STATISTICS)
1601*38fd1498Szrj     ggc_free_overhead (p);
1602*38fd1498Szrj 
1603*38fd1498Szrj   if (GGC_DEBUG_LEVEL >= 3)
1604*38fd1498Szrj     fprintf (G.debug_file,
1605*38fd1498Szrj 	     "Freeing object, actual size=%lu, at %p on %p\n",
1606*38fd1498Szrj 	     (unsigned long) size, p, (void *) pe);
1607*38fd1498Szrj 
1608*38fd1498Szrj #ifdef ENABLE_GC_CHECKING
1609*38fd1498Szrj   /* Poison the data, to indicate the data is garbage.  */
1610*38fd1498Szrj   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
1611*38fd1498Szrj   memset (p, 0xa5, size);
1612*38fd1498Szrj #endif
1613*38fd1498Szrj   /* Let valgrind know the object is free.  */
1614*38fd1498Szrj   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
1615*38fd1498Szrj 
1616*38fd1498Szrj #ifdef ENABLE_GC_ALWAYS_COLLECT
1617*38fd1498Szrj   /* In the completely-anal-checking mode, we do *not* immediately free
1618*38fd1498Szrj      the data, but instead verify that the data is *actually* not
1619*38fd1498Szrj      reachable the next time we collect.  */
1620*38fd1498Szrj   {
1621*38fd1498Szrj     struct free_object *fo = XNEW (struct free_object);
1622*38fd1498Szrj     fo->object = p;
1623*38fd1498Szrj     fo->next = G.free_object_list;
1624*38fd1498Szrj     G.free_object_list = fo;
1625*38fd1498Szrj   }
1626*38fd1498Szrj #else
1627*38fd1498Szrj   {
1628*38fd1498Szrj     unsigned int bit_offset, word, bit;
1629*38fd1498Szrj 
1630*38fd1498Szrj     G.allocated -= size;
1631*38fd1498Szrj 
1632*38fd1498Szrj     /* Mark the object not-in-use.  */
1633*38fd1498Szrj     bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1634*38fd1498Szrj     word = bit_offset / HOST_BITS_PER_LONG;
1635*38fd1498Szrj     bit = bit_offset % HOST_BITS_PER_LONG;
1636*38fd1498Szrj     pe->in_use_p[word] &= ~(1UL << bit);
1637*38fd1498Szrj 
1638*38fd1498Szrj     if (pe->num_free_objects++ == 0)
1639*38fd1498Szrj       {
1640*38fd1498Szrj 	page_entry *p, *q;
1641*38fd1498Szrj 
1642*38fd1498Szrj 	/* If the page is completely full, then it's supposed to
1643*38fd1498Szrj 	   be after all pages that aren't.  Since we've freed one
1644*38fd1498Szrj 	   object from a page that was full, we need to move the
1645*38fd1498Szrj 	   page to the head of the list.
1646*38fd1498Szrj 
1647*38fd1498Szrj 	   PE is the node we want to move.  Q is the previous node
1648*38fd1498Szrj 	   and P is the next node in the list.  */
1649*38fd1498Szrj 	q = pe->prev;
1650*38fd1498Szrj 	if (q && q->num_free_objects == 0)
1651*38fd1498Szrj 	  {
1652*38fd1498Szrj 	    p = pe->next;
1653*38fd1498Szrj 
1654*38fd1498Szrj 	    q->next = p;
1655*38fd1498Szrj 
1656*38fd1498Szrj 	    /* If PE was at the end of the list, then Q becomes the
1657*38fd1498Szrj 	       new end of the list.  If PE was not the end of the
1658*38fd1498Szrj 	       list, then we need to update the PREV field for P.  */
1659*38fd1498Szrj 	    if (!p)
1660*38fd1498Szrj 	      G.page_tails[order] = q;
1661*38fd1498Szrj 	    else
1662*38fd1498Szrj 	      p->prev = q;
1663*38fd1498Szrj 
1664*38fd1498Szrj 	    /* Move PE to the head of the list.  */
1665*38fd1498Szrj 	    pe->next = G.pages[order];
1666*38fd1498Szrj 	    pe->prev = NULL;
1667*38fd1498Szrj 	    G.pages[order]->prev = pe;
1668*38fd1498Szrj 	    G.pages[order] = pe;
1669*38fd1498Szrj 	  }
1670*38fd1498Szrj 
1671*38fd1498Szrj 	/* Reset the hint bit to point to the only free object.  */
1672*38fd1498Szrj 	pe->next_bit_hint = bit_offset;
1673*38fd1498Szrj       }
1674*38fd1498Szrj   }
1675*38fd1498Szrj #endif
1676*38fd1498Szrj }
1677*38fd1498Szrj 
1678*38fd1498Szrj /* Subroutine of init_ggc which computes the pair of numbers used to
1679*38fd1498Szrj    perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1680*38fd1498Szrj 
1681*38fd1498Szrj    This algorithm is taken from Granlund and Montgomery's paper
1682*38fd1498Szrj    "Division by Invariant Integers using Multiplication"
1683*38fd1498Szrj    (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1684*38fd1498Szrj    constants).  */
1685*38fd1498Szrj 
1686*38fd1498Szrj static void
1687*38fd1498Szrj compute_inverse (unsigned order)
1688*38fd1498Szrj {
1689*38fd1498Szrj   size_t size, inv;
1690*38fd1498Szrj   unsigned int e;
1691*38fd1498Szrj 
1692*38fd1498Szrj   size = OBJECT_SIZE (order);
1693*38fd1498Szrj   e = 0;
1694*38fd1498Szrj   while (size % 2 == 0)
1695*38fd1498Szrj     {
1696*38fd1498Szrj       e++;
1697*38fd1498Szrj       size >>= 1;
1698*38fd1498Szrj     }
1699*38fd1498Szrj 
1700*38fd1498Szrj   inv = size;
1701*38fd1498Szrj   while (inv * size != 1)
1702*38fd1498Szrj     inv = inv * (2 - inv*size);
1703*38fd1498Szrj 
1704*38fd1498Szrj   DIV_MULT (order) = inv;
1705*38fd1498Szrj   DIV_SHIFT (order) = e;
1706*38fd1498Szrj }
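
/* Editorial sketch (not part of the original source): a self-contained
   check of the exact-division trick above, assuming a 64-bit size_t.  For
   an object size of 24 == 3 << 3, the first loop strips the factor of
   eight (DIV_SHIFT == 3) and the Newton iteration converges on the inverse
   of 3 modulo 2^64, so dividing an exact multiple of 24 reduces to one
   multiply and one shift -- which is how OFFSET_TO_BIT is assumed to use
   DIV_MULT and DIV_SHIFT elsewhere in this file.  Guarded out so it cannot
   affect a build.  */
#if 0
static void
exact_division_sketch (void)
{
  size_t size = 24, inv;
  unsigned int e = 0;

  while (size % 2 == 0)		/* 24 -> 3; E ends up as 3 (DIV_SHIFT).  */
    e++, size >>= 1;

  inv = size;			/* Each Newton step doubles the number of
				   correct low-order bits of the inverse.  */
  while (inv * size != 1)
    inv = inv * (2 - inv * size);

  /* 120 == 5 * 24: one multiply and one shift recover the quotient 5.  */
  gcc_assert (((120 * inv) >> e) == 5);
}
#endif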
1707*38fd1498Szrj 
1708*38fd1498Szrj /* Initialize the ggc-mmap allocator.  */
1709*38fd1498Szrj void
1710*38fd1498Szrj init_ggc (void)
1711*38fd1498Szrj {
1712*38fd1498Szrj   static bool init_p = false;
1713*38fd1498Szrj   unsigned order;
1714*38fd1498Szrj 
1715*38fd1498Szrj   if (init_p)
1716*38fd1498Szrj     return;
1717*38fd1498Szrj   init_p = true;
1718*38fd1498Szrj 
1719*38fd1498Szrj   G.pagesize = getpagesize ();
1720*38fd1498Szrj   G.lg_pagesize = exact_log2 (G.pagesize);
1721*38fd1498Szrj 
1722*38fd1498Szrj #ifdef HAVE_MMAP_DEV_ZERO
1723*38fd1498Szrj   G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1724*38fd1498Szrj   if (G.dev_zero_fd == -1)
1725*38fd1498Szrj     internal_error ("open /dev/zero: %m");
1726*38fd1498Szrj #endif
1727*38fd1498Szrj 
1728*38fd1498Szrj #if 0
1729*38fd1498Szrj   G.debug_file = fopen ("ggc-mmap.debug", "w");
1730*38fd1498Szrj #else
1731*38fd1498Szrj   G.debug_file = stdout;
1732*38fd1498Szrj #endif
1733*38fd1498Szrj 
1734*38fd1498Szrj #ifdef USING_MMAP
1735*38fd1498Szrj   /* StunOS has an amazing off-by-one error for the first mmap allocation
1736*38fd1498Szrj      after fiddling with RLIMIT_STACK.  The result, as hard as it is to
1737*38fd1498Szrj      believe, is an unaligned page allocation, which would cause us to
1738*38fd1498Szrj      hork badly if we tried to use it.  */
1739*38fd1498Szrj   {
1740*38fd1498Szrj     char *p = alloc_anon (NULL, G.pagesize, true);
1741*38fd1498Szrj     struct page_entry *e;
1742*38fd1498Szrj     if ((uintptr_t)p & (G.pagesize - 1))
1743*38fd1498Szrj       {
1744*38fd1498Szrj 	/* How losing.  Discard this one and try another.  If we still
1745*38fd1498Szrj 	   can't get something useful, give up.  */
1746*38fd1498Szrj 
1747*38fd1498Szrj 	p = alloc_anon (NULL, G.pagesize, true);
1748*38fd1498Szrj 	gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
1749*38fd1498Szrj       }
1750*38fd1498Szrj 
1751*38fd1498Szrj     /* We have a good page, might as well hold onto it...  */
1752*38fd1498Szrj     e = XCNEW (struct page_entry);
1753*38fd1498Szrj     e->bytes = G.pagesize;
1754*38fd1498Szrj     e->page = p;
1755*38fd1498Szrj     e->next = G.free_pages;
1756*38fd1498Szrj     G.free_pages = e;
1757*38fd1498Szrj   }
1758*38fd1498Szrj #endif
1759*38fd1498Szrj 
1760*38fd1498Szrj   /* Initialize the object size table.  */
1761*38fd1498Szrj   for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1762*38fd1498Szrj     object_size_table[order] = (size_t) 1 << order;
1763*38fd1498Szrj   for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1764*38fd1498Szrj     {
1765*38fd1498Szrj       size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1766*38fd1498Szrj 
1767*38fd1498Szrj       /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1768*38fd1498Szrj 	 so that we're sure of getting aligned memory.  */
1769*38fd1498Szrj       s = ROUND_UP (s, MAX_ALIGNMENT);
1770*38fd1498Szrj       object_size_table[order] = s;
1771*38fd1498Szrj     }
1772*38fd1498Szrj 
1773*38fd1498Szrj   /* Initialize the objects-per-page and inverse tables.  */
1774*38fd1498Szrj   for (order = 0; order < NUM_ORDERS; ++order)
1775*38fd1498Szrj     {
1776*38fd1498Szrj       objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1777*38fd1498Szrj       if (objects_per_page_table[order] == 0)
1778*38fd1498Szrj 	objects_per_page_table[order] = 1;
1779*38fd1498Szrj       compute_inverse (order);
1780*38fd1498Szrj     }
1781*38fd1498Szrj 
1782*38fd1498Szrj   /* Reset the size_lookup array to put appropriately sized objects in
1783*38fd1498Szrj      the special orders.  All objects bigger than the previous power
1784*38fd1498Szrj      of two, but no greater than the special size, should go in the
1785*38fd1498Szrj      new order.  */
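  /* Editorial note (not part of the original source): for example, if a
     special order had OBJECT_SIZE equal to 96 (a purely hypothetical
     value), the loop below would walk I down from 96 and retag every size
     that currently maps to the 128-byte order -- i.e. sizes 65..96 -- so
     those requests use the tighter 96-byte order instead, assuming the
     default power-of-two orders for everything else.  */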
1786*38fd1498Szrj   for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1787*38fd1498Szrj     {
1788*38fd1498Szrj       int o;
1789*38fd1498Szrj       int i;
1790*38fd1498Szrj 
1791*38fd1498Szrj       i = OBJECT_SIZE (order);
1792*38fd1498Szrj       if (i >= NUM_SIZE_LOOKUP)
1793*38fd1498Szrj 	continue;
1794*38fd1498Szrj 
1795*38fd1498Szrj       for (o = size_lookup[i]; o == size_lookup [i]; --i)
1796*38fd1498Szrj 	size_lookup[i] = order;
1797*38fd1498Szrj     }
1798*38fd1498Szrj 
1799*38fd1498Szrj   G.depth_in_use = 0;
1800*38fd1498Szrj   G.depth_max = 10;
1801*38fd1498Szrj   G.depth = XNEWVEC (unsigned int, G.depth_max);
1802*38fd1498Szrj 
1803*38fd1498Szrj   G.by_depth_in_use = 0;
1804*38fd1498Szrj   G.by_depth_max = INITIAL_PTE_COUNT;
1805*38fd1498Szrj   G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1806*38fd1498Szrj   G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1807*38fd1498Szrj 
1808*38fd1498Szrj   /* Allocate space for the depth 0 finalizers.  */
1809*38fd1498Szrj   G.finalizers.safe_push (vNULL);
1810*38fd1498Szrj   G.vec_finalizers.safe_push (vNULL);
1811*38fd1498Szrj   gcc_assert (G.finalizers.length() == 1);
1812*38fd1498Szrj }
1813*38fd1498Szrj 
1814*38fd1498Szrj /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1815*38fd1498Szrj    reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */
1816*38fd1498Szrj 
1817*38fd1498Szrj static void
1818*38fd1498Szrj ggc_recalculate_in_use_p (page_entry *p)
1819*38fd1498Szrj {
1820*38fd1498Szrj   unsigned int i;
1821*38fd1498Szrj   size_t num_objects;
1822*38fd1498Szrj 
1823*38fd1498Szrj   /* Because the past-the-end bit in in_use_p is always set, we
1824*38fd1498Szrj      pretend there is one additional object.  */
1825*38fd1498Szrj   num_objects = OBJECTS_IN_PAGE (p) + 1;
1826*38fd1498Szrj 
1827*38fd1498Szrj   /* Reset the free object count.  */
1828*38fd1498Szrj   p->num_free_objects = num_objects;
1829*38fd1498Szrj 
1830*38fd1498Szrj   /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
1831*38fd1498Szrj   for (i = 0;
1832*38fd1498Szrj        i < CEIL (BITMAP_SIZE (num_objects),
1833*38fd1498Szrj 		 sizeof (*p->in_use_p));
1834*38fd1498Szrj        ++i)
1835*38fd1498Szrj     {
1836*38fd1498Szrj       unsigned long j;
1837*38fd1498Szrj 
1838*38fd1498Szrj       /* Something is in use if it is marked, or if it was in use in a
1839*38fd1498Szrj 	 context further down the context stack.  */
1840*38fd1498Szrj       p->in_use_p[i] |= save_in_use_p (p)[i];
1841*38fd1498Szrj 
1842*38fd1498Szrj       /* Decrement the free object count for every object allocated.  */
1843*38fd1498Szrj       for (j = p->in_use_p[i]; j; j >>= 1)
1844*38fd1498Szrj 	p->num_free_objects -= (j & 1);
1845*38fd1498Szrj     }
1846*38fd1498Szrj 
1847*38fd1498Szrj   gcc_assert (p->num_free_objects < num_objects);
1848*38fd1498Szrj }
1849*38fd1498Szrj 
1850*38fd1498Szrj /* Unmark all objects.  */
1851*38fd1498Szrj 
1852*38fd1498Szrj static void
1853*38fd1498Szrj clear_marks (void)
1854*38fd1498Szrj {
1855*38fd1498Szrj   unsigned order;
1856*38fd1498Szrj 
1857*38fd1498Szrj   for (order = 2; order < NUM_ORDERS; order++)
1858*38fd1498Szrj     {
1859*38fd1498Szrj       page_entry *p;
1860*38fd1498Szrj 
1861*38fd1498Szrj       for (p = G.pages[order]; p != NULL; p = p->next)
1862*38fd1498Szrj 	{
1863*38fd1498Szrj 	  size_t num_objects = OBJECTS_IN_PAGE (p);
1864*38fd1498Szrj 	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1865*38fd1498Szrj 
1866*38fd1498Szrj 	  /* The data should be page-aligned.  */
1867*38fd1498Szrj 	  gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));
1868*38fd1498Szrj 
1869*38fd1498Szrj 	  /* Pages that aren't in the topmost context are not collected;
1870*38fd1498Szrj 	     nevertheless, we need their in-use bit vectors to store GC
1871*38fd1498Szrj 	     marks.  So, back them up first.  */
1872*38fd1498Szrj 	  if (p->context_depth < G.context_depth)
1873*38fd1498Szrj 	    {
1874*38fd1498Szrj 	      if (! save_in_use_p (p))
1875*38fd1498Szrj 		save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
1876*38fd1498Szrj 	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1877*38fd1498Szrj 	    }
1878*38fd1498Szrj 
1879*38fd1498Szrj 	  /* Reset the number of free objects and clear the
1880*38fd1498Szrj              in-use bits.  These will be adjusted by mark_obj.  */
1881*38fd1498Szrj 	  p->num_free_objects = num_objects;
1882*38fd1498Szrj 	  memset (p->in_use_p, 0, bitmap_size);
1883*38fd1498Szrj 
1884*38fd1498Szrj 	  /* Make sure the one-past-the-end bit is always set.  */
1885*38fd1498Szrj 	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1886*38fd1498Szrj 	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1887*38fd1498Szrj 	}
1888*38fd1498Szrj     }
1889*38fd1498Szrj }
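
/* Editorial note (not part of the original source): the one-past-the-end
   bit set above is what lets ggc_internal_alloc trust next_bit_hint without
   a separate bounds check.  As a worked example, assuming
   HOST_BITS_PER_LONG == 64 and a page holding 255 objects, clear_marks sets
   in_use_p[255 / 64] == in_use_p[3] to 1UL << (255 % 64), i.e. bit 63 --
   the position just past the last real object -- so a hint that has run off
   the end always reads as "in use" and falls back to the bitmap scan.  */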
1890*38fd1498Szrj 
1891*38fd1498Szrj /* Check if any blocks with a registered finalizer have become unmarked.  If so,
1892*38fd1498Szrj    run the finalizer and unregister it, because the block is about to be freed.
1893*38fd1498Szrj    Note that no guarantee is made about what order finalizers will run in, so
1894*38fd1498Szrj    touching other objects in gc memory is extremely unwise.  */
1895*38fd1498Szrj 
1896*38fd1498Szrj static void
1897*38fd1498Szrj ggc_handle_finalizers ()
1898*38fd1498Szrj {
1899*38fd1498Szrj   unsigned dlen = G.finalizers.length();
1900*38fd1498Szrj   for (unsigned d = G.context_depth; d < dlen; ++d)
1901*38fd1498Szrj     {
1902*38fd1498Szrj       vec<finalizer> &v = G.finalizers[d];
1903*38fd1498Szrj       unsigned length = v.length ();
1904*38fd1498Szrj       for (unsigned int i = 0; i < length;)
1905*38fd1498Szrj 	{
1906*38fd1498Szrj 	  finalizer &f = v[i];
1907*38fd1498Szrj 	  if (!ggc_marked_p (f.addr ()))
1908*38fd1498Szrj 	    {
1909*38fd1498Szrj 	      f.call ();
1910*38fd1498Szrj 	      v.unordered_remove (i);
1911*38fd1498Szrj 	      length--;
1912*38fd1498Szrj 	    }
1913*38fd1498Szrj 	  else
1914*38fd1498Szrj 	    i++;
1915*38fd1498Szrj 	}
1916*38fd1498Szrj     }
1917*38fd1498Szrj 
1918*38fd1498Szrj   gcc_assert (dlen == G.vec_finalizers.length());
1919*38fd1498Szrj   for (unsigned d = G.context_depth; d < dlen; ++d)
1920*38fd1498Szrj     {
1921*38fd1498Szrj       vec<vec_finalizer> &vv = G.vec_finalizers[d];
1922*38fd1498Szrj       unsigned length = vv.length ();
1923*38fd1498Szrj       for (unsigned int i = 0; i < length;)
1924*38fd1498Szrj 	{
1925*38fd1498Szrj 	  vec_finalizer &f = vv[i];
1926*38fd1498Szrj 	  if (!ggc_marked_p (f.addr ()))
1927*38fd1498Szrj 	    {
1928*38fd1498Szrj 	      f.call ();
1929*38fd1498Szrj 	      vv.unordered_remove (i);
1930*38fd1498Szrj 	      length--;
1931*38fd1498Szrj 	    }
1932*38fd1498Szrj 	  else
1933*38fd1498Szrj 	    i++;
1934*38fd1498Szrj 	}
1935*38fd1498Szrj     }
1936*38fd1498Szrj }
1937*38fd1498Szrj 
1938*38fd1498Szrj /* Free all empty pages.  Partially empty pages need no attention
1939*38fd1498Szrj    because the `mark' bit doubles as an `unused' bit.  */
1940*38fd1498Szrj 
1941*38fd1498Szrj static void
1942*38fd1498Szrj sweep_pages (void)
1943*38fd1498Szrj {
1944*38fd1498Szrj   unsigned order;
1945*38fd1498Szrj 
1946*38fd1498Szrj   for (order = 2; order < NUM_ORDERS; order++)
1947*38fd1498Szrj     {
1948*38fd1498Szrj       /* The last page-entry to consider, regardless of entries
1949*38fd1498Szrj 	 placed at the end of the list.  */
1950*38fd1498Szrj       page_entry * const last = G.page_tails[order];
1951*38fd1498Szrj 
1952*38fd1498Szrj       size_t num_objects;
1953*38fd1498Szrj       size_t live_objects;
1954*38fd1498Szrj       page_entry *p, *previous;
1955*38fd1498Szrj       int done;
1956*38fd1498Szrj 
1957*38fd1498Szrj       p = G.pages[order];
1958*38fd1498Szrj       if (p == NULL)
1959*38fd1498Szrj 	continue;
1960*38fd1498Szrj 
1961*38fd1498Szrj       previous = NULL;
1962*38fd1498Szrj       do
1963*38fd1498Szrj 	{
1964*38fd1498Szrj 	  page_entry *next = p->next;
1965*38fd1498Szrj 
1966*38fd1498Szrj 	  /* Loop until all entries have been examined.  */
1967*38fd1498Szrj 	  done = (p == last);
1968*38fd1498Szrj 
1969*38fd1498Szrj 	  num_objects = OBJECTS_IN_PAGE (p);
1970*38fd1498Szrj 
1971*38fd1498Szrj 	  /* Add all live objects on this page to the count of
1972*38fd1498Szrj              allocated memory.  */
1973*38fd1498Szrj 	  live_objects = num_objects - p->num_free_objects;
1974*38fd1498Szrj 
1975*38fd1498Szrj 	  G.allocated += OBJECT_SIZE (order) * live_objects;
1976*38fd1498Szrj 
1977*38fd1498Szrj 	  /* Only objects on pages in the topmost context should get
1978*38fd1498Szrj 	     collected.  */
1979*38fd1498Szrj 	  if (p->context_depth < G.context_depth)
1980*38fd1498Szrj 	    ;
1981*38fd1498Szrj 
1982*38fd1498Szrj 	  /* Remove the page if it's empty.  */
1983*38fd1498Szrj 	  else if (live_objects == 0)
1984*38fd1498Szrj 	    {
1985*38fd1498Szrj 	      /* If P was the first page in the list, then NEXT
1986*38fd1498Szrj 		 becomes the new first page in the list, otherwise
1987*38fd1498Szrj 		 splice P out of the forward pointers.  */
1988*38fd1498Szrj 	      if (! previous)
1989*38fd1498Szrj 		G.pages[order] = next;
1990*38fd1498Szrj 	      else
1991*38fd1498Szrj 		previous->next = next;
1992*38fd1498Szrj 
1993*38fd1498Szrj 	      /* Splice P out of the back pointers too.  */
1994*38fd1498Szrj 	      if (next)
1995*38fd1498Szrj 		next->prev = previous;
1996*38fd1498Szrj 
1997*38fd1498Szrj 	      /* Are we removing the last element?  */
1998*38fd1498Szrj 	      if (p == G.page_tails[order])
1999*38fd1498Szrj 		G.page_tails[order] = previous;
2000*38fd1498Szrj 	      free_page (p);
2001*38fd1498Szrj 	      p = previous;
2002*38fd1498Szrj 	    }
2003*38fd1498Szrj 
2004*38fd1498Szrj 	  /* If the page is full, move it to the end.  */
2005*38fd1498Szrj 	  else if (p->num_free_objects == 0)
2006*38fd1498Szrj 	    {
2007*38fd1498Szrj 	      /* Don't move it if it's already at the end.  */
2008*38fd1498Szrj 	      if (p != G.page_tails[order])
2009*38fd1498Szrj 		{
2010*38fd1498Szrj 		  /* Move p to the end of the list.  */
2011*38fd1498Szrj 		  p->next = NULL;
2012*38fd1498Szrj 		  p->prev = G.page_tails[order];
2013*38fd1498Szrj 		  G.page_tails[order]->next = p;
2014*38fd1498Szrj 
2015*38fd1498Szrj 		  /* Update the tail pointer...  */
2016*38fd1498Szrj 		  G.page_tails[order] = p;
2017*38fd1498Szrj 
2018*38fd1498Szrj 		  /* ... and the head pointer, if necessary.  */
2019*38fd1498Szrj 		  if (! previous)
2020*38fd1498Szrj 		    G.pages[order] = next;
2021*38fd1498Szrj 		  else
2022*38fd1498Szrj 		    previous->next = next;
2023*38fd1498Szrj 
2024*38fd1498Szrj 		  /* And update the backpointer in NEXT if necessary.  */
2025*38fd1498Szrj 		  if (next)
2026*38fd1498Szrj 		    next->prev = previous;
2027*38fd1498Szrj 
2028*38fd1498Szrj 		  p = previous;
2029*38fd1498Szrj 		}
2030*38fd1498Szrj 	    }
2031*38fd1498Szrj 
2032*38fd1498Szrj 	  /* If we've fallen through to here, it's a page in the
2033*38fd1498Szrj 	     topmost context that is neither full nor empty.  Such a
2034*38fd1498Szrj 	     page must precede pages at lesser context depth in the
2035*38fd1498Szrj 	     list, so move it to the head.  */
2036*38fd1498Szrj 	  else if (p != G.pages[order])
2037*38fd1498Szrj 	    {
2038*38fd1498Szrj 	      previous->next = p->next;
2039*38fd1498Szrj 
2040*38fd1498Szrj 	      /* Update the backchain in the next node if it exists.  */
2041*38fd1498Szrj 	      if (p->next)
2042*38fd1498Szrj 		p->next->prev = previous;
2043*38fd1498Szrj 
2044*38fd1498Szrj 	      /* Move P to the head of the list.  */
2045*38fd1498Szrj 	      p->next = G.pages[order];
2046*38fd1498Szrj 	      p->prev = NULL;
2047*38fd1498Szrj 	      G.pages[order]->prev = p;
2048*38fd1498Szrj 
2049*38fd1498Szrj 	      /* Update the head pointer.  */
2050*38fd1498Szrj 	      G.pages[order] = p;
2051*38fd1498Szrj 
2052*38fd1498Szrj 	      /* Are we moving the last element?  */
2053*38fd1498Szrj 	      if (G.page_tails[order] == p)
2054*38fd1498Szrj 	        G.page_tails[order] = previous;
2055*38fd1498Szrj 	      p = previous;
2056*38fd1498Szrj 	    }
2057*38fd1498Szrj 
2058*38fd1498Szrj 	  previous = p;
2059*38fd1498Szrj 	  p = next;
2060*38fd1498Szrj 	}
2061*38fd1498Szrj       while (! done);
2062*38fd1498Szrj 
2063*38fd1498Szrj       /* Now, restore the in_use_p vectors for any pages from contexts
2064*38fd1498Szrj          other than the current one.  */
2065*38fd1498Szrj       for (p = G.pages[order]; p; p = p->next)
2066*38fd1498Szrj 	if (p->context_depth != G.context_depth)
2067*38fd1498Szrj 	  ggc_recalculate_in_use_p (p);
2068*38fd1498Szrj     }
2069*38fd1498Szrj }
2070*38fd1498Szrj 
2071*38fd1498Szrj #ifdef ENABLE_GC_CHECKING
2072*38fd1498Szrj /* Clobber all free objects.  */
2073*38fd1498Szrj 
2074*38fd1498Szrj static void
2075*38fd1498Szrj poison_pages (void)
2076*38fd1498Szrj {
2077*38fd1498Szrj   unsigned order;
2078*38fd1498Szrj 
2079*38fd1498Szrj   for (order = 2; order < NUM_ORDERS; order++)
2080*38fd1498Szrj     {
2081*38fd1498Szrj       size_t size = OBJECT_SIZE (order);
2082*38fd1498Szrj       page_entry *p;
2083*38fd1498Szrj 
2084*38fd1498Szrj       for (p = G.pages[order]; p != NULL; p = p->next)
2085*38fd1498Szrj 	{
2086*38fd1498Szrj 	  size_t num_objects;
2087*38fd1498Szrj 	  size_t i;
2088*38fd1498Szrj 
2089*38fd1498Szrj 	  if (p->context_depth != G.context_depth)
2090*38fd1498Szrj 	    /* Since we don't do any collection for pages in pushed
2091*38fd1498Szrj 	       contexts, there's no need to do any poisoning.  And
2092*38fd1498Szrj 	       besides, the IN_USE_P array isn't valid until we pop
2093*38fd1498Szrj 	       contexts.  */
2094*38fd1498Szrj 	    continue;
2095*38fd1498Szrj 
2096*38fd1498Szrj 	  num_objects = OBJECTS_IN_PAGE (p);
2097*38fd1498Szrj 	  for (i = 0; i < num_objects; i++)
2098*38fd1498Szrj 	    {
2099*38fd1498Szrj 	      size_t word, bit;
2100*38fd1498Szrj 	      word = i / HOST_BITS_PER_LONG;
2101*38fd1498Szrj 	      bit = i % HOST_BITS_PER_LONG;
2102*38fd1498Szrj 	      if (((p->in_use_p[word] >> bit) & 1) == 0)
2103*38fd1498Szrj 		{
2104*38fd1498Szrj 		  char *object = p->page + i * size;
2105*38fd1498Szrj 
2106*38fd1498Szrj 		  /* Keep poison-by-write when we expect to use Valgrind,
2107*38fd1498Szrj 		     so the exact same memory semantics is kept, in case
2108*38fd1498Szrj 		     there are memory errors.  We override this request
2109*38fd1498Szrj 		     below.  */
2110*38fd1498Szrj 		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
2111*38fd1498Szrj 								 size));
2112*38fd1498Szrj 		  memset (object, 0xa5, size);
2113*38fd1498Szrj 
2114*38fd1498Szrj 		  /* Drop the handle to avoid handle leak.  */
2115*38fd1498Szrj 		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
2116*38fd1498Szrj 		}
2117*38fd1498Szrj 	    }
2118*38fd1498Szrj 	}
2119*38fd1498Szrj     }
2120*38fd1498Szrj }
2121*38fd1498Szrj #else
2122*38fd1498Szrj #define poison_pages()
2123*38fd1498Szrj #endif
2124*38fd1498Szrj 
2125*38fd1498Szrj #ifdef ENABLE_GC_ALWAYS_COLLECT
2126*38fd1498Szrj /* Validate that the reportedly free objects actually are.  */
2127*38fd1498Szrj 
2128*38fd1498Szrj static void
2129*38fd1498Szrj validate_free_objects (void)
2130*38fd1498Szrj {
2131*38fd1498Szrj   struct free_object *f, *next, *still_free = NULL;
2132*38fd1498Szrj 
2133*38fd1498Szrj   for (f = G.free_object_list; f ; f = next)
2134*38fd1498Szrj     {
2135*38fd1498Szrj       page_entry *pe = lookup_page_table_entry (f->object);
2136*38fd1498Szrj       size_t bit, word;
2137*38fd1498Szrj 
2138*38fd1498Szrj       bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
2139*38fd1498Szrj       word = bit / HOST_BITS_PER_LONG;
2140*38fd1498Szrj       bit = bit % HOST_BITS_PER_LONG;
2141*38fd1498Szrj       next = f->next;
2142*38fd1498Szrj 
2143*38fd1498Szrj       /* Make certain it isn't visible from any root.  Notice that we
2144*38fd1498Szrj 	 do this check before sweep_pages merges save_in_use_p.  */
2145*38fd1498Szrj       gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
2146*38fd1498Szrj 
2147*38fd1498Szrj       /* If the object comes from an outer context, then retain the
2148*38fd1498Szrj 	 free_object entry, so that we can verify that the address
2149*38fd1498Szrj 	 isn't live on the stack in some outer context.  */
2150*38fd1498Szrj       if (pe->context_depth != G.context_depth)
2151*38fd1498Szrj 	{
2152*38fd1498Szrj 	  f->next = still_free;
2153*38fd1498Szrj 	  still_free = f;
2154*38fd1498Szrj 	}
2155*38fd1498Szrj       else
2156*38fd1498Szrj 	free (f);
2157*38fd1498Szrj     }
2158*38fd1498Szrj 
2159*38fd1498Szrj   G.free_object_list = still_free;
2160*38fd1498Szrj }
2161*38fd1498Szrj #else
2162*38fd1498Szrj #define validate_free_objects()
2163*38fd1498Szrj #endif
2164*38fd1498Szrj 
2165*38fd1498Szrj /* Top level mark-and-sweep routine.  */
2166*38fd1498Szrj 
2167*38fd1498Szrj void
2168*38fd1498Szrj ggc_collect (void)
2169*38fd1498Szrj {
2170*38fd1498Szrj   /* Avoid frequent unnecessary work by skipping collection if the
2171*38fd1498Szrj      total allocations haven't expanded much since the last
2172*38fd1498Szrj      collection.  */
2173*38fd1498Szrj   float allocated_last_gc =
2174*38fd1498Szrj     MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
2175*38fd1498Szrj 
2176*38fd1498Szrj   float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
2177*38fd1498Szrj   if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
2178*38fd1498Szrj     return;
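
  /* Editorial note (not part of the original source): with, say,
     GGC_MIN_HEAPSIZE == 4096 (KiB) and GGC_MIN_EXPAND == 30 -- both purely
     illustrative values; the real ones come from the params machinery --
     allocated_last_gc is at least 4 MiB, min_expand is 30% of that, and the
     collection above is skipped until G.allocated exceeds roughly 5.2 MiB,
     unless ggc_force_collect is set.  */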
2179*38fd1498Szrj 
2180*38fd1498Szrj   timevar_push (TV_GC);
2181*38fd1498Szrj   if (!quiet_flag)
2182*38fd1498Szrj     fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
2183*38fd1498Szrj   if (GGC_DEBUG_LEVEL >= 2)
2184*38fd1498Szrj     fprintf (G.debug_file, "BEGIN COLLECTING\n");
2185*38fd1498Szrj 
2186*38fd1498Szrj   /* Zero the total allocated bytes.  This will be recalculated in the
2187*38fd1498Szrj      sweep phase.  */
2188*38fd1498Szrj   G.allocated = 0;
2189*38fd1498Szrj 
2190*38fd1498Szrj   /* Release the pages we freed the last time we collected, but didn't
2191*38fd1498Szrj      reuse in the interim.  */
2192*38fd1498Szrj   release_pages ();
2193*38fd1498Szrj 
2194*38fd1498Szrj   /* Indicate that we've seen collections at this context depth.  */
2195*38fd1498Szrj   G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
2196*38fd1498Szrj 
2197*38fd1498Szrj   invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2198*38fd1498Szrj 
2199*38fd1498Szrj   in_gc = true;
2200*38fd1498Szrj   clear_marks ();
2201*38fd1498Szrj   ggc_mark_roots ();
2202*38fd1498Szrj   ggc_handle_finalizers ();
2203*38fd1498Szrj 
2204*38fd1498Szrj   if (GATHER_STATISTICS)
2205*38fd1498Szrj     ggc_prune_overhead_list ();
2206*38fd1498Szrj 
2207*38fd1498Szrj   poison_pages ();
2208*38fd1498Szrj   validate_free_objects ();
2209*38fd1498Szrj   sweep_pages ();
2210*38fd1498Szrj 
2211*38fd1498Szrj   in_gc = false;
2212*38fd1498Szrj   G.allocated_last_gc = G.allocated;
2213*38fd1498Szrj 
2214*38fd1498Szrj   invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2215*38fd1498Szrj 
2216*38fd1498Szrj   timevar_pop (TV_GC);
2217*38fd1498Szrj 
2218*38fd1498Szrj   if (!quiet_flag)
2219*38fd1498Szrj     fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
2220*38fd1498Szrj   if (GGC_DEBUG_LEVEL >= 2)
2221*38fd1498Szrj     fprintf (G.debug_file, "END COLLECTING\n");
2222*38fd1498Szrj }
2223*38fd1498Szrj 
2224*38fd1498Szrj /* Assume that all GGC memory is reachable and grow the limits for the next
2225*38fd1498Szrj    collection.  With checking, trigger GGC so a -Q compilation outputs how much
2226*38fd1498Szrj    memory really is reachable.  */
2227*38fd1498Szrj 
2228*38fd1498Szrj void
2229*38fd1498Szrj ggc_grow (void)
2230*38fd1498Szrj {
2231*38fd1498Szrj   if (!flag_checking)
2232*38fd1498Szrj     G.allocated_last_gc = MAX (G.allocated_last_gc,
2233*38fd1498Szrj 			       G.allocated);
2234*38fd1498Szrj   else
2235*38fd1498Szrj     ggc_collect ();
2236*38fd1498Szrj   if (!quiet_flag)
2237*38fd1498Szrj     fprintf (stderr, " {GC start %luk} ", (unsigned long) G.allocated / 1024);
2238*38fd1498Szrj }
2239*38fd1498Szrj 
2240*38fd1498Szrj /* Print allocation statistics.  */
2241*38fd1498Szrj #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
2242*38fd1498Szrj 		  ? (x) \
2243*38fd1498Szrj 		  : ((x) < 1024*1024*10 \
2244*38fd1498Szrj 		     ? (x) / 1024 \
2245*38fd1498Szrj 		     : (x) / (1024*1024))))
2246*38fd1498Szrj #define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
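
/* Editorial note (not part of the original source): SCALE and STAT_LABEL
   pick a unit so printed values stay short, with cutovers at 10 KiB and
   10 MiB.  For example, 5000 prints as "5000" with a blank label, 3145728
   (3 MiB) prints as "3072k", and 20971520 (20 MiB) prints as "20M".  */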
2247*38fd1498Szrj 
2248*38fd1498Szrj void
2249*38fd1498Szrj ggc_print_statistics (void)
2250*38fd1498Szrj {
2251*38fd1498Szrj   struct ggc_statistics stats;
2252*38fd1498Szrj   unsigned int i;
2253*38fd1498Szrj   size_t total_overhead = 0;
2254*38fd1498Szrj 
2255*38fd1498Szrj   /* Clear the statistics.  */
2256*38fd1498Szrj   memset (&stats, 0, sizeof (stats));
2257*38fd1498Szrj 
2258*38fd1498Szrj   /* Make sure collection will really occur.  */
2259*38fd1498Szrj   G.allocated_last_gc = 0;
2260*38fd1498Szrj 
2261*38fd1498Szrj   /* Collect and print the statistics common across collectors.  */
2262*38fd1498Szrj   ggc_print_common_statistics (stderr, &stats);
2263*38fd1498Szrj 
2264*38fd1498Szrj   /* Release free pages so that we will not count the bytes allocated
2265*38fd1498Szrj      there as part of the total allocated memory.  */
2266*38fd1498Szrj   release_pages ();
2267*38fd1498Szrj 
2268*38fd1498Szrj   /* Collect some information about the various sizes of
2269*38fd1498Szrj      allocation.  */
2270*38fd1498Szrj   fprintf (stderr,
2271*38fd1498Szrj            "Memory still allocated at the end of the compilation process\n");
2272*38fd1498Szrj   fprintf (stderr, "%-8s %10s  %10s  %10s\n",
2273*38fd1498Szrj 	   "Size", "Allocated", "Used", "Overhead");
2274*38fd1498Szrj   for (i = 0; i < NUM_ORDERS; ++i)
2275*38fd1498Szrj     {
2276*38fd1498Szrj       page_entry *p;
2277*38fd1498Szrj       size_t allocated;
2278*38fd1498Szrj       size_t in_use;
2279*38fd1498Szrj       size_t overhead;
2280*38fd1498Szrj 
2281*38fd1498Szrj       /* Skip empty entries.  */
2282*38fd1498Szrj       if (!G.pages[i])
2283*38fd1498Szrj 	continue;
2284*38fd1498Szrj 
2285*38fd1498Szrj       overhead = allocated = in_use = 0;
2286*38fd1498Szrj 
2287*38fd1498Szrj       /* Figure out the total number of bytes allocated for objects of
2288*38fd1498Szrj 	 this size, and how many of them are actually in use.  Also figure
2289*38fd1498Szrj 	 out how much memory the page table is using.  */
2290*38fd1498Szrj       for (p = G.pages[i]; p; p = p->next)
2291*38fd1498Szrj 	{
2292*38fd1498Szrj 	  allocated += p->bytes;
2293*38fd1498Szrj 	  in_use +=
2294*38fd1498Szrj 	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2295*38fd1498Szrj 
2296*38fd1498Szrj 	  overhead += (sizeof (page_entry) - sizeof (long)
2297*38fd1498Szrj 		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
2298*38fd1498Szrj 	}
2299*38fd1498Szrj       fprintf (stderr, "%-8lu %10lu%c %10lu%c %10lu%c\n",
2300*38fd1498Szrj 	       (unsigned long) OBJECT_SIZE (i),
2301*38fd1498Szrj 	       SCALE (allocated), STAT_LABEL (allocated),
2302*38fd1498Szrj 	       SCALE (in_use), STAT_LABEL (in_use),
2303*38fd1498Szrj 	       SCALE (overhead), STAT_LABEL (overhead));
2304*38fd1498Szrj       total_overhead += overhead;
2305*38fd1498Szrj     }
2306*38fd1498Szrj   fprintf (stderr, "%-8s %10lu%c %10lu%c %10lu%c\n", "Total",
2307*38fd1498Szrj 	   SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
2308*38fd1498Szrj 	   SCALE (G.allocated), STAT_LABEL (G.allocated),
2309*38fd1498Szrj 	   SCALE (total_overhead), STAT_LABEL (total_overhead));
2310*38fd1498Szrj 
2311*38fd1498Szrj   if (GATHER_STATISTICS)
2312*38fd1498Szrj     {
2313*38fd1498Szrj       fprintf (stderr, "\nTotal allocations and overheads during "
2314*38fd1498Szrj 	       "the compilation process\n");
2315*38fd1498Szrj 
2316*38fd1498Szrj       fprintf (stderr, "Total Overhead:                          %10"
2317*38fd1498Szrj 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead);
2318*38fd1498Szrj       fprintf (stderr, "Total Allocated:                         %10"
2319*38fd1498Szrj 	       HOST_LONG_LONG_FORMAT "d\n",
2320*38fd1498Szrj 	       G.stats.total_allocated);
2321*38fd1498Szrj 
2322*38fd1498Szrj       fprintf (stderr, "Total Overhead  under  32B:              %10"
2323*38fd1498Szrj 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under32);
2324*38fd1498Szrj       fprintf (stderr, "Total Allocated under  32B:              %10"
2325*38fd1498Szrj 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under32);
2326*38fd1498Szrj       fprintf (stderr, "Total Overhead  under  64B:              %10"
2327*38fd1498Szrj 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under64);
2328*38fd1498Szrj       fprintf (stderr, "Total Allocated under  64B:              %10"
2329*38fd1498Szrj 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under64);
2330*38fd1498Szrj       fprintf (stderr, "Total Overhead  under 128B:              %10"
2331*38fd1498Szrj 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under128);
2332*38fd1498Szrj       fprintf (stderr, "Total Allocated under 128B:              %10"
2333*38fd1498Szrj 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under128);
2334*38fd1498Szrj 
2335*38fd1498Szrj       for (i = 0; i < NUM_ORDERS; i++)
2336*38fd1498Szrj 	if (G.stats.total_allocated_per_order[i])
2337*38fd1498Szrj 	  {
2338*38fd1498Szrj 	    fprintf (stderr, "Total Overhead  page size %9lu:     %10"
2339*38fd1498Szrj 		     HOST_LONG_LONG_FORMAT "d\n",
2340*38fd1498Szrj 		     (unsigned long) OBJECT_SIZE (i),
2341*38fd1498Szrj 		     G.stats.total_overhead_per_order[i]);
2342*38fd1498Szrj 	    fprintf (stderr, "Total Allocated page size %9lu:     %10"
2343*38fd1498Szrj 		     HOST_LONG_LONG_FORMAT "d\n",
2344*38fd1498Szrj 		     (unsigned long) OBJECT_SIZE (i),
2345*38fd1498Szrj 		     G.stats.total_allocated_per_order[i]);
2346*38fd1498Szrj 	  }
2347*38fd1498Szrj   }
2348*38fd1498Szrj }
2349*38fd1498Szrj 
2350*38fd1498Szrj struct ggc_pch_ondisk
2351*38fd1498Szrj {
2352*38fd1498Szrj   unsigned totals[NUM_ORDERS];
2353*38fd1498Szrj };
2354*38fd1498Szrj 
2355*38fd1498Szrj struct ggc_pch_data
2356*38fd1498Szrj {
2357*38fd1498Szrj   struct ggc_pch_ondisk d;
2358*38fd1498Szrj   uintptr_t base[NUM_ORDERS];
2359*38fd1498Szrj   size_t written[NUM_ORDERS];
2360*38fd1498Szrj };
2361*38fd1498Szrj 
2362*38fd1498Szrj struct ggc_pch_data *
2363*38fd1498Szrj init_ggc_pch (void)
2364*38fd1498Szrj {
2365*38fd1498Szrj   return XCNEW (struct ggc_pch_data);
2366*38fd1498Szrj }
2367*38fd1498Szrj 
2368*38fd1498Szrj void
2369*38fd1498Szrj ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2370*38fd1498Szrj 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
2371*38fd1498Szrj {
2372*38fd1498Szrj   unsigned order;
2373*38fd1498Szrj 
2374*38fd1498Szrj   if (size < NUM_SIZE_LOOKUP)
2375*38fd1498Szrj     order = size_lookup[size];
2376*38fd1498Szrj   else
2377*38fd1498Szrj     {
2378*38fd1498Szrj       order = 10;
2379*38fd1498Szrj       while (size > OBJECT_SIZE (order))
2380*38fd1498Szrj 	order++;
2381*38fd1498Szrj     }
2382*38fd1498Szrj 
2383*38fd1498Szrj   d->d.totals[order]++;
2384*38fd1498Szrj }
2385*38fd1498Szrj 
2386*38fd1498Szrj size_t
2387*38fd1498Szrj ggc_pch_total_size (struct ggc_pch_data *d)
2388*38fd1498Szrj {
2389*38fd1498Szrj   size_t a = 0;
2390*38fd1498Szrj   unsigned i;
2391*38fd1498Szrj 
2392*38fd1498Szrj   for (i = 0; i < NUM_ORDERS; i++)
2393*38fd1498Szrj     a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2394*38fd1498Szrj   return a;
2395*38fd1498Szrj }
2396*38fd1498Szrj 
2397*38fd1498Szrj void
2398*38fd1498Szrj ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2399*38fd1498Szrj {
2400*38fd1498Szrj   uintptr_t a = (uintptr_t) base;
2401*38fd1498Szrj   unsigned i;
2402*38fd1498Szrj 
2403*38fd1498Szrj   for (i = 0; i < NUM_ORDERS; i++)
2404*38fd1498Szrj     {
2405*38fd1498Szrj       d->base[i] = a;
2406*38fd1498Szrj       a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2407*38fd1498Szrj     }
2408*38fd1498Szrj }
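
/* Editorial note (not part of the original source): the PCH image is laid
   out as one contiguous, page-aligned region per order, in order-index
   order.  As a worked example, assuming a 4096-byte page size, if
   d->d.totals[3] == 100 (one hundred 8-byte objects were counted), order
   3's region is PAGE_ALIGN (800) == 4096 bytes, so base[4] starts exactly
   one page after base[3]; ggc_pch_alloc_object below then hands out
   addresses by bumping base[order] in OBJECT_SIZE (order) steps.  */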
2409*38fd1498Szrj 
2410*38fd1498Szrj 
2411*38fd1498Szrj char *
2412*38fd1498Szrj ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2413*38fd1498Szrj 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
2414*38fd1498Szrj {
2415*38fd1498Szrj   unsigned order;
2416*38fd1498Szrj   char *result;
2417*38fd1498Szrj 
2418*38fd1498Szrj   if (size < NUM_SIZE_LOOKUP)
2419*38fd1498Szrj     order = size_lookup[size];
2420*38fd1498Szrj   else
2421*38fd1498Szrj     {
2422*38fd1498Szrj       order = 10;
2423*38fd1498Szrj       while (size > OBJECT_SIZE (order))
2424*38fd1498Szrj 	order++;
2425*38fd1498Szrj     }
2426*38fd1498Szrj 
2427*38fd1498Szrj   result = (char *) d->base[order];
2428*38fd1498Szrj   d->base[order] += OBJECT_SIZE (order);
2429*38fd1498Szrj   return result;
2430*38fd1498Szrj }
2431*38fd1498Szrj 
2432*38fd1498Szrj void
2433*38fd1498Szrj ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2434*38fd1498Szrj 		       FILE *f ATTRIBUTE_UNUSED)
2435*38fd1498Szrj {
2436*38fd1498Szrj   /* Nothing to do.  */
2437*38fd1498Szrj }
2438*38fd1498Szrj 
2439*38fd1498Szrj void
2440*38fd1498Szrj ggc_pch_write_object (struct ggc_pch_data *d,
2441*38fd1498Szrj 		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2442*38fd1498Szrj 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
2443*38fd1498Szrj {
2444*38fd1498Szrj   unsigned order;
2445*38fd1498Szrj   static const char emptyBytes[256] = { 0 };
2446*38fd1498Szrj 
2447*38fd1498Szrj   if (size < NUM_SIZE_LOOKUP)
2448*38fd1498Szrj     order = size_lookup[size];
2449*38fd1498Szrj   else
2450*38fd1498Szrj     {
2451*38fd1498Szrj       order = 10;
2452*38fd1498Szrj       while (size > OBJECT_SIZE (order))
2453*38fd1498Szrj 	order++;
2454*38fd1498Szrj     }
2455*38fd1498Szrj 
2456*38fd1498Szrj   if (fwrite (x, size, 1, f) != 1)
2457*38fd1498Szrj     fatal_error (input_location, "can%'t write PCH file: %m");
2458*38fd1498Szrj 
2459*38fd1498Szrj   /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2460*38fd1498Szrj      object out to OBJECT_SIZE(order).  This happens for strings.  */
2461*38fd1498Szrj 
2462*38fd1498Szrj   if (size != OBJECT_SIZE (order))
2463*38fd1498Szrj     {
2464*38fd1498Szrj       unsigned padding = OBJECT_SIZE (order) - size;
2465*38fd1498Szrj 
2466*38fd1498Szrj       /* To speed small writes, we use a nulled-out array that's larger
2467*38fd1498Szrj          than most padding requests as the source for our null bytes.  This
2468*38fd1498Szrj          permits us to do the padding with fwrite() rather than fseek(), and
2469*38fd1498Szrj          limits the chance the OS may try to flush any outstanding writes.  */
2470*38fd1498Szrj       if (padding <= sizeof (emptyBytes))
2471*38fd1498Szrj         {
2472*38fd1498Szrj           if (fwrite (emptyBytes, 1, padding, f) != padding)
2473*38fd1498Szrj             fatal_error (input_location, "can%'t write PCH file");
2474*38fd1498Szrj         }
2475*38fd1498Szrj       else
2476*38fd1498Szrj         {
2477*38fd1498Szrj           /* Larger than our buffer?  Just default to fseek.  */
2478*38fd1498Szrj           if (fseek (f, padding, SEEK_CUR) != 0)
2479*38fd1498Szrj             fatal_error (input_location, "can%'t write PCH file");
2480*38fd1498Szrj         }
2481*38fd1498Szrj     }
2482*38fd1498Szrj 
2483*38fd1498Szrj   d->written[order]++;
2484*38fd1498Szrj   if (d->written[order] == d->d.totals[order]
2485*38fd1498Szrj       && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2486*38fd1498Szrj 				   G.pagesize),
2487*38fd1498Szrj 		SEEK_CUR) != 0)
2488*38fd1498Szrj     fatal_error (input_location, "can%'t write PCH file: %m");
2489*38fd1498Szrj }
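/* Two layers of padding keep the file image identical to the layout
   computed above: each object is padded out to OBJECT_SIZE (order) (small
   pads are written from emptyBytes; pads larger than the 256-byte buffer
   are skipped with fseek instead, since the reader never relies on the
   padding's contents), and once the last object of an order has been
   written, one more fseek rounds the whole region up to a page boundary.
   For instance, a hypothetical 100-byte string placed in a 128-byte order
   would be followed by 28 bytes taken from emptyBytes.  */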
2490*38fd1498Szrj 
2491*38fd1498Szrj void
2492*38fd1498Szrj ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2493*38fd1498Szrj {
2494*38fd1498Szrj   if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2495*38fd1498Szrj     fatal_error (input_location, "can%'t write PCH file: %m");
2496*38fd1498Szrj   free (d);
2497*38fd1498Szrj }
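/* A minimal sketch of the order in which the write-side hooks above are
   meant to be driven (the real driver lives outside this file, in
   ggc-common.c; OBJ, SIZE and F here stand for one collected object, its
   size, and the PCH output stream, and the plain XNEWVEC buffer stands in
   for the mapped region the real driver uses).  Kept out of the build
   with #if 0.  */
#if 0
static void
sketch_pch_write_one_object (void *obj, size_t size, FILE *f)
{
  struct ggc_pch_data *d = init_ggc_pch ();

  /* Pass 1: count every object so the per-order totals are known.  */
  ggc_pch_count_object (d, obj, size, false);

  /* Reserve one contiguous region for all objects and record where each
     order's slice of it begins.  */
  char *region = XNEWVEC (char, ggc_pch_total_size (d));
  ggc_pch_this_base (d, region);

  /* Pass 2: give each object its address in the region, then emit the
     object (plus padding) at the matching offset in the file.  */
  char *new_addr = ggc_pch_alloc_object (d, obj, size, false);
  ggc_pch_prepare_write (d, f);
  ggc_pch_write_object (d, f, obj, new_addr, size, false);
  ggc_pch_finish (d, f);	/* Writes d->d and frees D.  */

  (void) region;
  (void) new_addr;
}
#endif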
2498*38fd1498Szrj 
2499*38fd1498Szrj /* Move the PCH PTE entries that were just added to the end of by_depth
2500*38fd1498Szrj    to the front.  */
2501*38fd1498Szrj 
2502*38fd1498Szrj static void
2503*38fd1498Szrj move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2504*38fd1498Szrj {
2505*38fd1498Szrj   /* First, we swap the new entries to the front of the varrays.  */
2506*38fd1498Szrj   page_entry **new_by_depth;
2507*38fd1498Szrj   unsigned long **new_save_in_use;
2508*38fd1498Szrj 
2509*38fd1498Szrj   new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2510*38fd1498Szrj   new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2511*38fd1498Szrj 
2512*38fd1498Szrj   memcpy (&new_by_depth[0],
2513*38fd1498Szrj 	  &G.by_depth[count_old_page_tables],
2514*38fd1498Szrj 	  count_new_page_tables * sizeof (void *));
2515*38fd1498Szrj   memcpy (&new_by_depth[count_new_page_tables],
2516*38fd1498Szrj 	  &G.by_depth[0],
2517*38fd1498Szrj 	  count_old_page_tables * sizeof (void *));
2518*38fd1498Szrj   memcpy (&new_save_in_use[0],
2519*38fd1498Szrj 	  &G.save_in_use[count_old_page_tables],
2520*38fd1498Szrj 	  count_new_page_tables * sizeof (void *));
2521*38fd1498Szrj   memcpy (&new_save_in_use[count_new_page_tables],
2522*38fd1498Szrj 	  &G.save_in_use[0],
2523*38fd1498Szrj 	  count_old_page_tables * sizeof (void *));
2524*38fd1498Szrj 
2525*38fd1498Szrj   free (G.by_depth);
2526*38fd1498Szrj   free (G.save_in_use);
2527*38fd1498Szrj 
2528*38fd1498Szrj   G.by_depth = new_by_depth;
2529*38fd1498Szrj   G.save_in_use = new_save_in_use;
2530*38fd1498Szrj 
2531*38fd1498Szrj   /* Now update all the index_by_depth fields.  */
2532*38fd1498Szrj   for (unsigned i = G.by_depth_in_use; i--;)
2533*38fd1498Szrj     {
2534*38fd1498Szrj       page_entry *p = G.by_depth[i];
2535*38fd1498Szrj       p->index_by_depth = i;
2536*38fd1498Szrj     }
2537*38fd1498Szrj 
2538*38fd1498Szrj   /* And last, we update the depth pointers in G.depth.  The first
2539*38fd1498Szrj      entry is already 0, and context 0 entries always start at index
2540*38fd1498Szrj      0, so there is nothing to update in the first slot.  We need a
2541*38fd1498Szrj      second slot only if we have old PTEs, and if we do, they start
2542*38fd1498Szrj      at index count_new_page_tables.  */
2543*38fd1498Szrj   if (count_old_page_tables)
2544*38fd1498Szrj     push_depth (count_new_page_tables);
2545*38fd1498Szrj }
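/* In effect, the tail of by_depth (the entries ggc_pch_read just pushed
   for the PCH pages) is rotated to the front, so the context-0 entries
   come first:

       before:  old_0 ... old_{N-1}  new_0 ... new_{M-1}
       after:   new_0 ... new_{M-1}  old_0 ... old_{N-1}

   The surviving pre-PCH pages were all moved to depth 1 by ggc_pch_read,
   which is why a single push_depth (count_new_page_tables) is enough to
   mark where their entries now start.  */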
2546*38fd1498Szrj 
2547*38fd1498Szrj void
2548*38fd1498Szrj ggc_pch_read (FILE *f, void *addr)
2549*38fd1498Szrj {
2550*38fd1498Szrj   struct ggc_pch_ondisk d;
2551*38fd1498Szrj   unsigned i;
2552*38fd1498Szrj   char *offs = (char *) addr;
2553*38fd1498Szrj   unsigned long count_old_page_tables;
2554*38fd1498Szrj   unsigned long count_new_page_tables;
2555*38fd1498Szrj 
2556*38fd1498Szrj   count_old_page_tables = G.by_depth_in_use;
2557*38fd1498Szrj 
2558*38fd1498Szrj   /* We've just read in a PCH file.  So, every object that used to be
2559*38fd1498Szrj      allocated is now free.  */
2560*38fd1498Szrj   clear_marks ();
2561*38fd1498Szrj #ifdef ENABLE_GC_CHECKING
2562*38fd1498Szrj   poison_pages ();
2563*38fd1498Szrj #endif
2564*38fd1498Szrj   /* Since we free all the allocated objects, the free list becomes
2565*38fd1498Szrj      useless.  Validate it now, which will also clear it.  */
2566*38fd1498Szrj   validate_free_objects ();
2567*38fd1498Szrj 
2568*38fd1498Szrj   /* No object read from a PCH file should ever be freed.  So, set the
2569*38fd1498Szrj      context depth to 1, and set the depth of all the currently-allocated
2570*38fd1498Szrj      pages to be 1 too.  PCH pages will have depth 0.  */
2571*38fd1498Szrj   gcc_assert (!G.context_depth);
2572*38fd1498Szrj   G.context_depth = 1;
2573*38fd1498Szrj   /* Allocate space for the depth 1 finalizers.  */
2574*38fd1498Szrj   G.finalizers.safe_push (vNULL);
2575*38fd1498Szrj   G.vec_finalizers.safe_push (vNULL);
2576*38fd1498Szrj   gcc_assert (G.finalizers.length() == 2);
2577*38fd1498Szrj   for (i = 0; i < NUM_ORDERS; i++)
2578*38fd1498Szrj     {
2579*38fd1498Szrj       page_entry *p;
2580*38fd1498Szrj       for (p = G.pages[i]; p != NULL; p = p->next)
2581*38fd1498Szrj 	p->context_depth = G.context_depth;
2582*38fd1498Szrj     }
2583*38fd1498Szrj 
2584*38fd1498Szrj   /* Allocate the appropriate page-table entries for the pages read from
2585*38fd1498Szrj      the PCH file.  */
2586*38fd1498Szrj   if (fread (&d, sizeof (d), 1, f) != 1)
2587*38fd1498Szrj     fatal_error (input_location, "can%'t read PCH file: %m");
2588*38fd1498Szrj 
2589*38fd1498Szrj   for (i = 0; i < NUM_ORDERS; i++)
2590*38fd1498Szrj     {
2591*38fd1498Szrj       struct page_entry *entry;
2592*38fd1498Szrj       char *pte;
2593*38fd1498Szrj       size_t bytes;
2594*38fd1498Szrj       size_t num_objs;
2595*38fd1498Szrj       size_t j;
2596*38fd1498Szrj 
2597*38fd1498Szrj       if (d.totals[i] == 0)
2598*38fd1498Szrj 	continue;
2599*38fd1498Szrj 
2600*38fd1498Szrj       bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
2601*38fd1498Szrj       num_objs = bytes / OBJECT_SIZE (i);
2602*38fd1498Szrj       entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2603*38fd1498Szrj 					    - sizeof (long)
2604*38fd1498Szrj 					    + BITMAP_SIZE (num_objs + 1)));
2605*38fd1498Szrj       entry->bytes = bytes;
2606*38fd1498Szrj       entry->page = offs;
2607*38fd1498Szrj       entry->context_depth = 0;
2608*38fd1498Szrj       offs += bytes;
2609*38fd1498Szrj       entry->num_free_objects = 0;
2610*38fd1498Szrj       entry->order = i;
2611*38fd1498Szrj 
2612*38fd1498Szrj       for (j = 0;
2613*38fd1498Szrj 	   j + HOST_BITS_PER_LONG <= num_objs + 1;
2614*38fd1498Szrj 	   j += HOST_BITS_PER_LONG)
2615*38fd1498Szrj 	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2616*38fd1498Szrj       for (; j < num_objs + 1; j++)
2617*38fd1498Szrj 	entry->in_use_p[j / HOST_BITS_PER_LONG]
2618*38fd1498Szrj 	  |= 1L << (j % HOST_BITS_PER_LONG);
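      /* All NUM_OBJS objects on a PCH page, plus the one trailing bit the
	 bitmap carries (hence "num_objs + 1"), are marked in use up front:
	 nothing read from a PCH file is ever freed, so the allocator must
	 never hand out space from these pages.  E.g. with a hypothetical
	 num_objs of 70 and 64-bit longs, the first long is set to all ones
	 and the remaining seven bits (indices 64-70) are set one at a time
	 in the second long.  */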
2619*38fd1498Szrj 
2620*38fd1498Szrj       for (pte = entry->page;
2621*38fd1498Szrj 	   pte < entry->page + entry->bytes;
2622*38fd1498Szrj 	   pte += G.pagesize)
2623*38fd1498Szrj 	set_page_table_entry (pte, entry);
2624*38fd1498Szrj 
2625*38fd1498Szrj       if (G.page_tails[i] != NULL)
2626*38fd1498Szrj 	G.page_tails[i]->next = entry;
2627*38fd1498Szrj       else
2628*38fd1498Szrj 	G.pages[i] = entry;
2629*38fd1498Szrj       G.page_tails[i] = entry;
2630*38fd1498Szrj 
2631*38fd1498Szrj       /* We start off by just adding all the new information to the
2632*38fd1498Szrj 	 end of the varrays; later, we will move the new information
2633*38fd1498Szrj 	 to the front of the varrays, as the PCH page tables are at
2634*38fd1498Szrj 	 context 0.  */
2635*38fd1498Szrj       push_by_depth (entry, 0);
2636*38fd1498Szrj     }
2637*38fd1498Szrj 
2638*38fd1498Szrj   /* Now, we update the various data structures that speed page table
2639*38fd1498Szrj      handling.  */
2640*38fd1498Szrj   count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2641*38fd1498Szrj 
2642*38fd1498Szrj   move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2643*38fd1498Szrj 
2644*38fd1498Szrj   /* Update the statistics.  */
2645*38fd1498Szrj   G.allocated = G.allocated_last_gc = offs - (char *)addr;
2646*38fd1498Szrj }
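/* Note on the final statistics update above: by the end of the page loop,
   OFFS has been advanced past every per-order region, so offs - addr
   equals the total that ggc_pch_total_size reported when the file was
   written.  Recording it in G.allocated and G.allocated_last_gc makes the
   mapped PCH data the baseline against which the next collection decision
   is made.  */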
2647