1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2    Copyright (C) 1999-2019 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "alias.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "memmodel.h"
28 #include "tm_p.h"
29 #include "diagnostic-core.h"
30 #include "flags.h"
31 #include "ggc-internal.h"
32 #include "timevar.h"
33 #include "params.h"
34 #include "cgraph.h"
35 #include "cfgloop.h"
36 #include "plugin.h"
37 
38 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
39    file open.  Prefer either to valloc.  */
40 #ifdef HAVE_MMAP_ANON
41 # undef HAVE_MMAP_DEV_ZERO
42 # define USING_MMAP
43 #endif
44 
45 #ifdef HAVE_MMAP_DEV_ZERO
46 # define USING_MMAP
47 #endif
48 
49 #ifndef USING_MMAP
50 #define USING_MALLOC_PAGE_GROUPS
51 #endif
52 
53 #if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
54     && defined(USING_MMAP)
55 # define USING_MADVISE
56 #endif
57 
/* Strategy:

   This garbage-collecting allocator allocates objects on one of a set
   of pages.  Each page can allocate objects of a single size only;
   available sizes are powers of two starting at eight bytes.  The size
   of an allocation request is rounded up to the next power of two
   (`order'), and satisfied from the appropriate page.

   Each page is recorded in a page-entry, which also maintains an
   in-use bitmap of object positions on the page.  This allows the
   allocation state of a particular object to be flipped without
   touching the page itself.

   Each page-entry also has a context depth, which is used to track
   pushing and popping of allocation contexts.  Only objects allocated
   in the current (highest-numbered) context may be collected.

   Page entries are arranged in an array of singly-linked lists.  The
   array is indexed by the allocation size, in bits, of the pages on
   it; i.e. all pages on a list allocate objects of the same size.
   Pages are ordered on the list such that all non-full pages precede
   all full pages, with non-full pages arranged in order of decreasing
   context depth.

   Empty pages (of all orders) are kept on a single page cache list,
   and are considered first when new pages are required; they are
   deallocated at the start of the next collection if they haven't
   been recycled by then.  */

/* Define GGC_DEBUG_LEVEL to print debugging information.
     0: No debugging output.
     1: GC statistics only.
     2: Page-entry allocations/deallocations as well.
     3: Object allocations as well.
     4: Object marks as well.  */
#define GGC_DEBUG_LEVEL (0)

/* A two-level tree is used to look up the page-entry for a given
   pointer.  Two chunks of the pointer's bits are extracted to index
   the first and second levels of the tree, as follows:

				   HOST_PAGE_SIZE_BITS
			   32		|      |
       msb +----------------+----+------+------+ lsb
			    |    |      |
			 PAGE_L1_BITS   |
				 |      |
			       PAGE_L2_BITS

   The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
   pages are aligned on system page boundaries.  The next most
   significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
   index values in the lookup table, respectively.

   For 32-bit architectures and the settings below, there are no
   leftover bits.  For architectures with wider pointers, the lookup
   tree points to a list of pages, which must be scanned to find the
   correct one.  */

#define PAGE_L1_BITS	(8)
#define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE	((uintptr_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE	((uintptr_t) 1 << PAGE_L2_BITS)

#define LOOKUP_L1(p) \
  (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))

#define LOOKUP_L2(p) \
  (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
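
/* For illustration only (not part of the allocator): on a host with
   32-bit pointers and 4K pages (G.lg_pagesize == 12), a pointer such
   as 0x12345678 is looked up by taking the top PAGE_L1_BITS bits for
   the first level and the next PAGE_L2_BITS bits above the page
   offset for the second level.  A minimal sketch under those
   assumptions:  */
#if 0
  {
    const void *p = (const void *) 0x12345678;
    size_t L1 = LOOKUP_L1 (p);	/* (0x12345678 >> 24) & 0xff == 0x12 */
    size_t L2 = LOOKUP_L2 (p);	/* (0x12345678 >> 12) & 0xfff == 0x345 */
    page_entry *e = G.lookup[L1] ? G.lookup[L1][L2] : NULL;
  }
#endif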

/* The number of objects per allocation page, for objects on a page of
   the indicated ORDER.  */
#define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]

/* The number of objects in P.  */
#define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))

/* The size of an object on a page of the indicated ORDER.  */
#define OBJECT_SIZE(ORDER) object_size_table[ORDER]

/* For speed, we avoid doing a general integer divide to locate the
   offset in the allocation bitmap, by precalculating numbers M, S
   such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
   within the page which is evenly divisible by the object size Z.  */
#define DIV_MULT(ORDER) inverse_table[ORDER].mult
#define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
#define OFFSET_TO_BIT(OFFSET, ORDER) \
  (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
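
/* Worked example (illustrative, assuming the arithmetic is done in 32
   bits): for object size Z == 24 == 3 * 2^3, compute_inverse below
   produces DIV_MULT == 0xaaaaaaab (the multiplicative inverse of 3
   mod 2^32) and DIV_SHIFT == 3.  Then for the offset O == 48, which
   is divisible by 24, (48 * 0xaaaaaaab) mod 2^32 == 16, and
   16 >> 3 == 2 == 48 / 24.  */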

/* We use this structure to determine the alignment required for
   allocations.  For power-of-two sized allocations, that's not a
   problem, but it does matter for odd-sized allocations.
   We do not care about alignment for floating-point types.  */

struct max_alignment {
  char c;
  union {
    int64_t i;
    void *p;
  } u;
};

/* The biggest alignment required.  */

#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
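
/* Illustrative note: MAX_ALIGNMENT is the offset of U in struct
   max_alignment, i.e. the strictest of the alignments of int64_t and
   void *.  On a typical LP64 host both are 8, so MAX_ALIGNMENT is 8
   and the extra orders below are multiples of 8 bytes.  */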

/* The number of extra orders, not corresponding to power-of-two sized
   objects.  */

#define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)

#define RTL_SIZE(NSLOTS) \
  (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))

#define TREE_EXP_SIZE(OPS) \
  (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))

/* The Ith entry is the maximum size of an object to be stored in the
   Ith extra order.  Adding a new entry to this array is the *only*
   thing you need to do to add a new special allocation size.  */

static const size_t extra_order_size_table[] = {
  /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     There are a lot of structures with these sizes and explicitly
     listing them risks orders being dropped because they changed size.  */
  MAX_ALIGNMENT * 3,
  MAX_ALIGNMENT * 5,
  MAX_ALIGNMENT * 6,
  MAX_ALIGNMENT * 7,
  MAX_ALIGNMENT * 9,
  MAX_ALIGNMENT * 10,
  MAX_ALIGNMENT * 11,
  MAX_ALIGNMENT * 12,
  MAX_ALIGNMENT * 13,
  MAX_ALIGNMENT * 14,
  MAX_ALIGNMENT * 15,
  sizeof (struct tree_decl_non_common),
  sizeof (struct tree_field_decl),
  sizeof (struct tree_parm_decl),
  sizeof (struct tree_var_decl),
  sizeof (struct tree_type_non_common),
  sizeof (struct function),
  sizeof (struct basic_block_def),
  sizeof (struct cgraph_node),
  sizeof (struct loop),
};

/* The total number of orders.  */

#define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)

/* Compute the smallest nonnegative number which when added to X gives
   a multiple of F.  */

#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))

/* Round X to the next multiple of the page size.  */

#define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize)
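
/* Worked example (illustrative): ROUND_UP_VALUE (10, 8)
   == 8 - 1 - ((8 - 1 + 10) % 8) == 7 - (17 % 8) == 6, and indeed
   10 + 6 == 16 is the next multiple of 8.  Likewise, with a 4K page
   size, PAGE_ALIGN (5000) == 8192.  */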

/* The Ith entry is the number of objects on a page of order I.  */

static unsigned objects_per_page_table[NUM_ORDERS];

/* The Ith entry is the size of an object on a page of order I.  */

static size_t object_size_table[NUM_ORDERS];

/* The Ith entry is a pair of numbers (mult, shift) such that
   ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
   for all k evenly divisible by OBJECT_SIZE(I).  */

static struct
{
  size_t mult;
  unsigned int shift;
}
inverse_table[NUM_ORDERS];

/* A page_entry records the status of an allocation page.  This
   structure is dynamically sized to fit the bitmap in_use_p.  */
struct page_entry
{
  /* The next page-entry with objects of the same size, or NULL if
     this is the last page-entry.  */
  struct page_entry *next;

  /* The previous page-entry with objects of the same size, or NULL if
     this is the first page-entry.   The PREV pointer exists solely to
     keep the cost of ggc_free manageable.  */
  struct page_entry *prev;

  /* The number of bytes allocated.  (This will always be a multiple
     of the host system page size.)  */
  size_t bytes;

  /* The address at which the memory is allocated.  */
  char *page;

#ifdef USING_MALLOC_PAGE_GROUPS
  /* Back pointer to the page group this page came from.  */
  struct page_group *group;
#endif

  /* This is the index in the by_depth varray where this page table
     can be found.  */
  unsigned long index_by_depth;

  /* Context depth of this page.  */
  unsigned short context_depth;

  /* The number of free objects remaining on this page.  */
  unsigned short num_free_objects;

  /* A likely candidate for the bit position of a free object for the
     next allocation from this page.  */
  unsigned short next_bit_hint;

  /* The lg of size of objects allocated from this page.  */
  unsigned char order;

  /* Discarded page? */
  bool discarded;

  /* A bit vector indicating whether or not objects are in use.  The
     Nth bit is one if the Nth object on this page is allocated.  This
     array is dynamically sized.  */
  unsigned long in_use_p[1];
};

#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
   we parcel out aligned pages.  */
struct page_group
{
  /* A linked list of all extant page groups.  */
  struct page_group *next;

  /* The address we received from malloc.  */
  char *allocation;

  /* The size of the block.  */
  size_t alloc_size;

  /* A bitmask of pages in use.  */
  unsigned int in_use;
};
#endif

#if HOST_BITS_PER_PTR <= 32

/* On 32-bit hosts, we use a two level page table, as pictured above.  */
typedef page_entry **page_table[PAGE_L1_SIZE];

#else

/* On 64-bit hosts, we use the same two level page tables plus a linked
   list that disambiguates the top 32-bits.  There will almost always be
   exactly one entry in the list.  */
typedef struct page_table_chain
{
  struct page_table_chain *next;
  size_t high_bits;
  page_entry **table[PAGE_L1_SIZE];
} *page_table;

#endif

class finalizer
{
public:
  finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}

  void *addr () const { return m_addr; }

  void call () const { m_function (m_addr); }

private:
  void *m_addr;
  void (*m_function)(void *);
};

class vec_finalizer
{
public:
  vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
    m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}

  void call () const
    {
      for (size_t i = 0; i < m_n_objects; i++)
	m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
    }

  void *addr () const { return reinterpret_cast<void *> (m_addr); }

private:
  uintptr_t m_addr;
  void (*m_function)(void *);
  size_t m_object_size;
  size_t m_n_objects;
};

#ifdef ENABLE_GC_ALWAYS_COLLECT
/* List of free objects to be verified as actually free on the
   next collection.  */
struct free_object
{
  void *object;
  struct free_object *next;
};
#endif

/* The rest of the global variables.  */
static struct ggc_globals
{
  /* The Nth element in this array is a page with objects of size 2^N.
     If there are any pages with free objects, they will be at the
     head of the list.  NULL if there are no page-entries for this
     object size.  */
  page_entry *pages[NUM_ORDERS];

  /* The Nth element in this array is the last page with objects of
     size 2^N.  NULL if there are no page-entries for this object
     size.  */
  page_entry *page_tails[NUM_ORDERS];

  /* Lookup table for associating allocation pages with object addresses.  */
  page_table lookup;

  /* The system's page size.  */
  size_t pagesize;
  size_t lg_pagesize;

  /* Bytes currently allocated.  */
  size_t allocated;

  /* Bytes currently allocated at the end of the last collection.  */
  size_t allocated_last_gc;

  /* Total amount of memory mapped.  */
  size_t bytes_mapped;

  /* Bit N set if any allocations have been done at context depth N.  */
  unsigned long context_depth_allocations;

  /* Bit N set if any collections have been done at context depth N.  */
  unsigned long context_depth_collections;

  /* The current depth in the context stack.  */
  unsigned short context_depth;

  /* A file descriptor open to /dev/zero for reading.  */
#if defined (HAVE_MMAP_DEV_ZERO)
  int dev_zero_fd;
#endif

  /* A cache of free system pages.  */
  page_entry *free_pages;

#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *page_groups;
#endif

  /* The file descriptor for debugging output.  */
  FILE *debug_file;

  /* Current number of elements in use in depth below.  */
  unsigned int depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int depth_max;

  /* Each element of this array is an index in by_depth where the given
     depth starts.  This array is indexed by the depth we are
     interested in.  */
  unsigned int *depth;

  /* Current number of elements in use in by_depth below.  */
  unsigned int by_depth_in_use;

  /* Maximum number of elements that can be used before resizing.  */
  unsigned int by_depth_max;

  /* Each element of this array is a pointer to a page_entry, all
     page_entries can be found in here by increasing depth.
     index_by_depth in the page_entry is the index into this data
     structure where that page_entry can be found.  This is used to
     speed up finding all page_entries at a particular depth.  */
  page_entry **by_depth;

  /* Each element is a pointer to the saved in_use_p bits, if any,
     zero otherwise.  We allocate them all together, to enable a
     better runtime data access pattern.  */
  unsigned long **save_in_use;

  /* Finalizers for single objects.  The first index is collection_depth.  */
  vec<vec<finalizer> > finalizers;

  /* Finalizers for vectors of objects.  */
  vec<vec<vec_finalizer> > vec_finalizers;

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* List of free objects to be verified as actually free on the
     next collection.  */
  struct free_object *free_object_list;
#endif

  struct
  {
    /* Total GC-allocated memory.  */
    unsigned long long total_allocated;
    /* Total overhead for GC-allocated memory.  */
    unsigned long long total_overhead;

    /* Total allocations and overhead for sizes less than 32, 64 and 128.
       These sizes are interesting because they are typical cache line
       sizes.  */

    unsigned long long total_allocated_under32;
    unsigned long long total_overhead_under32;

    unsigned long long total_allocated_under64;
    unsigned long long total_overhead_under64;

    unsigned long long total_allocated_under128;
    unsigned long long total_overhead_under128;

    /* The allocations for each of the allocation orders.  */
    unsigned long long total_allocated_per_order[NUM_ORDERS];

    /* The overhead for each of the allocation orders.  */
    unsigned long long total_overhead_per_order[NUM_ORDERS];
  } stats;
} G;

/* True if a gc is currently taking place.  */

static bool in_gc = false;

/* The size in bytes required to maintain a bitmap for the objects
   on a page-entry.  */
#define BITMAP_SIZE(Num_objects) \
  (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
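
/* Worked example (illustrative): with 64-bit longs, a page holding
   1024 objects needs BITMAP_SIZE (1024) == CEIL (1024, 64) * 8
   == 16 * 8 == 128 bytes of in-use bitmap.  */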

/* Allocate pages in chunks of this size, to throttle calls to memory
   allocation routines.  The first page is used, the rest go onto the
   free list.  This cannot be larger than HOST_BITS_PER_INT for the
   in_use bitmask for page_group.  Hosts that need a different value
   can override this by defining GGC_QUIRE_SIZE explicitly.  */
#ifndef GGC_QUIRE_SIZE
# ifdef USING_MMAP
#  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
# else
#  define GGC_QUIRE_SIZE 16
# endif
#endif

/* Initial guess as to how many page table entries we might need.  */
#define INITIAL_PTE_COUNT 128

static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, bool check);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_page (unsigned);
static void free_page (struct page_entry *);
static void release_pages (void);
static void clear_marks (void);
static void sweep_pages (void);
static void ggc_recalculate_in_use_p (page_entry *);
static void compute_inverse (unsigned);
static inline void adjust_depth (void);
static void move_ptes_to_front (int, int);

void debug_print_page_list (int);
static void push_depth (unsigned int);
static void push_by_depth (page_entry *, unsigned long *);

/* Push an entry onto G.depth.  */

inline static void
push_depth (unsigned int i)
{
  if (G.depth_in_use >= G.depth_max)
    {
      G.depth_max *= 2;
      G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
    }
  G.depth[G.depth_in_use++] = i;
}

/* Push an entry onto G.by_depth and G.save_in_use.  */

inline static void
push_by_depth (page_entry *p, unsigned long *s)
{
  if (G.by_depth_in_use >= G.by_depth_max)
    {
      G.by_depth_max *= 2;
      G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
      G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
				  G.by_depth_max);
    }
  G.by_depth[G.by_depth_in_use] = p;
  G.save_in_use[G.by_depth_in_use++] = s;
}

#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
#endif

#define save_in_use_p_i(__i) \
  (G.save_in_use[__i])
#define save_in_use_p(__p) \
  (save_in_use_p_i (__p->index_by_depth))

/* Traverse the page table and find the entry for a page.
   If the object wasn't allocated via the GC, return NULL.  */

static inline page_entry *
safe_lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (1)
    {
      if (table == NULL)
	return NULL;
      if (table->high_bits == high_bits)
	break;
      table = table->next;
    }
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);
  if (! base[L1])
    return NULL;

  return base[L1][L2];
}

/* Traverse the page table and find the entry for a page.
   Die (probably) if the object wasn't allocated via GC.  */

static inline page_entry *
lookup_page_table_entry (const void *p)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table = G.lookup;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  while (table->high_bits != high_bits)
    table = table->next;
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  return base[L1][L2];
}

/* Set the page table entry for a page.  */

static void
set_page_table_entry (void *p, page_entry *entry)
{
  page_entry ***base;
  size_t L1, L2;

#if HOST_BITS_PER_PTR <= 32
  base = &G.lookup[0];
#else
  page_table table;
  uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
  for (table = G.lookup; table; table = table->next)
    if (table->high_bits == high_bits)
      goto found;

  /* Not found -- allocate a new table.  */
  table = XCNEW (struct page_table_chain);
  table->next = G.lookup;
  table->high_bits = high_bits;
  G.lookup = table;
found:
  base = &table->table[0];
#endif

  /* Extract the level 1 and 2 indices.  */
  L1 = LOOKUP_L1 (p);
  L2 = LOOKUP_L2 (p);

  if (base[L1] == NULL)
    base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);

  base[L1][L2] = entry;
}

/* Prints the page-entry for object size ORDER, for debugging.  */

DEBUG_FUNCTION void
debug_print_page_list (int order)
{
  page_entry *p;
  printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
	  (void *) G.page_tails[order]);
  p = G.pages[order];
  while (p != NULL)
    {
      printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
	      p->num_free_objects);
      p = p->next;
    }
  printf ("NULL\n");
  fflush (stdout);
}

#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF
   (if non-null).  The ifdef structure here is intended to cause a
   compile error unless exactly one of the HAVE_* is defined.  */

static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
{
#ifdef HAVE_MMAP_ANON
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
  char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE, G.dev_zero_fd, 0);
#endif

  if (page == (char *) MAP_FAILED)
    {
      if (!check)
	return NULL;
      perror ("virtual memory exhausted");
      exit (FATAL_EXIT_CODE);
    }

  /* Remember that we allocated this memory.  */
  G.bytes_mapped += size;

  /* Pretend we don't have access to the allocated pages.  We'll enable
     access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));

  return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group.  */

static inline size_t
page_group_index (char *allocation, char *page)
{
  return (size_t) (page - allocation) >> G.lg_pagesize;
}

/* Set and clear the in_use bit for this page in the page group.  */

static inline void
set_page_group_in_use (page_group *group, char *page)
{
  group->in_use |= 1 << page_group_index (group->allocation, page);
}

static inline void
clear_page_group_in_use (page_group *group, char *page)
{
  group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif

/* Allocate a new page for allocating objects of size 2^ORDER,
   and return an entry for it.  The entry is not added to the
   appropriate page_table list.  */

static inline struct page_entry *
alloc_page (unsigned order)
{
  struct page_entry *entry, *p, **pp;
  char *page;
  size_t num_objects;
  size_t bitmap_size;
  size_t page_entry_size;
  size_t entry_size;
#ifdef USING_MALLOC_PAGE_GROUPS
  page_group *group;
#endif

  num_objects = OBJECTS_PER_PAGE (order);
  bitmap_size = BITMAP_SIZE (num_objects + 1);
  page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
  entry_size = num_objects * OBJECT_SIZE (order);
  if (entry_size < G.pagesize)
    entry_size = G.pagesize;
  entry_size = PAGE_ALIGN (entry_size);

  entry = NULL;
  page = NULL;

  /* Check the list of free pages for one we can use.  */
  for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
    if (p->bytes == entry_size)
      break;

  if (p != NULL)
    {
      if (p->discarded)
	G.bytes_mapped += p->bytes;
      p->discarded = false;

      /* Recycle the allocated memory from this page ...  */
      *pp = p->next;
      page = p->page;

#ifdef USING_MALLOC_PAGE_GROUPS
      group = p->group;
#endif

      /* ... and, if possible, the page entry itself.  */
      if (p->order == order)
	{
	  entry = p;
	  memset (entry, 0, page_entry_size);
	}
      else
	free (p);
    }
#ifdef USING_MMAP
  else if (entry_size == G.pagesize)
    {
      /* We want just one page.  Allocate a bunch of them and put the
	 extras on the freelist.  (Can only do this optimization with
	 mmap for backing store.)  */
      struct page_entry *e, *f = G.free_pages;
      int i, entries = GGC_QUIRE_SIZE;

      page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
      if (page == NULL)
	{
	  page = alloc_anon (NULL, G.pagesize, true);
	  entries = 1;
	}

      /* This loop counts down so that the chain will be in ascending
	 memory order.  */
      for (i = entries - 1; i >= 1; i--)
	{
	  e = XCNEWVAR (struct page_entry, page_entry_size);
	  e->order = order;
	  e->bytes = G.pagesize;
	  e->page = page + (i << G.lg_pagesize);
	  e->next = f;
	  f = e;
	}

      G.free_pages = f;
    }
  else
    page = alloc_anon (NULL, entry_size, true);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  else
    {
      /* Allocate a large block of memory and serve out the aligned
	 pages therein.  This results in much less memory wastage
	 than the traditional implementation of valloc.  */

      char *allocation, *a, *enda;
      size_t alloc_size, head_slop, tail_slop;
      int multiple_pages = (entry_size == G.pagesize);

      if (multiple_pages)
	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
      else
	alloc_size = entry_size + G.pagesize - 1;
      allocation = XNEWVEC (char, alloc_size);

      page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
      head_slop = page - allocation;
      if (multiple_pages)
	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
      else
	tail_slop = alloc_size - entry_size - head_slop;
      enda = allocation + alloc_size - tail_slop;

      /* We allocated N pages, which are likely not aligned, leaving
	 us with N-1 usable pages.  We plan to place the page_group
	 structure somewhere in the slop.  */
      if (head_slop >= sizeof (page_group))
	group = (page_group *)page - 1;
      else
	{
	  /* We magically got an aligned allocation.  Too bad, we have
	     to waste a page anyway.  */
	  if (tail_slop == 0)
	    {
	      enda -= G.pagesize;
	      tail_slop += G.pagesize;
	    }
	  gcc_assert (tail_slop >= sizeof (page_group));
	  group = (page_group *)enda;
	  tail_slop -= sizeof (page_group);
	}

      /* Remember that we allocated this memory.  */
      group->next = G.page_groups;
      group->allocation = allocation;
      group->alloc_size = alloc_size;
      group->in_use = 0;
      G.page_groups = group;
      G.bytes_mapped += alloc_size;

      /* If we allocated multiple pages, put the rest on the free list.  */
      if (multiple_pages)
	{
	  struct page_entry *e, *f = G.free_pages;
	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
	    {
	      e = XCNEWVAR (struct page_entry, page_entry_size);
	      e->order = order;
	      e->bytes = G.pagesize;
	      e->page = a;
	      e->group = group;
	      e->next = f;
	      f = e;
	    }
	  G.free_pages = f;
	}
    }
#endif

  if (entry == NULL)
    entry = XCNEWVAR (struct page_entry, page_entry_size);

  entry->bytes = entry_size;
  entry->page = page;
  entry->context_depth = G.context_depth;
  entry->order = order;
  entry->num_free_objects = num_objects;
  entry->next_bit_hint = 1;

  G.context_depth_allocations |= (unsigned long)1 << G.context_depth;

#ifdef USING_MALLOC_PAGE_GROUPS
  entry->group = group;
  set_page_group_in_use (group, page);
#endif

  /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     increment the hint.  */
  entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
    = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);

  set_page_table_entry (page, entry);

  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Allocating page at %p, object size=%lu, data %p-%p\n",
	     (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
	     page + entry_size - 1);

  return entry;
}

/* Adjust the size of G.depth so that no index greater than the one
   used by the top of the G.by_depth is used.  */

static inline void
adjust_depth (void)
{
  page_entry *top;

  if (G.by_depth_in_use)
    {
      top = G.by_depth[G.by_depth_in_use-1];

      /* Peel back indices in depth that index into by_depth, so that
	 as new elements are added to by_depth, we note the indices
	 of those elements, if they are for new context depths.  */
      while (G.depth_in_use > (size_t)top->context_depth+1)
	--G.depth_in_use;
    }
}

/* For a page that is no longer needed, put it on the free page list.  */

static void
free_page (page_entry *entry)
{
  if (GGC_DEBUG_LEVEL >= 2)
    fprintf (G.debug_file,
	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
	     entry->page, entry->page + entry->bytes - 1);

  /* Mark the page as inaccessible.  Discard the handle to avoid handle
     leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));

  set_page_table_entry (entry->page, NULL);

#ifdef USING_MALLOC_PAGE_GROUPS
  clear_page_group_in_use (entry->group, entry->page);
#endif

  if (G.by_depth_in_use > 1)
    {
      page_entry *top = G.by_depth[G.by_depth_in_use-1];
      int i = entry->index_by_depth;

      /* We cannot free a page from a context deeper than the current
	 one.  */
      gcc_assert (entry->context_depth == top->context_depth);

      /* Put top element into freed slot.  */
      G.by_depth[i] = top;
      G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
      top->index_by_depth = i;
    }
  --G.by_depth_in_use;

  adjust_depth ();

  entry->next = G.free_pages;
  G.free_pages = entry;
}

/* Release the free page cache to the system.  */

static void
release_pages (void)
{
  size_t n1 = 0;
  size_t n2 = 0;
#ifdef USING_MADVISE
  page_entry *p, *start_p;
  char *start;
  size_t len;
  size_t mapped_len;
  page_entry *next, *prev, *newprev;
  size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;

  /* First free larger contiguous areas to the OS.
     This allows other allocators to grab these areas if needed.
     This is only done on larger chunks to avoid fragmentation.
     This does not always work because the free_pages list is only
     approximately sorted.  */

  p = G.free_pages;
  prev = NULL;
  while (p)
    {
      start = p->page;
      start_p = p;
      len = 0;
      mapped_len = 0;
      newprev = prev;
      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  if (!p->discarded)
	    mapped_len += p->bytes;
	  newprev = p;
	  p = p->next;
	}
      if (len >= free_unit)
	{
	  while (start_p != p)
	    {
	      next = start_p->next;
	      free (start_p);
	      start_p = next;
	    }
	  munmap (start, len);
	  if (prev)
	    prev->next = p;
	  else
	    G.free_pages = p;
	  G.bytes_mapped -= mapped_len;
	  n1 += len;
	  continue;
	}
      prev = newprev;
    }

  /* Now give back the fragmented pages to the OS, but keep the address
     space to reuse it next time. */

  for (p = G.free_pages; p; )
    {
      if (p->discarded)
	{
	  p = p->next;
	  continue;
	}
      start = p->page;
      len = p->bytes;
      start_p = p;
      p = p->next;
      while (p && p->page == start + len)
	{
	  len += p->bytes;
	  p = p->next;
	}
      /* Give the page back to the kernel, but don't free the mapping.
	 This avoids fragmentation in the virtual memory map of the
	 process.  Next time we can reuse it by just touching it.  */
      madvise (start, len, MADV_DONTNEED);
      /* Don't count those pages as mapped, so as not to trigger the
	 garbage collector unnecessarily.  */
      G.bytes_mapped -= len;
      n2 += len;
      while (start_p != p)
	{
	  start_p->discarded = true;
	  start_p = start_p->next;
	}
    }
#endif
#if defined(USING_MMAP) && !defined(USING_MADVISE)
  page_entry *p, *next;
  char *start;
  size_t len;

  /* Gather up adjacent pages so they are unmapped together.  */
  p = G.free_pages;

  while (p)
    {
      start = p->page;
      next = p->next;
      len = p->bytes;
      free (p);
      p = next;

      while (p && p->page == start + len)
	{
	  next = p->next;
	  len += p->bytes;
	  free (p);
	  p = next;
	}

      munmap (start, len);
      n1 += len;
      G.bytes_mapped -= len;
    }

  G.free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
  page_entry **pp, *p;
  page_group **gp, *g;

  /* Remove all pages from free page groups from the list.  */
  pp = &G.free_pages;
  while ((p = *pp) != NULL)
    if (p->group->in_use == 0)
      {
	*pp = p->next;
	free (p);
      }
    else
      pp = &p->next;

  /* Remove all free page groups, and release the storage.  */
  gp = &G.page_groups;
  while ((g = *gp) != NULL)
    if (g->in_use == 0)
      {
	*gp = g->next;
	G.bytes_mapped -= g->alloc_size;
	n1 += g->alloc_size;
	free (g->allocation);
      }
    else
      gp = &g->next;
#endif
  if (!quiet_flag && (n1 || n2))
    {
      fprintf (stderr, " {GC");
      if (n1)
	fprintf (stderr, " released %luk", (unsigned long)(n1 / 1024));
      if (n2)
	fprintf (stderr, " madv_dontneed %luk", (unsigned long)(n2 / 1024));
      fprintf (stderr, "}");
    }
}

/* This table provides a fast way to determine ceil(log_2(size)) for
   allocation requests.  The minimum allocation size is eight bytes.  */
#define NUM_SIZE_LOOKUP 512
static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
{
  3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
  4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
  9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
};

/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated, as well as the size
   order.  */

static void
ggc_round_alloc_size_1 (size_t requested_size,
			size_t *size_order,
			size_t *alloced_size)
{
  size_t order, object_size;

  if (requested_size < NUM_SIZE_LOOKUP)
    {
      order = size_lookup[requested_size];
      object_size = OBJECT_SIZE (order);
    }
  else
    {
      order = 10;
      while (requested_size > (object_size = OBJECT_SIZE (order)))
	order++;
    }

  if (size_order)
    *size_order = order;
  if (alloced_size)
    *alloced_size = object_size;
}

/* For a given size of memory requested for allocation, return the
   actual size that is going to be allocated.  */

size_t
ggc_round_alloc_size (size_t requested_size)
{
  size_t size = 0;

  ggc_round_alloc_size_1 (requested_size, NULL, &size);
  return size;
}

/* Push a finalizer onto the appropriate vec.  */

static void
add_finalizer (void *result, void (*f)(void *), size_t s, size_t n)
{
  if (f == NULL)
    /* No finalizer.  */;
  else if (n == 1)
    {
      finalizer fin (result, f);
      G.finalizers[G.context_depth].safe_push (fin);
    }
  else
    {
      vec_finalizer fin (reinterpret_cast<uintptr_t> (result), f, s, n);
      G.vec_finalizers[G.context_depth].safe_push (fin);
    }
}

/* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */

void *
ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n
		    MEM_STAT_DECL)
{
  size_t order, word, bit, object_offset, object_size;
  struct page_entry *entry;
  void *result;

  ggc_round_alloc_size_1 (size, &order, &object_size);

  /* If there are non-full pages for this size allocation, they are at
     the head of the list.  */
  entry = G.pages[order];

  /* If there is no page for this object size, or all pages in this
     context are full, allocate a new page.  */
  if (entry == NULL || entry->num_free_objects == 0)
    {
      struct page_entry *new_entry;
      new_entry = alloc_page (order);

      new_entry->index_by_depth = G.by_depth_in_use;
      push_by_depth (new_entry, 0);

      /* We can skip context depths, if we do, make sure we go all the
	 way to the new depth.  */
      while (new_entry->context_depth >= G.depth_in_use)
	push_depth (G.by_depth_in_use-1);

      /* If this is the only entry, it's also the tail.  If it is not
	 the only entry, then we must update the PREV pointer of the
	 ENTRY (G.pages[order]) to point to our new page entry.  */
      if (entry == NULL)
	G.page_tails[order] = new_entry;
      else
	entry->prev = new_entry;

      /* Put new pages at the head of the page list.  By definition the
	 entry at the head of the list always has a NULL pointer.  */
      new_entry->next = entry;
      new_entry->prev = NULL;
      entry = new_entry;
      G.pages[order] = new_entry;

      /* For a new page, we know the word and bit positions (in the
	 in_use bitmap) of the first available object -- they're zero.  */
      new_entry->next_bit_hint = 1;
      word = 0;
      bit = 0;
      object_offset = 0;
    }
  else
    {
      /* First try to use the hint left from the previous allocation
	 to locate a clear bit in the in-use bitmap.  We've made sure
	 that the one-past-the-end bit is always set, so if the hint
	 has run over, this test will fail.  */
      unsigned hint = entry->next_bit_hint;
      word = hint / HOST_BITS_PER_LONG;
      bit = hint % HOST_BITS_PER_LONG;

      /* If the hint didn't work, scan the bitmap from the beginning.  */
      if ((entry->in_use_p[word] >> bit) & 1)
	{
	  word = bit = 0;
	  while (~entry->in_use_p[word] == 0)
	    ++word;

#if GCC_VERSION >= 3004
	  bit = __builtin_ctzl (~entry->in_use_p[word]);
#else
	  while ((entry->in_use_p[word] >> bit) & 1)
	    ++bit;
#endif

	  hint = word * HOST_BITS_PER_LONG + bit;
	}

      /* Next time, try the next bit.  */
      entry->next_bit_hint = hint + 1;

      object_offset = hint * object_size;
    }

  /* Set the in-use bit.  */
  entry->in_use_p[word] |= ((unsigned long) 1 << bit);

  /* Keep a running total of the number of free objects.  If this page
     fills up, we may have to move it to the end of the list if the
     next page isn't full.  If the next page is full, all subsequent
     pages are full, so there's no need to move it.  */
  if (--entry->num_free_objects == 0
      && entry->next != NULL
      && entry->next->num_free_objects > 0)
    {
      /* We have a new head for the list.  */
      G.pages[order] = entry->next;

      /* We are moving ENTRY to the end of the page table list.
	 The new page at the head of the list will have NULL in
	 its PREV field and ENTRY will have NULL in its NEXT field.  */
      entry->next->prev = NULL;
      entry->next = NULL;

      /* Append ENTRY to the tail of the list.  */
      entry->prev = G.page_tails[order];
      G.page_tails[order]->next = entry;
      G.page_tails[order] = entry;
    }

  /* Calculate the object's address.  */
  result = entry->page + object_offset;
  if (GATHER_STATISTICS)
    ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
			 result FINAL_PASS_MEM_STAT);

#ifdef ENABLE_GC_CHECKING
  /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     exact same semantics in presence of memory bugs, regardless of
     ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));

  /* `Poison' the entire allocated object, including any padding at
     the end.  */
  memset (result, 0xaf, object_size);

  /* Make the bytes after the end of the object inaccessible.  Discard the
     handle to avoid handle leak.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
						object_size - size));
#endif

  /* Tell Valgrind that the memory is there, but its content isn't
     defined.  The bytes at the end of the object are still marked
     inaccessible.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));

  /* Keep track of how many bytes are being allocated.  This
     information is used in deciding when to collect.  */
  G.allocated += object_size;

  /* For timevar statistics.  */
  timevar_ggc_mem_total += object_size;

  if (f)
    add_finalizer (result, f, s, n);

  if (GATHER_STATISTICS)
    {
      size_t overhead = object_size - size;

      G.stats.total_overhead += overhead;
      G.stats.total_allocated += object_size;
      G.stats.total_overhead_per_order[order] += overhead;
      G.stats.total_allocated_per_order[order] += object_size;

      if (size <= 32)
	{
	  G.stats.total_overhead_under32 += overhead;
	  G.stats.total_allocated_under32 += object_size;
	}
      if (size <= 64)
	{
	  G.stats.total_overhead_under64 += overhead;
	  G.stats.total_allocated_under64 += object_size;
	}
      if (size <= 128)
	{
	  G.stats.total_overhead_under128 += overhead;
	  G.stats.total_allocated_under128 += object_size;
	}
    }

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
	     (unsigned long) size, (unsigned long) object_size, result,
	     (void *) entry);

  return result;
}

/* Mark function for strings.  */

void
gt_ggc_m_S (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;
  unsigned long offset;

  if (!p)
    return;

  /* Look up the page on which the object is alloced.  If it was not
     GC allocated, gracefully bail out.  */
  entry = safe_lookup_page_table_entry (p);
  if (!entry)
    return;

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  Note that because a char* might
     point to the middle of an object, we need special code here to
     make sure P points to the start of an object.  */
  offset = ((const char *) p - entry->page) % object_size_table[entry->order];
  if (offset)
    {
      /* Here we've seen a char* which does not point to the beginning
	 of an allocated object.  We assume it points to the middle of
	 a STRING_CST.  */
      gcc_assert (offset == offsetof (struct tree_string, str));
      p = ((const char *) p) - offset;
      gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
      return;
    }

  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return;
}


/* User-callable entry points for marking string X.  */

void
gt_ggc_mx (const char *& x)
{
  gt_ggc_m_S (x);
}

void
gt_ggc_mx (unsigned char *& x)
{
  gt_ggc_m_S (x);
}

void
gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
{
}

/* If P is not marked, mark it and return false.  Otherwise return true.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_set_mark (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  /* If the bit was previously set, skip it.  */
  if (entry->in_use_p[word] & mask)
    return 1;

  /* Otherwise set it, and decrement the free object count.  */
  entry->in_use_p[word] |= mask;
  entry->num_free_objects -= 1;

  if (GGC_DEBUG_LEVEL >= 4)
    fprintf (G.debug_file, "Marking %p\n", p);

  return 0;
}

/* Return 1 if P has been marked, zero otherwise.
   P must have been allocated by the GC allocator; it mustn't point to
   static objects, stack variables, or memory allocated with malloc.  */

int
ggc_marked_p (const void *p)
{
  page_entry *entry;
  unsigned bit, word;
  unsigned long mask;

  /* Look up the page on which the object is alloced.  If the object
     wasn't allocated by the collector, we'll probably die.  */
  entry = lookup_page_table_entry (p);
  gcc_assert (entry);

  /* Calculate the index of the object on the page; this is its bit
     position in the in_use_p bitmap.  */
  bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
  word = bit / HOST_BITS_PER_LONG;
  mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);

  return (entry->in_use_p[word] & mask) != 0;
}

/* Return the size of the gc-able object P.  */

size_t
ggc_get_size (const void *p)
{
  page_entry *pe = lookup_page_table_entry (p);
  return OBJECT_SIZE (pe->order);
}

/* Release the memory for object P.  */

void
ggc_free (void *p)
{
  if (in_gc)
    return;

  page_entry *pe = lookup_page_table_entry (p);
  size_t order = pe->order;
  size_t size = OBJECT_SIZE (order);

  if (GATHER_STATISTICS)
    ggc_free_overhead (p);

  if (GGC_DEBUG_LEVEL >= 3)
    fprintf (G.debug_file,
	     "Freeing object, actual size=%lu, at %p on %p\n",
	     (unsigned long) size, p, (void *) pe);

#ifdef ENABLE_GC_CHECKING
  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));

#ifdef ENABLE_GC_ALWAYS_COLLECT
  /* In the completely-anal-checking mode, we do *not* immediately free
     the data, but instead verify that the data is *actually* not
     reachable the next time we collect.  */
  {
    struct free_object *fo = XNEW (struct free_object);
    fo->object = p;
    fo->next = G.free_object_list;
    G.free_object_list = fo;
  }
#else
  {
    unsigned int bit_offset, word, bit;

    G.allocated -= size;

    /* Mark the object not-in-use.  */
    bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    word = bit_offset / HOST_BITS_PER_LONG;
    bit = bit_offset % HOST_BITS_PER_LONG;
    pe->in_use_p[word] &= ~(1UL << bit);

    if (pe->num_free_objects++ == 0)
      {
	page_entry *p, *q;

	/* If the page is completely full, then it's supposed to
	   be after all pages that aren't.  Since we've freed one
	   object from a page that was full, we need to move the
	   page to the head of the list.

	   PE is the node we want to move.  Q is the previous node
	   and P is the next node in the list.  */
	q = pe->prev;
	if (q && q->num_free_objects == 0)
	  {
	    p = pe->next;

	    q->next = p;

	    /* If PE was at the end of the list, then Q becomes the
	       new end of the list.  If PE was not the end of the
	       list, then we need to update the PREV field for P.  */
	    if (!p)
	      G.page_tails[order] = q;
	    else
	      p->prev = q;

	    /* Move PE to the head of the list.  */
	    pe->next = G.pages[order];
	    pe->prev = NULL;
	    G.pages[order]->prev = pe;
	    G.pages[order] = pe;
	  }

	/* Reset the hint bit to point to the only free object.  */
	pe->next_bit_hint = bit_offset;
      }
  }
#endif
}

/* Subroutine of init_ggc which computes the pair of numbers used to
   perform division by OBJECT_SIZE (order) and fills in inverse_table[].

   This algorithm is taken from Granlund and Montgomery's paper
   "Division by Invariant Integers using Multiplication"
   (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
   constants).  */

static void
compute_inverse (unsigned order)
{
  size_t size, inv;
  unsigned int e;

  size = OBJECT_SIZE (order);
  e = 0;
  while (size % 2 == 0)
    {
      e++;
      size >>= 1;
    }

  inv = size;
  while (inv * size != 1)
    inv = inv * (2 - inv*size);

  DIV_MULT (order) = inv;
  DIV_SHIFT (order) = e;
}
1723 /* Initialize the ggc-mmap allocator.  */
1724 void
1725 init_ggc (void)
1726 {
1727   static bool init_p = false;
1728   unsigned order;
1729 
1730   if (init_p)
1731     return;
1732   init_p = true;
1733 
1734   G.pagesize = getpagesize ();
1735   G.lg_pagesize = exact_log2 (G.pagesize);
1736 
1737 #ifdef HAVE_MMAP_DEV_ZERO
1738   G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1739   if (G.dev_zero_fd == -1)
1740     internal_error ("open /dev/zero: %m");
1741 #endif
1742 
1743 #if 0
1744   G.debug_file = fopen ("ggc-mmap.debug", "w");
1745 #else
1746   G.debug_file = stdout;
1747 #endif
1748 
1749 #ifdef USING_MMAP
1750   /* StunOS has an amazing off-by-one error for the first mmap allocation
1751      after fiddling with RLIMIT_STACK.  The result, as hard as it is to
1752      believe, is an unaligned page allocation, which would cause us to
1753      hork badly if we tried to use it.  */
1754   {
1755     char *p = alloc_anon (NULL, G.pagesize, true);
1756     struct page_entry *e;
1757     if ((uintptr_t)p & (G.pagesize - 1))
1758       {
1759 	/* How losing.  Discard this one and try another.  If we still
1760 	   can't get something useful, give up.  */
1761 
1762 	p = alloc_anon (NULL, G.pagesize, true);
1763 	gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
1764       }
1765 
1766     /* We have a good page, might as well hold onto it...  */
1767     e = XCNEW (struct page_entry);
1768     e->bytes = G.pagesize;
1769     e->page = p;
1770     e->next = G.free_pages;
1771     G.free_pages = e;
1772   }
1773 #endif
1774 
1775   /* Initialize the object size table.  */
1776   for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1777     object_size_table[order] = (size_t) 1 << order;
1778   for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1779     {
1780       size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1781 
1782       /* If S is not a multiple of MAX_ALIGNMENT, then round it up
1783 	 so that we're sure of getting aligned memory.  */
1784       s = ROUND_UP (s, MAX_ALIGNMENT);
1785       object_size_table[order] = s;
1786     }
1787 
1788   /* Initialize the objects-per-page and inverse tables.  */
1789   for (order = 0; order < NUM_ORDERS; ++order)
1790     {
1791       objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1792       if (objects_per_page_table[order] == 0)
1793 	objects_per_page_table[order] = 1;
1794       compute_inverse (order);
1795     }
1796 
1797   /* Reset the size_lookup array to put appropriately sized objects in
1798      the special orders.  All objects bigger than the previous power
1799      of two, but no greater than the special size, should go in the
1800      new order.  */
1801   for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1802     {
1803       int o;
1804       int i;
1805 
1806       i = OBJECT_SIZE (order);
1807       if (i >= NUM_SIZE_LOOKUP)
1808 	continue;
1809 
1810       for (o = size_lookup[i]; o == size_lookup[i]; --i)
1811 	size_lookup[i] = order;
1812     }
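
  /* Editor's note (illustrative): if, say, one of the extra orders has
     OBJECT_SIZE 80, then entries 65..80 of size_lookup all held the
     order for 128-byte objects; the loop above walks I downward,
     rewriting each of them to the new order, and stops at 64, whose
     entry already differs.  */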
1813 
1814   G.depth_in_use = 0;
1815   G.depth_max = 10;
1816   G.depth = XNEWVEC (unsigned int, G.depth_max);
1817 
1818   G.by_depth_in_use = 0;
1819   G.by_depth_max = INITIAL_PTE_COUNT;
1820   G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1821   G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1822 
1823   /* Allocate space for the depth 0 finalizers.  */
1824   G.finalizers.safe_push (vNULL);
1825   G.vec_finalizers.safe_push (vNULL);
1826   gcc_assert (G.finalizers.length () == 1);
1827 }
1828 
1829 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1830    reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */
1831 
1832 static void
1833 ggc_recalculate_in_use_p (page_entry *p)
1834 {
1835   unsigned int i;
1836   size_t num_objects;
1837 
1838   /* Because the past-the-end bit in in_use_p is always set, we
1839      pretend there is one additional object.  */
1840   num_objects = OBJECTS_IN_PAGE (p) + 1;
1841 
1842   /* Reset the free object count.  */
1843   p->num_free_objects = num_objects;
1844 
1845   /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
1846   for (i = 0;
1847        i < CEIL (BITMAP_SIZE (num_objects),
1848 		 sizeof (*p->in_use_p));
1849        ++i)
1850     {
1851       unsigned long j;
1852 
1853       /* Something is in use if it is marked, or if it was in use in a
1854 	 context further down the context stack.  */
1855       p->in_use_p[i] |= save_in_use_p (p)[i];
1856 
1857       /* Decrement the free object count for every object allocated.  */
1858       for (j = p->in_use_p[i]; j; j >>= 1)
1859 	p->num_free_objects -= (j & 1);
1860     }
1861 
1862   gcc_assert (p->num_free_objects < num_objects);
1863 }
1864 
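/* Editor's sketch (not part of GCC): the inner loop above is a plain
   shift-and-mask population count over one bitmap word; in isolation:  */
#if 0
static unsigned int
demo_count_used (unsigned long word)
{
  unsigned int n = 0;
  for (; word; word >>= 1)
    n += word & 1;
  return n;	/* demo_count_used (0xA5) == 4.  */
}
#endif
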
1865 /* Unmark all objects.  */
1866 
1867 static void
1868 clear_marks (void)
1869 {
1870   unsigned order;
1871 
1872   for (order = 2; order < NUM_ORDERS; order++)
1873     {
1874       page_entry *p;
1875 
1876       for (p = G.pages[order]; p != NULL; p = p->next)
1877 	{
1878 	  size_t num_objects = OBJECTS_IN_PAGE (p);
1879 	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1880 
1881 	  /* The data should be page-aligned.  */
1882 	  gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));
1883 
1884 	  /* Pages that aren't in the topmost context are not collected;
1885 	     nevertheless, we need their in-use bit vectors to store GC
1886 	     marks.  So, back them up first.  */
1887 	  if (p->context_depth < G.context_depth)
1888 	    {
1889 	      if (! save_in_use_p (p))
1890 		save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
1891 	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1892 	    }
1893 
1894 	  /* Reset the number of free objects and clear the
1895              in-use bits.  These will be adjusted by mark_obj.  */
1896 	  p->num_free_objects = num_objects;
1897 	  memset (p->in_use_p, 0, bitmap_size);
1898 
1899 	  /* Make sure the one-past-the-end bit is always set.  */
1900 	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1901 	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1902 	}
1903     }
1904 }
1905 
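/* Editor's note (illustrative): with, say, 100 objects on a page and a
   64-bit host, the assignment above sets bit 100 % 64 == 36 of word
   100 / 64 == 1, so code scanning the bitmap can rely on finding a set
   bit at the first position past the last real object.  */
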
1906 /* Check if any blocks with a registered finalizer have become unmarked. If so
1907    run the finalizer and unregister it because the block is about to be freed.
1908    Note that no guarantee is made about the order in which finalizers
1909    run, so touching other objects in GC memory is extremely unwise.  */
1910 
1911 static void
1912 ggc_handle_finalizers ()
1913 {
1914   unsigned dlen = G.finalizers.length ();
1915   for (unsigned d = G.context_depth; d < dlen; ++d)
1916     {
1917       vec<finalizer> &v = G.finalizers[d];
1918       unsigned length = v.length ();
1919       for (unsigned int i = 0; i < length;)
1920 	{
1921 	  finalizer &f = v[i];
1922 	  if (!ggc_marked_p (f.addr ()))
1923 	    {
1924 	      f.call ();
1925 	      v.unordered_remove (i);
1926 	      length--;
1927 	    }
1928 	  else
1929 	    i++;
1930 	}
1931     }
1932 
1933   gcc_assert (dlen == G.vec_finalizers.length ());
1934   for (unsigned d = G.context_depth; d < dlen; ++d)
1935     {
1936       vec<vec_finalizer> &vv = G.vec_finalizers[d];
1937       unsigned length = vv.length ();
1938       for (unsigned int i = 0; i < length;)
1939 	{
1940 	  vec_finalizer &f = vv[i];
1941 	  if (!ggc_marked_p (f.addr ()))
1942 	    {
1943 	      f.call ();
1944 	      vv.unordered_remove (i);
1945 	      length--;
1946 	    }
1947 	  else
1948 	    i++;
1949 	}
1950     }
1951 }
1952 
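/* Editor's sketch (not part of GCC): the remove-while-iterating pattern
   used twice above, in plain C.  unordered_remove moves the last element
   into slot I, which is why I is advanced only when nothing was removed.  */
#if 0
static size_t
demo_sweep_unmarked (int *marked, size_t n)
{
  size_t i = 0;
  while (i < n)
    {
      if (!marked[i])
	marked[i] = marked[--n];	/* Move last element into slot I.  */
      else
	i++;
    }
  return n;				/* The new length.  */
}
#endif
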
1953 /* Free all empty pages.  Partially empty pages need no attention
1954    because the `mark' bit doubles as an `unused' bit.  */
1955 
1956 static void
1957 sweep_pages (void)
1958 {
1959   unsigned order;
1960 
1961   for (order = 2; order < NUM_ORDERS; order++)
1962     {
1963       /* The last page-entry to consider, regardless of entries
1964 	 placed at the end of the list.  */
1965       page_entry * const last = G.page_tails[order];
1966 
1967       size_t num_objects;
1968       size_t live_objects;
1969       page_entry *p, *previous;
1970       int done;
1971 
1972       p = G.pages[order];
1973       if (p == NULL)
1974 	continue;
1975 
1976       previous = NULL;
1977       do
1978 	{
1979 	  page_entry *next = p->next;
1980 
1981 	  /* Loop until all entries have been examined.  */
1982 	  done = (p == last);
1983 
1984 	  num_objects = OBJECTS_IN_PAGE (p);
1985 
1986 	  /* Add all live objects on this page to the count of
1987              allocated memory.  */
1988 	  live_objects = num_objects - p->num_free_objects;
1989 
1990 	  G.allocated += OBJECT_SIZE (order) * live_objects;
1991 
1992 	  /* Only objects on pages in the topmost context should get
1993 	     collected.  */
1994 	  if (p->context_depth < G.context_depth)
1995 	    ;
1996 
1997 	  /* Remove the page if it's empty.  */
1998 	  else if (live_objects == 0)
1999 	    {
2000 	      /* If P was the first page in the list, then NEXT
2001 		 becomes the new first page in the list, otherwise
2002 		 splice P out of the forward pointers.  */
2003 	      if (! previous)
2004 		G.pages[order] = next;
2005 	      else
2006 		previous->next = next;
2007 
2008 	      /* Splice P out of the back pointers too.  */
2009 	      if (next)
2010 		next->prev = previous;
2011 
2012 	      /* Are we removing the last element?  */
2013 	      if (p == G.page_tails[order])
2014 		G.page_tails[order] = previous;
2015 	      free_page (p);
2016 	      p = previous;
2017 	    }
2018 
2019 	  /* If the page is full, move it to the end.  */
2020 	  else if (p->num_free_objects == 0)
2021 	    {
2022 	      /* Don't move it if it's already at the end.  */
2023 	      if (p != G.page_tails[order])
2024 		{
2025 		  /* Move p to the end of the list.  */
2026 		  p->next = NULL;
2027 		  p->prev = G.page_tails[order];
2028 		  G.page_tails[order]->next = p;
2029 
2030 		  /* Update the tail pointer...  */
2031 		  G.page_tails[order] = p;
2032 
2033 		  /* ... and the head pointer, if necessary.  */
2034 		  if (! previous)
2035 		    G.pages[order] = next;
2036 		  else
2037 		    previous->next = next;
2038 
2039 		  /* And update the backpointer in NEXT if necessary.  */
2040 		  if (next)
2041 		    next->prev = previous;
2042 
2043 		  p = previous;
2044 		}
2045 	    }
2046 
2047 	  /* If we've fallen through to here, it's a page in the
2048 	     topmost context that is neither full nor empty.  Such a
2049 	     page must precede pages at lesser context depth in the
2050 	     list, so move it to the head.  */
2051 	  else if (p != G.pages[order])
2052 	    {
2053 	      previous->next = p->next;
2054 
2055 	      /* Update the backchain in the next node if it exists.  */
2056 	      if (p->next)
2057 		p->next->prev = previous;
2058 
2059 	      /* Move P to the head of the list.  */
2060 	      p->next = G.pages[order];
2061 	      p->prev = NULL;
2062 	      G.pages[order]->prev = p;
2063 
2064 	      /* Update the head pointer.  */
2065 	      G.pages[order] = p;
2066 
2067 	      /* Are we moving the last element?  */
2068 	      if (G.page_tails[order] == p)
2069 	        G.page_tails[order] = previous;
2070 	      p = previous;
2071 	    }
2072 
2073 	  previous = p;
2074 	  p = next;
2075 	}
2076       while (! done);
2077 
2078       /* Now, restore the in_use_p vectors for any pages from contexts
2079          other than the current one.  */
2080       for (p = G.pages[order]; p; p = p->next)
2081 	if (p->context_depth != G.context_depth)
2082 	  ggc_recalculate_in_use_p (p);
2083     }
2084 }
2085 
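/* Editor's sketch (not part of GCC): the list surgery above is the
   standard doubly-linked unlink, complicated only by the per-order head
   and tail pointers and by iterating while splicing.  In isolation:  */
#if 0
static void
demo_unlink (page_entry **head, page_entry **tail, page_entry *p)
{
  if (p->prev)
    p->prev->next = p->next;
  else
    *head = p->next;
  if (p->next)
    p->next->prev = p->prev;
  else
    *tail = p->prev;
}
#endif
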
2086 #ifdef ENABLE_GC_CHECKING
2087 /* Clobber all free objects.  */
2088 
2089 static void
2090 poison_pages (void)
2091 {
2092   unsigned order;
2093 
2094   for (order = 2; order < NUM_ORDERS; order++)
2095     {
2096       size_t size = OBJECT_SIZE (order);
2097       page_entry *p;
2098 
2099       for (p = G.pages[order]; p != NULL; p = p->next)
2100 	{
2101 	  size_t num_objects;
2102 	  size_t i;
2103 
2104 	  if (p->context_depth != G.context_depth)
2105 	    /* Since we don't do any collection for pages in pushed
2106 	       contexts, there's no need to do any poisoning.  And
2107 	       besides, the IN_USE_P array isn't valid until we pop
2108 	       contexts.  */
2109 	    continue;
2110 
2111 	  num_objects = OBJECTS_IN_PAGE (p);
2112 	  for (i = 0; i < num_objects; i++)
2113 	    {
2114 	      size_t word, bit;
2115 	      word = i / HOST_BITS_PER_LONG;
2116 	      bit = i % HOST_BITS_PER_LONG;
2117 	      if (((p->in_use_p[word] >> bit) & 1) == 0)
2118 		{
2119 		  char *object = p->page + i * size;
2120 
2121 		  /* Keep poison-by-write when we expect to use Valgrind,
2122 		     so exactly the same memory semantics are kept, in case
2123 		     there are memory errors.  We override this request
2124 		     below.  */
2125 		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
2126 								 size));
2127 		  memset (object, 0xa5, size);
2128 
2129 		  /* Drop the handle to avoid handle leak.  */
2130 		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
2131 		}
2132 	    }
2133 	}
2134     }
2135 }
2136 #else
2137 #define poison_pages()
2138 #endif
2139 
2140 #ifdef ENABLE_GC_ALWAYS_COLLECT
2141 /* Validate that the reportedly free objects actually are.  */
2142 
2143 static void
2144 validate_free_objects (void)
2145 {
2146   struct free_object *f, *next, *still_free = NULL;
2147 
2148   for (f = G.free_object_list; f ; f = next)
2149     {
2150       page_entry *pe = lookup_page_table_entry (f->object);
2151       size_t bit, word;
2152 
2153       bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
2154       word = bit / HOST_BITS_PER_LONG;
2155       bit = bit % HOST_BITS_PER_LONG;
2156       next = f->next;
2157 
2158       /* Make certain it isn't visible from any root.  Notice that we
2159 	 do this check before sweep_pages merges save_in_use_p.  */
2160       gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
2161 
2162       /* If the object comes from an outer context, then retain the
2163 	 free_object entry, so that we can verify that the address
2164 	 isn't live on the stack in some outer context.  */
2165       if (pe->context_depth != G.context_depth)
2166 	{
2167 	  f->next = still_free;
2168 	  still_free = f;
2169 	}
2170       else
2171 	free (f);
2172     }
2173 
2174   G.free_object_list = still_free;
2175 }
2176 #else
2177 #define validate_free_objects()
2178 #endif
2179 
2180 /* Top level mark-and-sweep routine.  */
2181 
2182 void
2183 ggc_collect (void)
2184 {
2185   /* Avoid frequent unnecessary work by skipping collection if the
2186      total allocations haven't expanded much since the last
2187      collection.  */
2188   float allocated_last_gc =
2189     MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
2190 
2191   float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
2192   if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
2193     return;
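
  /* Editor's note (a worked example, assuming the documented parameter
     defaults ggc-min-heapsize == 4096 kB and ggc-min-expand == 30): the
     floor is 4 MB and min_expand is 1.2 MB, so no collection happens
     until G.allocated exceeds roughly 5.2 MB.  */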
2194 
2195   timevar_push (TV_GC);
2196   if (GGC_DEBUG_LEVEL >= 2)
2197     fprintf (G.debug_file, "BEGIN COLLECTING\n");
2198 
2199   /* Zero the total allocated bytes.  This will be recalculated in the
2200      sweep phase.  */
2201   size_t allocated = G.allocated;
2202   G.allocated = 0;
2203 
2204   /* Release the pages we freed the last time we collected, but didn't
2205      reuse in the interim.  */
2206   release_pages ();
2207 
2208   /* Output this later so we do not interfere with release_pages.  */
2209   if (!quiet_flag)
2210     fprintf (stderr, " {GC %luk -> ", (unsigned long) allocated / 1024);
2211 
2212   /* Indicate that we've seen collections at this context depth.  */
2213   G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
2214 
2215   invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2216 
2217   in_gc = true;
2218   clear_marks ();
2219   ggc_mark_roots ();
2220   ggc_handle_finalizers ();
2221 
2222   if (GATHER_STATISTICS)
2223     ggc_prune_overhead_list ();
2224 
2225   poison_pages ();
2226   validate_free_objects ();
2227   sweep_pages ();
2228 
2229   in_gc = false;
2230   G.allocated_last_gc = G.allocated;
2231 
2232   invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2233 
2234   timevar_pop (TV_GC);
2235 
2236   if (!quiet_flag)
2237     fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
2238   if (GGC_DEBUG_LEVEL >= 2)
2239     fprintf (G.debug_file, "END COLLECTING\n");
2240 }
2241 
2242 /* Return free pages to the system.  */
2243 
2244 void
2245 ggc_trim ()
2246 {
2247   timevar_push (TV_GC);
2248   G.allocated = 0;
2249   sweep_pages ();
2250   release_pages ();
2251   if (!quiet_flag)
2252     fprintf (stderr, " {GC trimmed to %luk, %luk mapped}",
2253 	     (unsigned long) G.allocated / 1024,
2254 	     (unsigned long) G.bytes_mapped / 1024);
2255   timevar_pop (TV_GC);
2256 }
2257 
2258 /* Assume that all GGC memory is reachable and grow the limits for the
2259    next collection.  With checking enabled, trigger a collection so that
2260    -Q output shows how much memory really is reachable.  */
2261 
2262 void
2263 ggc_grow (void)
2264 {
2265   if (!flag_checking)
2266     G.allocated_last_gc = MAX (G.allocated_last_gc,
2267 			       G.allocated);
2268   else
2269     ggc_collect ();
2270   if (!quiet_flag)
2271     fprintf (stderr, " {GC start %luk} ", (unsigned long) G.allocated / 1024);
2272 }
2273 
2274 void
2275 ggc_print_statistics (void)
2276 {
2277   struct ggc_statistics stats;
2278   unsigned int i;
2279   size_t total_overhead = 0;
2280 
2281   /* Clear the statistics.  */
2282   memset (&stats, 0, sizeof (stats));
2283 
2284   /* Make sure collection will really occur.  */
2285   G.allocated_last_gc = 0;
2286 
2287   /* Collect and print the statistics common across collectors.  */
2288   ggc_print_common_statistics (stderr, &stats);
2289 
2290   /* Release free pages so that we will not count the bytes allocated
2291      there as part of the total allocated memory.  */
2292   release_pages ();
2293 
2294   /* Collect some information about the various sizes of
2295      allocation.  */
2296   fprintf (stderr,
2297            "Memory still allocated at the end of the compilation process\n");
2298   fprintf (stderr, "%-8s %10s  %10s  %10s\n",
2299 	   "Size", "Allocated", "Used", "Overhead");
2300   for (i = 0; i < NUM_ORDERS; ++i)
2301     {
2302       page_entry *p;
2303       size_t allocated;
2304       size_t in_use;
2305       size_t overhead;
2306 
2307       /* Skip empty entries.  */
2308       if (!G.pages[i])
2309 	continue;
2310 
2311       overhead = allocated = in_use = 0;
2312 
2313       /* Figure out the total number of bytes allocated for objects of
2314 	 this size, and how many of them are actually in use.  Also figure
2315 	 out how much memory the page table is using.  */
2316       for (p = G.pages[i]; p; p = p->next)
2317 	{
2318 	  allocated += p->bytes;
2319 	  in_use +=
2320 	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2321 
2322 	  overhead += (sizeof (page_entry) - sizeof (long)
2323 		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
2324 	}
2325       fprintf (stderr, "%-8" PRIu64 " " PRsa (10) " " PRsa (10) " "
2326 	       PRsa (10) "\n",
2327 	       (uint64_t)OBJECT_SIZE (i),
2328 	       SIZE_AMOUNT (allocated),
2329 	       SIZE_AMOUNT (in_use),
2330 	       SIZE_AMOUNT (overhead));
2331       total_overhead += overhead;
2332     }
2333   fprintf (stderr, "%-8s " PRsa (10) " " PRsa (10) " " PRsa (10) "\n",
2334 	   "Total",
2335 	   SIZE_AMOUNT (G.bytes_mapped),
2336 	   SIZE_AMOUNT (G.allocated),
2337 	   SIZE_AMOUNT (total_overhead));
2338 
2339   if (GATHER_STATISTICS)
2340     {
2341       fprintf (stderr, "\nTotal allocations and overheads during "
2342 	       "the compilation process\n");
2343 
2344       fprintf (stderr, "Total Overhead:                          "
2345 	       PRsa (9) "\n",
2346 	       SIZE_AMOUNT (G.stats.total_overhead));
2347       fprintf (stderr, "Total Allocated:                         "
2348 	       PRsa (9) "\n",
2349 	       SIZE_AMOUNT (G.stats.total_allocated));
2350 
2351       fprintf (stderr, "Total Overhead  under  32B:              "
2352 	       PRsa (9) "\n",
2353 	       SIZE_AMOUNT (G.stats.total_overhead_under32));
2354       fprintf (stderr, "Total Allocated under  32B:              "
2355 	       PRsa (9) "\n",
2356 	       SIZE_AMOUNT (G.stats.total_allocated_under32));
2357       fprintf (stderr, "Total Overhead  under  64B:              "
2358 	       PRsa (9) "\n",
2359 	       SIZE_AMOUNT (G.stats.total_overhead_under64));
2360       fprintf (stderr, "Total Allocated under  64B:              "
2361 	       PRsa (9) "\n",
2362 	       SIZE_AMOUNT (G.stats.total_allocated_under64));
2363       fprintf (stderr, "Total Overhead  under 128B:              "
2364 	       PRsa (9) "\n",
2365 	       SIZE_AMOUNT (G.stats.total_overhead_under128));
2366       fprintf (stderr, "Total Allocated under 128B:              "
2367 	       PRsa (9) "\n",
2368 	       SIZE_AMOUNT (G.stats.total_allocated_under128));
2369 
2370       for (i = 0; i < NUM_ORDERS; i++)
2371 	if (G.stats.total_allocated_per_order[i])
2372 	  {
2373 	    fprintf (stderr, "Total Overhead  page size %9" PRIu64 ":     "
2374 		     PRsa (9) "\n",
2375 		     (uint64_t)OBJECT_SIZE (i),
2376 		     SIZE_AMOUNT (G.stats.total_overhead_per_order[i]));
2377 	    fprintf (stderr, "Total Allocated page size %9" PRIu64 ":     "
2378 		     PRsa (9) "\n",
2379 		     (uint64_t)OBJECT_SIZE (i),
2380 		     SIZE_AMOUNT (G.stats.total_allocated_per_order[i]));
2381 	  }
2382   }
2383 }
2384 
2385 struct ggc_pch_ondisk
2386 {
2387   unsigned totals[NUM_ORDERS];
2388 };
2389 
2390 struct ggc_pch_data
2391 {
2392   struct ggc_pch_ondisk d;
2393   uintptr_t base[NUM_ORDERS];
2394   size_t written[NUM_ORDERS];
2395 };
2396 
2397 struct ggc_pch_data *
2398 init_ggc_pch (void)
2399 {
2400   return XCNEW (struct ggc_pch_data);
2401 }
2402 
2403 void
2404 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2405 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
2406 {
2407   unsigned order;
2408 
2409   if (size < NUM_SIZE_LOOKUP)
2410     order = size_lookup[size];
2411   else
2412     {
2413       order = 10;
2414       while (size > OBJECT_SIZE (order))
2415 	order++;
2416     }
2417 
2418   d->d.totals[order]++;
2419 }
2420 
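/* Editor's note (illustrative): a small request, say 40 bytes, is below
   NUM_SIZE_LOOKUP and is mapped by the table to the first order whose
   OBJECT_SIZE can hold it; larger requests fall back to the linear scan
   upward from order 10 above.  */
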
2421 size_t
2422 ggc_pch_total_size (struct ggc_pch_data *d)
2423 {
2424   size_t a = 0;
2425   unsigned i;
2426 
2427   for (i = 0; i < NUM_ORDERS; i++)
2428     a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2429   return a;
2430 }
2431 
2432 void
2433 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2434 {
2435   uintptr_t a = (uintptr_t) base;
2436   unsigned i;
2437 
2438   for (i = 0; i < NUM_ORDERS; i++)
2439     {
2440       d->base[i] = a;
2441       a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2442     }
2443 }
2444 
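/* Editor's note (illustrative, assuming 4 kB pages): ggc_pch_total_size
   and ggc_pch_this_base lay the PCH region out as one page-aligned slab
   per order.  With, say, 100 32-byte objects (order 5) and 10 4096-byte
   objects (order 12), base[5] == BASE and base[12] == BASE
   + PAGE_ALIGN (3200) == BASE + 4096; orders with zero totals add
   nothing.  */
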
2445 
2446 char *
2447 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2448 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
2449 {
2450   unsigned order;
2451   char *result;
2452 
2453   if (size < NUM_SIZE_LOOKUP)
2454     order = size_lookup[size];
2455   else
2456     {
2457       order = 10;
2458       while (size > OBJECT_SIZE (order))
2459 	order++;
2460     }
2461 
2462   result = (char *) d->base[order];
2463   d->base[order] += OBJECT_SIZE (order);
2464   return result;
2465 }
2466 
2467 void
2468 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2469 		       FILE *f ATTRIBUTE_UNUSED)
2470 {
2471   /* Nothing to do.  */
2472 }
2473 
2474 void
2475 ggc_pch_write_object (struct ggc_pch_data *d,
2476 		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2477 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
2478 {
2479   unsigned order;
2480   static const char emptyBytes[256] = { 0 };
2481 
2482   if (size < NUM_SIZE_LOOKUP)
2483     order = size_lookup[size];
2484   else
2485     {
2486       order = 10;
2487       while (size > OBJECT_SIZE (order))
2488 	order++;
2489     }
2490 
2491   if (fwrite (x, size, 1, f) != 1)
2492     fatal_error (input_location, "can%'t write PCH file: %m");
2493 
2494   /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2495      object out to OBJECT_SIZE(order).  This happens for strings.  */
2496 
2497   if (size != OBJECT_SIZE (order))
2498     {
2499       unsigned padding = OBJECT_SIZE (order) - size;
2500 
2501       /* To speed small writes, we use a nulled-out array that's larger
2502          than most padding requests as the source for our null bytes.  This
2503          permits us to do the padding with fwrite() rather than fseek(), and
2504          limits the chance the OS may try to flush any outstanding writes.  */
2505       if (padding <= sizeof (emptyBytes))
2506         {
2507           if (fwrite (emptyBytes, 1, padding, f) != padding)
2508             fatal_error (input_location, "can%'t write PCH file");
2509         }
2510       else
2511         {
2512           /* Larger than our buffer?  Just default to fseek.  */
2513           if (fseek (f, padding, SEEK_CUR) != 0)
2514             fatal_error (input_location, "can%'t write PCH file");
2515         }
2516     }
2517 
2518   d->written[order]++;
2519   if (d->written[order] == d->d.totals[order]
2520       && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2521 				   G.pagesize),
2522 		SEEK_CUR) != 0)
2523     fatal_error (input_location, "can%'t write PCH file: %m");
2524 }
2525 
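/* Editor's sketch (not part of GCC): the padding strategy above in
   isolation -- write zeros from a static buffer when the pad is small,
   otherwise fseek forward; the skipped bytes read back as zeros once
   later data is written beyond them.  */
#if 0
static int
demo_pad (FILE *f, size_t padding)
{
  static const char zeros[256];
  if (padding <= sizeof zeros)
    return fwrite (zeros, 1, padding, f) == padding ? 0 : -1;
  return fseek (f, (long) padding, SEEK_CUR);
}
#endif
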
2526 void
2527 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2528 {
2529   if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2530     fatal_error (input_location, "can%'t write PCH file: %m");
2531   free (d);
2532 }
2533 
2534 /* Move the PCH PTE entries just added at the end of by_depth to the
2535    front.  */
2536 
2537 static void
2538 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2539 {
2540   /* First, we swap the new entries to the front of the varrays.  */
2541   page_entry **new_by_depth;
2542   unsigned long **new_save_in_use;
2543 
2544   new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2545   new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2546 
2547   memcpy (&new_by_depth[0],
2548 	  &G.by_depth[count_old_page_tables],
2549 	  count_new_page_tables * sizeof (void *));
2550   memcpy (&new_by_depth[count_new_page_tables],
2551 	  &G.by_depth[0],
2552 	  count_old_page_tables * sizeof (void *));
2553   memcpy (&new_save_in_use[0],
2554 	  &G.save_in_use[count_old_page_tables],
2555 	  count_new_page_tables * sizeof (void *));
2556   memcpy (&new_save_in_use[count_new_page_tables],
2557 	  &G.save_in_use[0],
2558 	  count_old_page_tables * sizeof (void *));
2559 
2560   free (G.by_depth);
2561   free (G.save_in_use);
2562 
2563   G.by_depth = new_by_depth;
2564   G.save_in_use = new_save_in_use;
2565 
2566   /* Now update all the index_by_depth fields.  */
2567   for (unsigned i = G.by_depth_in_use; i--;)
2568     {
2569       page_entry *p = G.by_depth[i];
2570       p->index_by_depth = i;
2571     }
2572 
2573   /* And last, we update the depth pointers in G.depth.  The first
2574      entry is already 0, and context 0 entries always start at index
2575      0, so there is nothing to update in the first slot.  We need a
2576      second slot, only if we have old ptes, and if we do, they start
2577      at index count_new_page_tables.  */
2578   if (count_old_page_tables)
2579     push_depth (count_new_page_tables);
2580 }
2581 
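/* Editor's sketch (not part of GCC): the four memcpy calls above amount
   to a rotation that brings the COUNT_NEW newest entries to the front.
   For a generic pointer array:  */
#if 0
static void
demo_rotate_to_front (void **a, size_t n_old, size_t n_new)
{
  void **tmp = XNEWVEC (void *, n_old + n_new);
  memcpy (tmp, a + n_old, n_new * sizeof (void *));
  memcpy (tmp + n_new, a, n_old * sizeof (void *));
  memcpy (a, tmp, (n_old + n_new) * sizeof (void *));
  free (tmp);
}
#endif
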
2582 void
2583 ggc_pch_read (FILE *f, void *addr)
2584 {
2585   struct ggc_pch_ondisk d;
2586   unsigned i;
2587   char *offs = (char *) addr;
2588   unsigned long count_old_page_tables;
2589   unsigned long count_new_page_tables;
2590 
2591   count_old_page_tables = G.by_depth_in_use;
2592 
2593   if (fread (&d, sizeof (d), 1, f) != 1)
2594     fatal_error (input_location, "cannot read PCH file: %m");
2595 
2596   /* We've just read in a PCH file.  So, every object that used to be
2597      allocated is now free.  */
2598   clear_marks ();
2599 #ifdef ENABLE_GC_CHECKING
2600   poison_pages ();
2601 #endif
2602   /* Since we free all the allocated objects, the free list becomes
2603      useless.  Validate it now, which will also clear it.  */
2604   validate_free_objects ();
2605 
2606   /* No object read from a PCH file should ever be freed.  So, set the
2607      context depth to 1, and set the depth of all the currently-allocated
2608      pages to be 1 too.  PCH pages will have depth 0.  */
2609   gcc_assert (!G.context_depth);
2610   G.context_depth = 1;
2611   /* Allocate space for the depth 1 finalizers.  */
2612   G.finalizers.safe_push (vNULL);
2613   G.vec_finalizers.safe_push (vNULL);
2614   gcc_assert (G.finalizers.length () == 2);
2615   for (i = 0; i < NUM_ORDERS; i++)
2616     {
2617       page_entry *p;
2618       for (p = G.pages[i]; p != NULL; p = p->next)
2619 	p->context_depth = G.context_depth;
2620     }
2621 
2622   /* Allocate the appropriate page-table entries for the pages read from
2623      the PCH file.  */
2624 
2625   for (i = 0; i < NUM_ORDERS; i++)
2626     {
2627       struct page_entry *entry;
2628       char *pte;
2629       size_t bytes;
2630       size_t num_objs;
2631       size_t j;
2632 
2633       if (d.totals[i] == 0)
2634 	continue;
2635 
2636       bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
2637       num_objs = bytes / OBJECT_SIZE (i);
2638       entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2639 					    - sizeof (long)
2640 					    + BITMAP_SIZE (num_objs + 1)));
2641       entry->bytes = bytes;
2642       entry->page = offs;
2643       entry->context_depth = 0;
2644       offs += bytes;
2645       entry->num_free_objects = 0;
2646       entry->order = i;
2647 
2648       for (j = 0;
2649 	   j + HOST_BITS_PER_LONG <= num_objs + 1;
2650 	   j += HOST_BITS_PER_LONG)
2651 	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2652       for (; j < num_objs + 1; j++)
2653 	entry->in_use_p[j / HOST_BITS_PER_LONG]
2654 	  |= 1L << (j % HOST_BITS_PER_LONG);
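      /* Editor's note (illustrative): with, say, 70 objects and a
	 64-bit host, the first loop fills in_use_p[0] with all ones and
	 the second sets bits 0..6 of in_use_p[1] -- objects 64..69 plus
	 the always-set past-the-end bit.  */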
2655 
2656       for (pte = entry->page;
2657 	   pte < entry->page + entry->bytes;
2658 	   pte += G.pagesize)
2659 	set_page_table_entry (pte, entry);
2660 
2661       if (G.page_tails[i] != NULL)
2662 	G.page_tails[i]->next = entry;
2663       else
2664 	G.pages[i] = entry;
2665       G.page_tails[i] = entry;
2666 
2667       /* We start off by just adding all the new information to the
2668 	 end of the varrays; later we will move the new information
2669 	 to the front of the varrays, as the PCH page tables are at
2670 	 context 0.  */
2671       push_by_depth (entry, 0);
2672     }
2673 
2674   /* Now, we update the various data structures that speed page table
2675      handling.  */
2676   count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2677 
2678   move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2679 
2680   /* Update the statistics.  */
2681   G.allocated = G.allocated_last_gc = offs - (char *)addr;
2682 }
2683