1 /* "Bag-of-pages" garbage collector for the GNU compiler.
2    Copyright (C) 1999-2017 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "alias.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "memmodel.h"
28 #include "tm_p.h"
29 #include "diagnostic-core.h"
30 #include "flags.h"
31 #include "ggc-internal.h"
32 #include "timevar.h"
33 #include "params.h"
34 #include "cgraph.h"
35 #include "cfgloop.h"
36 #include "plugin.h"
37 
38 /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
39    file open.  Prefer either to valloc.  */
40 #ifdef HAVE_MMAP_ANON
41 # undef HAVE_MMAP_DEV_ZERO
42 # define USING_MMAP
43 #endif
44 
45 #ifdef HAVE_MMAP_DEV_ZERO
46 # define USING_MMAP
47 #endif
48 
49 #ifndef USING_MMAP
50 #define USING_MALLOC_PAGE_GROUPS
51 #endif
52 
53 #if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
54     && defined(USING_MMAP)
55 # define USING_MADVISE
56 #endif
57 
58 /* Strategy:
59 
60    This garbage-collecting allocator allocates objects on one of a set
61    of pages.  Each page can allocate objects of a single size only;
62    available sizes are powers of two starting at four bytes.  The size
63    of an allocation request is rounded up to the next power of two
64    (`order'), and satisfied from the appropriate page.
65 
66    Each page is recorded in a page-entry, which also maintains an
67    in-use bitmap of object positions on the page.  This allows the
68    allocation state of a particular object to be flipped without
69    touching the page itself.
70 
71    Each page-entry also has a context depth, which is used to track
72    pushing and popping of allocation contexts.  Only objects allocated
73    in the current (highest-numbered) context may be collected.
74 
75    Page entries are arranged in an array of singly-linked lists.  The
76    array is indexed by the allocation size, in bits, of the pages on
77    it; i.e. all pages on a list allocate objects of the same size.
78    Pages are ordered on the list such that all non-full pages precede
79    all full pages, with non-full pages arranged in order of decreasing
80    context depth.
81 
82    Empty pages (of all orders) are kept on a single page cache list,
83    and are considered first when new pages are required; they are
84    deallocated at the start of the next collection if they haven't
85    been recycled by then.  */
86 
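/* Illustrative sketch only (not part of the allocator): the word/mask
   arithmetic behind the per-page in-use bitmaps described above.  The
   helper names here are invented for the example; the real code inlines
   the same calculation (see ggc_internal_alloc and ggc_set_mark below).  */
#if 0
static inline void
example_set_bit (unsigned long *bitmap, unsigned bit)
{
  /* Locate the word holding BIT, then OR in a one-bit mask.  */
  bitmap[bit / HOST_BITS_PER_LONG]
    |= (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
}

static inline int
example_test_bit (const unsigned long *bitmap, unsigned bit)
{
  return (bitmap[bit / HOST_BITS_PER_LONG]
	  >> (bit % HOST_BITS_PER_LONG)) & 1;
}
#endif
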
87 /* Define GGC_DEBUG_LEVEL to print debugging information.
88      0: No debugging output.
89      1: GC statistics only.
90      2: Page-entry allocations/deallocations as well.
91      3: Object allocations as well.
92      4: Object marks as well.  */
93 #define GGC_DEBUG_LEVEL (0)
94 
95 /* A two-level tree is used to look up the page-entry for a given
96    pointer.  Two chunks of the pointer's bits are extracted to index
97    the first and second levels of the tree, as follows:
98 
99 				   HOST_PAGE_SIZE_BITS
100 			   32		|      |
101        msb +----------------+----+------+------+ lsb
102 			    |    |      |
103 			 PAGE_L1_BITS   |
104 				 |      |
105 			       PAGE_L2_BITS
106 
107    The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
108    pages are aligned on system page boundaries.  The next most
109    significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
110    index values in the lookup table, respectively.
111 
112    For 32-bit architectures and the settings below, there are no
113    leftover bits.  For architectures with wider pointers, the lookup
114    tree points to a list of pages, which must be scanned to find the
115    correct one.  */
116 
117 #define PAGE_L1_BITS	(8)
118 #define PAGE_L2_BITS	(32 - PAGE_L1_BITS - G.lg_pagesize)
119 #define PAGE_L1_SIZE	((uintptr_t) 1 << PAGE_L1_BITS)
120 #define PAGE_L2_SIZE	((uintptr_t) 1 << PAGE_L2_BITS)
121 
122 #define LOOKUP_L1(p) \
123   (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
124 
125 #define LOOKUP_L2(p) \
126   (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
127 
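/* Worked example (illustrative only; assumes 4K pages, i.e.
   G.lg_pagesize == 12 and therefore PAGE_L2_BITS == 12):

     p             = 0x0804a123
     LOOKUP_L1 (p) = (0x0804a123 >> 24) & 0xff  = 0x08
     LOOKUP_L2 (p) = (0x0804a123 >> 12) & 0xfff = 0x04a

   so on a 32-bit host the page entry is G.lookup[0x08][0x04a]; on a
   64-bit host the chain of tables is first searched for the node whose
   high_bits match the top 32 bits of P.  */
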
128 /* The number of objects per allocation page, for objects on a page of
129    the indicated ORDER.  */
130 #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
131 
132 /* The number of objects in P.  */
133 #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
134 
135 /* The size of an object on a page of the indicated ORDER.  */
136 #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
137 
138 /* For speed, we avoid doing a general integer divide to locate the
139    offset in the allocation bitmap, by precalculating numbers M, S
140    such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
141    within the page which is evenly divisible by the object size Z.  */
142 #define DIV_MULT(ORDER) inverse_table[ORDER].mult
143 #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
144 #define OFFSET_TO_BIT(OFFSET, ORDER) \
145   (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
146 
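/* Worked example of the trick above (illustrative only; it assumes a
   24-byte object size, e.g. the MAX_ALIGNMENT * 3 extra order on a host
   where MAX_ALIGNMENT is 8, and 32-bit arithmetic):

     Z = 24 = 2^3 * 3, so DIV_SHIFT is 3 and DIV_MULT is 0xaaaaaaab, the
     multiplicative inverse of 3 modulo 2^32 (3 * 0xaaaaaaab == 0x200000001,
     i.e. 1 mod 2^32).  For the 10th object on the page, OFFSET = 240:

       (240 * 0xaaaaaaab) mod 2^32 = 80, and 80 >> 3 = 10 = 240 / 24.  */
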
147 /* We use this structure to determine the alignment required for
148    allocations.  For power-of-two sized allocations, that's not a
149    problem, but it does matter for odd-sized allocations.
150    We do not care about alignment for floating-point types.  */
151 
152 struct max_alignment {
153   char c;
154   union {
155     int64_t i;
156     void *p;
157   } u;
158 };
159 
160 /* The biggest alignment required.  */
161 
162 #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
163 
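/* The struct-plus-offsetof idiom above is the portable pre-C11 way of
   asking for a type's alignment.  A hedged sketch of the same idiom
   applied to a single type (the struct name is invented for this
   example and is not used elsewhere):  */
#if 0
struct example_align_of_double { char c; double d; };
/* offsetof (struct example_align_of_double, d) is the alignment
   required for double on this host.  */
#endif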
164 
165 /* The number of extra orders, not corresponding to power-of-two sized
166    objects.  */
167 
168 #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
169 
170 #define RTL_SIZE(NSLOTS) \
171   (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
172 
173 #define TREE_EXP_SIZE(OPS) \
174   (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
175 
176 /* The Ith entry is the maximum size of an object to be stored in the
177    Ith extra order.  Adding a new entry to this array is the *only*
178    thing you need to do to add a new special allocation size.  */
179 
180 static const size_t extra_order_size_table[] = {
181   /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
182      There are a lot of structures with these sizes and explicitly
183      listing them risks orders being dropped because they changed size.  */
184   MAX_ALIGNMENT * 3,
185   MAX_ALIGNMENT * 5,
186   MAX_ALIGNMENT * 6,
187   MAX_ALIGNMENT * 7,
188   MAX_ALIGNMENT * 9,
189   MAX_ALIGNMENT * 10,
190   MAX_ALIGNMENT * 11,
191   MAX_ALIGNMENT * 12,
192   MAX_ALIGNMENT * 13,
193   MAX_ALIGNMENT * 14,
194   MAX_ALIGNMENT * 15,
195   sizeof (struct tree_decl_non_common),
196   sizeof (struct tree_field_decl),
197   sizeof (struct tree_parm_decl),
198   sizeof (struct tree_var_decl),
199   sizeof (struct tree_type_non_common),
200   sizeof (struct function),
201   sizeof (struct basic_block_def),
202   sizeof (struct cgraph_node),
203   sizeof (struct loop),
204 };
205 
206 /* The total number of orders.  */
207 
208 #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
209 
210 /* Compute the smallest nonnegative number which when added to X gives
211    a multiple of F.  */
212 
213 #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
214 
215 /* Round X to the next multiple of the page size.  */
216 
217 #define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize)
218 
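/* Example (illustrative): with x = 10 and f = 8,
   ROUND_UP_VALUE (10, 8) = 7 - ((7 + 10) % 8) = 7 - 1 = 6, and indeed
   10 + 6 = 16 is the next multiple of 8; when X is already a multiple
   of F the result is 0.  Assuming 4K pages, PAGE_ALIGN (2500) == 4096
   and PAGE_ALIGN (4096) == 4096.  */
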
219 /* The Ith entry is the number of objects on a page of order I.  */
220 
221 static unsigned objects_per_page_table[NUM_ORDERS];
222 
223 /* The Ith entry is the size of an object on a page of order I.  */
224 
225 static size_t object_size_table[NUM_ORDERS];
226 
227 /* The Ith entry is a pair of numbers (mult, shift) such that
228    ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
229    for all k evenly divisible by OBJECT_SIZE(I).  */
230 
231 static struct
232 {
233   size_t mult;
234   unsigned int shift;
235 }
236 inverse_table[NUM_ORDERS];
237 
238 /* A page_entry records the status of an allocation page.  This
239    structure is dynamically sized to fit the bitmap in_use_p.  */
240 struct page_entry
241 {
242   /* The next page-entry with objects of the same size, or NULL if
243      this is the last page-entry.  */
244   struct page_entry *next;
245 
246   /* The previous page-entry with objects of the same size, or NULL if
247      this is the first page-entry.   The PREV pointer exists solely to
248      keep the cost of ggc_free manageable.  */
249   struct page_entry *prev;
250 
251   /* The number of bytes allocated.  (This will always be a multiple
252      of the host system page size.)  */
253   size_t bytes;
254 
255   /* The address at which the memory is allocated.  */
256   char *page;
257 
258 #ifdef USING_MALLOC_PAGE_GROUPS
259   /* Back pointer to the page group this page came from.  */
260   struct page_group *group;
261 #endif
262 
263   /* This is the index in the by_depth array where this page entry
264      can be found.  */
265   unsigned long index_by_depth;
266 
267   /* Context depth of this page.  */
268   unsigned short context_depth;
269 
270   /* The number of free objects remaining on this page.  */
271   unsigned short num_free_objects;
272 
273   /* A likely candidate for the bit position of a free object for the
274      next allocation from this page.  */
275   unsigned short next_bit_hint;
276 
277   /* The lg of the size of objects allocated from this page.  */
278   unsigned char order;
279 
280   /* Discarded page? */
281   bool discarded;
282 
283   /* A bit vector indicating whether or not objects are in use.  The
284      Nth bit is one if the Nth object on this page is allocated.  This
285      array is dynamically sized.  */
286   unsigned long in_use_p[1];
287 };
288 
289 #ifdef USING_MALLOC_PAGE_GROUPS
290 /* A page_group describes a large allocation from malloc, from which
291    we parcel out aligned pages.  */
292 struct page_group
293 {
294   /* A linked list of all extant page groups.  */
295   struct page_group *next;
296 
297   /* The address we received from malloc.  */
298   char *allocation;
299 
300   /* The size of the block.  */
301   size_t alloc_size;
302 
303   /* A bitmask of pages in use.  */
304   unsigned int in_use;
305 };
306 #endif
307 
308 #if HOST_BITS_PER_PTR <= 32
309 
310 /* On 32-bit hosts, we use a two-level page table, as pictured above.  */
311 typedef page_entry **page_table[PAGE_L1_SIZE];
312 
313 #else
314 
315 /* On 64-bit hosts, we use the same two-level page tables plus a linked
316    list that disambiguates the top 32 bits.  There will almost always be
317    exactly one entry in the list.  */
318 typedef struct page_table_chain
319 {
320   struct page_table_chain *next;
321   size_t high_bits;
322   page_entry **table[PAGE_L1_SIZE];
323 } *page_table;
324 
325 #endif
326 
327 class finalizer
328 {
329 public:
330   finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}
331 
332   void *addr () const { return m_addr; }
333 
334   void call () const { m_function (m_addr); }
335 
336 private:
337   void *m_addr;
338   void (*m_function)(void *);
339 };
340 
341 class vec_finalizer
342 {
343 public:
344   vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
345     m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}
346 
347   void call () const
348     {
349       for (size_t i = 0; i < m_n_objects; i++)
350 	m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
351     }
352 
353   void *addr () const { return reinterpret_cast<void *> (m_addr); }
354 
355 private:
356   uintptr_t m_addr;
357   void (*m_function)(void *);
358   size_t m_object_size;
359   size_t m_n_objects;
360 };
361 
362 #ifdef ENABLE_GC_ALWAYS_COLLECT
363 /* List of free objects to be verified as actually free on the
364    next collection.  */
365 struct free_object
366 {
367   void *object;
368   struct free_object *next;
369 };
370 #endif
371 
372 /* The rest of the global variables.  */
373 static struct ggc_globals
374 {
375   /* The Nth element in this array is a page with objects of size 2^N.
376      If there are any pages with free objects, they will be at the
377      head of the list.  NULL if there are no page-entries for this
378      object size.  */
379   page_entry *pages[NUM_ORDERS];
380 
381   /* The Nth element in this array is the last page with objects of
382      size 2^N.  NULL if there are no page-entries for this object
383      size.  */
384   page_entry *page_tails[NUM_ORDERS];
385 
386   /* Lookup table for associating allocation pages with object addresses.  */
387   page_table lookup;
388 
389   /* The system's page size.  */
390   size_t pagesize;
391   size_t lg_pagesize;
392 
393   /* Bytes currently allocated.  */
394   size_t allocated;
395 
396   /* Bytes allocated at the end of the last collection.  */
397   size_t allocated_last_gc;
398 
399   /* Total amount of memory mapped.  */
400   size_t bytes_mapped;
401 
402   /* Bit N set if any allocations have been done at context depth N.  */
403   unsigned long context_depth_allocations;
404 
405   /* Bit N set if any collections have been done at context depth N.  */
406   unsigned long context_depth_collections;
407 
408   /* The current depth in the context stack.  */
409   unsigned short context_depth;
410 
411   /* A file descriptor open to /dev/zero for reading.  */
412 #if defined (HAVE_MMAP_DEV_ZERO)
413   int dev_zero_fd;
414 #endif
415 
416   /* A cache of free system pages.  */
417   page_entry *free_pages;
418 
419 #ifdef USING_MALLOC_PAGE_GROUPS
420   page_group *page_groups;
421 #endif
422 
423   /* The stream used for debugging output.  */
424   FILE *debug_file;
425 
426   /* Current number of elements in use in depth below.  */
427   unsigned int depth_in_use;
428 
429   /* Maximum number of elements that can be used before resizing.  */
430   unsigned int depth_max;
431 
432   /* Each element of this array is an index into by_depth where the
433      given depth starts.  This array is indexed by the depth we are
434      interested in.  */
435   unsigned int *depth;
436 
437   /* Current number of elements in use in by_depth below.  */
438   unsigned int by_depth_in_use;
439 
440   /* Maximum number of elements that can be used before resizing.  */
441   unsigned int by_depth_max;
442 
443   /* Each element of this array is a pointer to a page_entry, all
444      page_entries can be found in here by increasing depth.
445      index_by_depth in the page_entry is the index into this data
446      structure where that page_entry can be found.  This is used to
447      speed up finding all page_entries at a particular depth.  */
448   page_entry **by_depth;
449 
450   /* Each element is a pointer to the saved in_use_p bits, if any,
451      zero otherwise.  We allocate them all together, to enable a
452      better runtime data access pattern.  */
453   unsigned long **save_in_use;
454 
455   /* Finalizers for single objects.  The first index is collection_depth.  */
456   vec<vec<finalizer> > finalizers;
457 
458   /* Finalizers for vectors of objects.  */
459   vec<vec<vec_finalizer> > vec_finalizers;
460 
461 #ifdef ENABLE_GC_ALWAYS_COLLECT
462   /* List of free objects to be verified as actually free on the
463      next collection.  */
464   struct free_object *free_object_list;
465 #endif
466 
467   struct
468   {
469     /* Total GC-allocated memory.  */
470     unsigned long long total_allocated;
471     /* Total overhead for GC-allocated memory.  */
472     unsigned long long total_overhead;
473 
474     /* Total allocations and overhead for sizes less than 32, 64 and 128.
475        These sizes are interesting because they are typical cache line
476        sizes.  */
477 
478     unsigned long long total_allocated_under32;
479     unsigned long long total_overhead_under32;
480 
481     unsigned long long total_allocated_under64;
482     unsigned long long total_overhead_under64;
483 
484     unsigned long long total_allocated_under128;
485     unsigned long long total_overhead_under128;
486 
487     /* The allocations for each of the allocation orders.  */
488     unsigned long long total_allocated_per_order[NUM_ORDERS];
489 
490     /* The overhead for each of the allocation orders.  */
491     unsigned long long total_overhead_per_order[NUM_ORDERS];
492   } stats;
493 } G;
494 
495 /* True if a gc is currently taking place.  */
496 
497 static bool in_gc = false;
498 
499 /* The size in bytes required to maintain a bitmap for the objects
500    on a page-entry.  */
501 #define BITMAP_SIZE(Num_objects) \
502   (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
503 
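/* Example (illustrative; assumes 64-bit longs): a page holding 512
   objects needs BITMAP_SIZE (512 + 1) = CEIL (513, 64) * 8 = 72 bytes,
   the "+ 1" accounting for the one-past-the-end sentinel bit that
   alloc_page sets below.  */
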
504 /* Allocate pages in chunks of this size, to throttle calls to memory
505    allocation routines.  The first page is used, the rest go onto the
506    free list.  This cannot be larger than HOST_BITS_PER_INT for the
507    in_use bitmask for page_group.  Hosts that need a different value
508    can override this by defining GGC_QUIRE_SIZE explicitly.  */
509 #ifndef GGC_QUIRE_SIZE
510 # ifdef USING_MMAP
511 #  define GGC_QUIRE_SIZE 512	/* 2MB for 4K pages */
512 # else
513 #  define GGC_QUIRE_SIZE 16
514 # endif
515 #endif
516 
517 /* Initial guess as to how many page table entries we might need.  */
518 #define INITIAL_PTE_COUNT 128
519 
520 static int ggc_allocated_p (const void *);
521 static page_entry *lookup_page_table_entry (const void *);
522 static void set_page_table_entry (void *, page_entry *);
523 #ifdef USING_MMAP
524 static char *alloc_anon (char *, size_t, bool check);
525 #endif
526 #ifdef USING_MALLOC_PAGE_GROUPS
527 static size_t page_group_index (char *, char *);
528 static void set_page_group_in_use (page_group *, char *);
529 static void clear_page_group_in_use (page_group *, char *);
530 #endif
531 static struct page_entry * alloc_page (unsigned);
532 static void free_page (struct page_entry *);
533 static void release_pages (void);
534 static void clear_marks (void);
535 static void sweep_pages (void);
536 static void ggc_recalculate_in_use_p (page_entry *);
537 static void compute_inverse (unsigned);
538 static inline void adjust_depth (void);
539 static void move_ptes_to_front (int, int);
540 
541 void debug_print_page_list (int);
542 static void push_depth (unsigned int);
543 static void push_by_depth (page_entry *, unsigned long *);
544 
545 /* Push an entry onto G.depth.  */
546 
547 inline static void
548 push_depth (unsigned int i)
549 {
550   if (G.depth_in_use >= G.depth_max)
551     {
552       G.depth_max *= 2;
553       G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
554     }
555   G.depth[G.depth_in_use++] = i;
556 }
557 
558 /* Push an entry onto G.by_depth and G.save_in_use.  */
559 
560 inline static void
561 push_by_depth (page_entry *p, unsigned long *s)
562 {
563   if (G.by_depth_in_use >= G.by_depth_max)
564     {
565       G.by_depth_max *= 2;
566       G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
567       G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
568 				  G.by_depth_max);
569     }
570   G.by_depth[G.by_depth_in_use] = p;
571   G.save_in_use[G.by_depth_in_use++] = s;
572 }
573 
574 #if (GCC_VERSION < 3001)
575 #define prefetch(X) ((void) X)
576 #else
577 #define prefetch(X) __builtin_prefetch (X)
578 #endif
579 
580 #define save_in_use_p_i(__i) \
581   (G.save_in_use[__i])
582 #define save_in_use_p(__p) \
583   (save_in_use_p_i (__p->index_by_depth))
584 
585 /* Returns nonzero if P was allocated in GC'able memory.  */
586 
587 static inline int
588 ggc_allocated_p (const void *p)
589 {
590   page_entry ***base;
591   size_t L1, L2;
592 
593 #if HOST_BITS_PER_PTR <= 32
594   base = &G.lookup[0];
595 #else
596   page_table table = G.lookup;
597   uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
598   while (1)
599     {
600       if (table == NULL)
601 	return 0;
602       if (table->high_bits == high_bits)
603 	break;
604       table = table->next;
605     }
606   base = &table->table[0];
607 #endif
608 
609   /* Extract the level 1 and 2 indices.  */
610   L1 = LOOKUP_L1 (p);
611   L2 = LOOKUP_L2 (p);
612 
613   return base[L1] && base[L1][L2];
614 }
615 
616 /* Traverse the page table and find the entry for a page.
617    Die (probably) if the object wasn't allocated via GC.  */
618 
619 static inline page_entry *
620 lookup_page_table_entry (const void *p)
621 {
622   page_entry ***base;
623   size_t L1, L2;
624 
625 #if HOST_BITS_PER_PTR <= 32
626   base = &G.lookup[0];
627 #else
628   page_table table = G.lookup;
629   uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
630   while (table->high_bits != high_bits)
631     table = table->next;
632   base = &table->table[0];
633 #endif
634 
635   /* Extract the level 1 and 2 indices.  */
636   L1 = LOOKUP_L1 (p);
637   L2 = LOOKUP_L2 (p);
638 
639   return base[L1][L2];
640 }
641 
642 /* Set the page table entry for a page.  */
643 
644 static void
645 set_page_table_entry (void *p, page_entry *entry)
646 {
647   page_entry ***base;
648   size_t L1, L2;
649 
650 #if HOST_BITS_PER_PTR <= 32
651   base = &G.lookup[0];
652 #else
653   page_table table;
654   uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
655   for (table = G.lookup; table; table = table->next)
656     if (table->high_bits == high_bits)
657       goto found;
658 
659   /* Not found -- allocate a new table.  */
660   table = XCNEW (struct page_table_chain);
661   table->next = G.lookup;
662   table->high_bits = high_bits;
663   G.lookup = table;
664 found:
665   base = &table->table[0];
666 #endif
667 
668   /* Extract the level 1 and 2 indices.  */
669   L1 = LOOKUP_L1 (p);
670   L2 = LOOKUP_L2 (p);
671 
672   if (base[L1] == NULL)
673     base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
674 
675   base[L1][L2] = entry;
676 }
677 
678 /* Prints the page-entry list for object size ORDER, for debugging.  */
679 
680 DEBUG_FUNCTION void
681 debug_print_page_list (int order)
682 {
683   page_entry *p;
684   printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
685 	  (void *) G.page_tails[order]);
686   p = G.pages[order];
687   while (p != NULL)
688     {
689       printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
690 	      p->num_free_objects);
691       p = p->next;
692     }
693   printf ("NULL\n");
694   fflush (stdout);
695 }
696 
697 #ifdef USING_MMAP
698 /* Allocate SIZE bytes of anonymous memory, preferably near PREF
699    (if non-null).  The ifdef structure here is intended to cause a
700    compile error unless exactly one of the HAVE_* is defined.  */
701 
702 static inline char *
703 alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
704 {
705 #ifdef HAVE_MMAP_ANON
706   char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
707 			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
708 #endif
709 #ifdef HAVE_MMAP_DEV_ZERO
710   char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
711 			      MAP_PRIVATE, G.dev_zero_fd, 0);
712 #endif
713 
714   if (page == (char *) MAP_FAILED)
715     {
716       if (!check)
717         return NULL;
718       perror ("virtual memory exhausted");
719       exit (FATAL_EXIT_CODE);
720     }
721 
722   /* Remember that we allocated this memory.  */
723   G.bytes_mapped += size;
724 
725   /* Pretend we don't have access to the allocated pages.  We'll enable
726      access to smaller pieces of the area in ggc_internal_alloc.  Discard the
727      handle to avoid handle leak.  */
728   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
729 
730   return page;
731 }
732 #endif
733 #ifdef USING_MALLOC_PAGE_GROUPS
734 /* Compute the index for this page into the page group.  */
735 
736 static inline size_t
737 page_group_index (char *allocation, char *page)
738 {
739   return (size_t) (page - allocation) >> G.lg_pagesize;
740 }
741 
742 /* Set and clear the in_use bit for this page in the page group.  */
743 
744 static inline void
745 set_page_group_in_use (page_group *group, char *page)
746 {
747   group->in_use |= 1 << page_group_index (group->allocation, page);
748 }
749 
750 static inline void
751 clear_page_group_in_use (page_group *group, char *page)
752 {
753   group->in_use &= ~(1 << page_group_index (group->allocation, page));
754 }
755 #endif
756 
757 /* Allocate a new page for allocating objects of size 2^ORDER,
758    and return an entry for it.  The entry is not added to the
759    appropriate page_table list.  */
760 
761 static inline struct page_entry *
762 alloc_page (unsigned order)
763 {
764   struct page_entry *entry, *p, **pp;
765   char *page;
766   size_t num_objects;
767   size_t bitmap_size;
768   size_t page_entry_size;
769   size_t entry_size;
770 #ifdef USING_MALLOC_PAGE_GROUPS
771   page_group *group;
772 #endif
773 
774   num_objects = OBJECTS_PER_PAGE (order);
775   bitmap_size = BITMAP_SIZE (num_objects + 1);
776   page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
777   entry_size = num_objects * OBJECT_SIZE (order);
778   if (entry_size < G.pagesize)
779     entry_size = G.pagesize;
780   entry_size = PAGE_ALIGN (entry_size);
781 
782   entry = NULL;
783   page = NULL;
784 
785   /* Check the list of free pages for one we can use.  */
786   for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
787     if (p->bytes == entry_size)
788       break;
789 
790   if (p != NULL)
791     {
792       if (p->discarded)
793         G.bytes_mapped += p->bytes;
794       p->discarded = false;
795 
796       /* Recycle the allocated memory from this page ...  */
797       *pp = p->next;
798       page = p->page;
799 
800 #ifdef USING_MALLOC_PAGE_GROUPS
801       group = p->group;
802 #endif
803 
804       /* ... and, if possible, the page entry itself.  */
805       if (p->order == order)
806 	{
807 	  entry = p;
808 	  memset (entry, 0, page_entry_size);
809 	}
810       else
811 	free (p);
812     }
813 #ifdef USING_MMAP
814   else if (entry_size == G.pagesize)
815     {
816       /* We want just one page.  Allocate a bunch of them and put the
817 	 extras on the freelist.  (Can only do this optimization with
818 	 mmap for backing store.)  */
819       struct page_entry *e, *f = G.free_pages;
820       int i, entries = GGC_QUIRE_SIZE;
821 
822       page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
823       if (page == NULL)
824      	{
825 	  page = alloc_anon (NULL, G.pagesize, true);
826           entries = 1;
827 	}
828 
829       /* This loop counts down so that the chain will be in ascending
830 	 memory order.  */
831       for (i = entries - 1; i >= 1; i--)
832 	{
833 	  e = XCNEWVAR (struct page_entry, page_entry_size);
834 	  e->order = order;
835 	  e->bytes = G.pagesize;
836 	  e->page = page + (i << G.lg_pagesize);
837 	  e->next = f;
838 	  f = e;
839 	}
840 
841       G.free_pages = f;
842     }
843   else
844     page = alloc_anon (NULL, entry_size, true);
845 #endif
846 #ifdef USING_MALLOC_PAGE_GROUPS
847   else
848     {
849       /* Allocate a large block of memory and serve out the aligned
850 	 pages therein.  This results in much less memory wastage
851 	 than the traditional implementation of valloc.  */
852 
853       char *allocation, *a, *enda;
854       size_t alloc_size, head_slop, tail_slop;
855       int multiple_pages = (entry_size == G.pagesize);
856 
857       if (multiple_pages)
858 	alloc_size = GGC_QUIRE_SIZE * G.pagesize;
859       else
860 	alloc_size = entry_size + G.pagesize - 1;
861       allocation = XNEWVEC (char, alloc_size);
862 
863       page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
864       head_slop = page - allocation;
865       if (multiple_pages)
866 	tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
867       else
868 	tail_slop = alloc_size - entry_size - head_slop;
869       enda = allocation + alloc_size - tail_slop;
870 
871       /* We allocated a block of N pages that is likely not page-aligned,
872 	 leaving us with N-1 usable pages.  We plan to place the page_group
873 	 structure somewhere in the slop.  */
874       if (head_slop >= sizeof (page_group))
875 	group = (page_group *)page - 1;
876       else
877 	{
878 	  /* We magically got an aligned allocation.  Too bad, we have
879 	     to waste a page anyway.  */
880 	  if (tail_slop == 0)
881 	    {
882 	      enda -= G.pagesize;
883 	      tail_slop += G.pagesize;
884 	    }
885 	  gcc_assert (tail_slop >= sizeof (page_group));
886 	  group = (page_group *)enda;
887 	  tail_slop -= sizeof (page_group);
888 	}
889 
890       /* Remember that we allocated this memory.  */
891       group->next = G.page_groups;
892       group->allocation = allocation;
893       group->alloc_size = alloc_size;
894       group->in_use = 0;
895       G.page_groups = group;
896       G.bytes_mapped += alloc_size;
897 
898       /* If we allocated multiple pages, put the rest on the free list.  */
899       if (multiple_pages)
900 	{
901 	  struct page_entry *e, *f = G.free_pages;
902 	  for (a = enda - G.pagesize; a != page; a -= G.pagesize)
903 	    {
904 	      e = XCNEWVAR (struct page_entry, page_entry_size);
905 	      e->order = order;
906 	      e->bytes = G.pagesize;
907 	      e->page = a;
908 	      e->group = group;
909 	      e->next = f;
910 	      f = e;
911 	    }
912 	  G.free_pages = f;
913 	}
914     }
915 #endif
916 
917   if (entry == NULL)
918     entry = XCNEWVAR (struct page_entry, page_entry_size);
919 
920   entry->bytes = entry_size;
921   entry->page = page;
922   entry->context_depth = G.context_depth;
923   entry->order = order;
924   entry->num_free_objects = num_objects;
925   entry->next_bit_hint = 1;
926 
927   G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
928 
929 #ifdef USING_MALLOC_PAGE_GROUPS
930   entry->group = group;
931   set_page_group_in_use (group, page);
932 #endif
933 
934   /* Set the one-past-the-end in-use bit.  This acts as a sentinel as we
935      increment the hint.  */
936   entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
937     = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
938 
939   set_page_table_entry (page, entry);
940 
941   if (GGC_DEBUG_LEVEL >= 2)
942     fprintf (G.debug_file,
943 	     "Allocating page at %p, object size=%lu, data %p-%p\n",
944 	     (void *) entry, (unsigned long) OBJECT_SIZE (order), page,
945 	     page + entry_size - 1);
946 
947   return entry;
948 }
949 
950 /* Adjust the size of G.depth so that no index greater than the one
951    used by the top of G.by_depth is used.  */
952 
953 static inline void
954 adjust_depth (void)
955 {
956   page_entry *top;
957 
958   if (G.by_depth_in_use)
959     {
960       top = G.by_depth[G.by_depth_in_use-1];
961 
962       /* Peel back indices in depth that index into by_depth, so that
963 	 as new elements are added to by_depth, we note the indices
964 	 of those elements, if they are for new context depths.  */
965       while (G.depth_in_use > (size_t)top->context_depth+1)
966 	--G.depth_in_use;
967     }
968 }
969 
970 /* For a page that is no longer needed, put it on the free page list.  */
971 
972 static void
973 free_page (page_entry *entry)
974 {
975   if (GGC_DEBUG_LEVEL >= 2)
976     fprintf (G.debug_file,
977 	     "Deallocating page at %p, data %p-%p\n", (void *) entry,
978 	     entry->page, entry->page + entry->bytes - 1);
979 
980   /* Mark the page as inaccessible.  Discard the handle to avoid handle
981      leak.  */
982   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
983 
984   set_page_table_entry (entry->page, NULL);
985 
986 #ifdef USING_MALLOC_PAGE_GROUPS
987   clear_page_group_in_use (entry->group, entry->page);
988 #endif
989 
990   if (G.by_depth_in_use > 1)
991     {
992       page_entry *top = G.by_depth[G.by_depth_in_use-1];
993       int i = entry->index_by_depth;
994 
995       /* We cannot free a page from a context deeper than the current
996 	 one.  */
997       gcc_assert (entry->context_depth == top->context_depth);
998 
999       /* Put top element into freed slot.  */
1000       G.by_depth[i] = top;
1001       G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
1002       top->index_by_depth = i;
1003     }
1004   --G.by_depth_in_use;
1005 
1006   adjust_depth ();
1007 
1008   entry->next = G.free_pages;
1009   G.free_pages = entry;
1010 }
1011 
1012 /* Release the free page cache to the system.  */
1013 
1014 static void
1015 release_pages (void)
1016 {
1017 #ifdef USING_MADVISE
1018   page_entry *p, *start_p;
1019   char *start;
1020   size_t len;
1021   size_t mapped_len;
1022   page_entry *next, *prev, *newprev;
1023   size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
1024 
1025   /* First free larger contiguous areas to the OS.
1026      This allows other allocators to grab these areas if needed.
1027      This is only done on larger chunks to avoid fragmentation.
1028      This does not always work because the free_pages list is only
1029      approximately sorted. */
1030 
1031   p = G.free_pages;
1032   prev = NULL;
1033   while (p)
1034     {
1035       start = p->page;
1036       start_p = p;
1037       len = 0;
1038       mapped_len = 0;
1039       newprev = prev;
1040       while (p && p->page == start + len)
1041         {
1042           len += p->bytes;
1043 	  if (!p->discarded)
1044 	      mapped_len += p->bytes;
1045 	  newprev = p;
1046           p = p->next;
1047         }
1048       if (len >= free_unit)
1049         {
1050           while (start_p != p)
1051             {
1052               next = start_p->next;
1053               free (start_p);
1054               start_p = next;
1055             }
1056           munmap (start, len);
1057 	  if (prev)
1058 	    prev->next = p;
1059           else
1060             G.free_pages = p;
1061           G.bytes_mapped -= mapped_len;
1062 	  continue;
1063         }
1064       prev = newprev;
1065    }
1066 
1067   /* Now give back the fragmented pages to the OS, but keep the address
1068      space to reuse it next time. */
1069 
1070   for (p = G.free_pages; p; )
1071     {
1072       if (p->discarded)
1073         {
1074           p = p->next;
1075           continue;
1076         }
1077       start = p->page;
1078       len = p->bytes;
1079       start_p = p;
1080       p = p->next;
1081       while (p && p->page == start + len)
1082         {
1083           len += p->bytes;
1084           p = p->next;
1085         }
1086       /* Give the page back to the kernel, but don't free the mapping.
1087          This avoids fragmentation in the virtual memory map of the
1088  	 process. Next time we can reuse it by just touching it. */
1089       madvise (start, len, MADV_DONTNEED);
1090       /* Don't count those pages as mapped, so as not to influence the
1091          garbage collector unnecessarily. */
1092       G.bytes_mapped -= len;
1093       while (start_p != p)
1094         {
1095           start_p->discarded = true;
1096           start_p = start_p->next;
1097         }
1098     }
1099 #endif
1100 #if defined(USING_MMAP) && !defined(USING_MADVISE)
1101   page_entry *p, *next;
1102   char *start;
1103   size_t len;
1104 
1105   /* Gather up adjacent pages so they are unmapped together.  */
1106   p = G.free_pages;
1107 
1108   while (p)
1109     {
1110       start = p->page;
1111       next = p->next;
1112       len = p->bytes;
1113       free (p);
1114       p = next;
1115 
1116       while (p && p->page == start + len)
1117 	{
1118 	  next = p->next;
1119 	  len += p->bytes;
1120 	  free (p);
1121 	  p = next;
1122 	}
1123 
1124       munmap (start, len);
1125       G.bytes_mapped -= len;
1126     }
1127 
1128   G.free_pages = NULL;
1129 #endif
1130 #ifdef USING_MALLOC_PAGE_GROUPS
1131   page_entry **pp, *p;
1132   page_group **gp, *g;
1133 
1134   /* Remove all pages from free page groups from the list.  */
1135   pp = &G.free_pages;
1136   while ((p = *pp) != NULL)
1137     if (p->group->in_use == 0)
1138       {
1139 	*pp = p->next;
1140 	free (p);
1141       }
1142     else
1143       pp = &p->next;
1144 
1145   /* Remove all free page groups, and release the storage.  */
1146   gp = &G.page_groups;
1147   while ((g = *gp) != NULL)
1148     if (g->in_use == 0)
1149       {
1150 	*gp = g->next;
1151 	G.bytes_mapped -= g->alloc_size;
1152 	free (g->allocation);
1153       }
1154     else
1155       gp = &g->next;
1156 #endif
1157 }
1158 
1159 /* This table provides a fast way to determine ceil(log_2(size)) for
1160    allocation requests.  The minimum allocation size is eight bytes.  */
1161 #define NUM_SIZE_LOOKUP 512
1162 static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1163 {
1164   3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1165   4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1166   5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1167   6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1168   6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1169   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1170   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1171   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1172   7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1173   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1174   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1175   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1176   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1177   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1178   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1179   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1180   8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1181   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1182   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1183   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1184   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1185   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1186   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1187   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1188   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1189   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1190   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1191   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1192   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1193   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1194   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1195   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
1196 };
1197 
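/* Example (illustrative): size_lookup[40] is 6, so before init_ggc runs
   a 40-byte request maps to the 64-byte order (2^6).  init_ggc later
   rewrites entries of this table so that such requests instead fall
   into the tightest-fitting extra order, when one exists.  */
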
1198 /* For a given size of memory requested for allocation, return the
1199    actual size that is going to be allocated, as well as the size
1200    order.  */
1201 
1202 static void
1203 ggc_round_alloc_size_1 (size_t requested_size,
1204 			size_t *size_order,
1205 			size_t *alloced_size)
1206 {
1207   size_t order, object_size;
1208 
1209   if (requested_size < NUM_SIZE_LOOKUP)
1210     {
1211       order = size_lookup[requested_size];
1212       object_size = OBJECT_SIZE (order);
1213     }
1214   else
1215     {
1216       order = 10;
1217       while (requested_size > (object_size = OBJECT_SIZE (order)))
1218         order++;
1219     }
1220 
1221   if (size_order)
1222     *size_order = order;
1223   if (alloced_size)
1224     *alloced_size = object_size;
1225 }
1226 
1227 /* For a given size of memory requested for allocation, return the
1228    actual size that is going to be allocated.  */
1229 
1230 size_t
1231 ggc_round_alloc_size (size_t requested_size)
1232 {
1233   size_t size = 0;
1234 
1235   ggc_round_alloc_size_1 (requested_size, NULL, &size);
1236   return size;
1237 }
1238 
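/* A hedged usage sketch (not part of this file): a caller that wants to
   make use of the allocation's slack can ask how much will really be
   handed out before allocating.  */
#if 0
  size_t request = 40;
  size_t actual = ggc_round_alloc_size (request);
  /* ACTUAL is OBJECT_SIZE (order) for the order the request falls in,
     so it is always at least REQUEST.  */
  gcc_assert (actual >= request);
#endif
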
1239 /* Push a finalizer onto the appropriate vec.  */
1240 
1241 static void
1242 add_finalizer (void *result, void (*f)(void *), size_t s, size_t n)
1243 {
1244   if (f == NULL)
1245     /* No finalizer.  */;
1246   else if (n == 1)
1247     {
1248       finalizer fin (result, f);
1249       G.finalizers[G.context_depth].safe_push (fin);
1250     }
1251   else
1252     {
1253       vec_finalizer fin (reinterpret_cast<uintptr_t> (result), f, s, n);
1254       G.vec_finalizers[G.context_depth].safe_push (fin);
1255     }
1256 }
1257 
1258 /* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */
1259 
1260 void *
1261 ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n
1262 		    MEM_STAT_DECL)
1263 {
1264   size_t order, word, bit, object_offset, object_size;
1265   struct page_entry *entry;
1266   void *result;
1267 
1268   ggc_round_alloc_size_1 (size, &order, &object_size);
1269 
1270   /* If there are non-full pages for this size allocation, they are at
1271      the head of the list.  */
1272   entry = G.pages[order];
1273 
1274   /* If there is no page for this object size, or all pages in this
1275      context are full, allocate a new page.  */
1276   if (entry == NULL || entry->num_free_objects == 0)
1277     {
1278       struct page_entry *new_entry;
1279       new_entry = alloc_page (order);
1280 
1281       new_entry->index_by_depth = G.by_depth_in_use;
1282       push_by_depth (new_entry, 0);
1283 
1284       /* We can skip context depths; if we do, make sure we go all the
1285 	 way to the new depth.  */
1286       while (new_entry->context_depth >= G.depth_in_use)
1287 	push_depth (G.by_depth_in_use-1);
1288 
1289       /* If this is the only entry, it's also the tail.  If it is not
1290 	 the only entry, then we must update the PREV pointer of the
1291 	 ENTRY (G.pages[order]) to point to our new page entry.  */
1292       if (entry == NULL)
1293 	G.page_tails[order] = new_entry;
1294       else
1295 	entry->prev = new_entry;
1296 
1297       /* Put new pages at the head of the page list.  By definition the
1298 	 entry at the head of the list always has a NULL PREV pointer.  */
1299       new_entry->next = entry;
1300       new_entry->prev = NULL;
1301       entry = new_entry;
1302       G.pages[order] = new_entry;
1303 
1304       /* For a new page, we know the word and bit positions (in the
1305 	 in_use bitmap) of the first available object -- they're zero.  */
1306       new_entry->next_bit_hint = 1;
1307       word = 0;
1308       bit = 0;
1309       object_offset = 0;
1310     }
1311   else
1312     {
1313       /* First try to use the hint left from the previous allocation
1314 	 to locate a clear bit in the in-use bitmap.  We've made sure
1315 	 that the one-past-the-end bit is always set, so if the hint
1316 	 has run over, this test will fail.  */
1317       unsigned hint = entry->next_bit_hint;
1318       word = hint / HOST_BITS_PER_LONG;
1319       bit = hint % HOST_BITS_PER_LONG;
1320 
1321       /* If the hint didn't work, scan the bitmap from the beginning.  */
1322       if ((entry->in_use_p[word] >> bit) & 1)
1323 	{
1324 	  word = bit = 0;
1325 	  while (~entry->in_use_p[word] == 0)
1326 	    ++word;
1327 
1328 #if GCC_VERSION >= 3004
1329 	  bit = __builtin_ctzl (~entry->in_use_p[word]);
1330 #else
1331 	  while ((entry->in_use_p[word] >> bit) & 1)
1332 	    ++bit;
1333 #endif
1334 
1335 	  hint = word * HOST_BITS_PER_LONG + bit;
1336 	}
1337 
1338       /* Next time, try the next bit.  */
1339       entry->next_bit_hint = hint + 1;
1340 
1341       object_offset = hint * object_size;
1342     }
1343 
1344   /* Set the in-use bit.  */
1345   entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1346 
1347   /* Keep a running total of the number of free objects.  If this page
1348      fills up, we may have to move it to the end of the list if the
1349      next page isn't full.  If the next page is full, all subsequent
1350      pages are full, so there's no need to move it.  */
1351   if (--entry->num_free_objects == 0
1352       && entry->next != NULL
1353       && entry->next->num_free_objects > 0)
1354     {
1355       /* We have a new head for the list.  */
1356       G.pages[order] = entry->next;
1357 
1358       /* We are moving ENTRY to the end of the page table list.
1359 	 The new page at the head of the list will have NULL in
1360 	 its PREV field and ENTRY will have NULL in its NEXT field.  */
1361       entry->next->prev = NULL;
1362       entry->next = NULL;
1363 
1364       /* Append ENTRY to the tail of the list.  */
1365       entry->prev = G.page_tails[order];
1366       G.page_tails[order]->next = entry;
1367       G.page_tails[order] = entry;
1368     }
1369 
1370   /* Calculate the object's address.  */
1371   result = entry->page + object_offset;
1372   if (GATHER_STATISTICS)
1373     ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1374 			 result FINAL_PASS_MEM_STAT);
1375 
1376 #ifdef ENABLE_GC_CHECKING
1377   /* Keep poisoning the object by writing 0xaf, in an attempt to keep
1378      the exact same semantics in the presence of memory bugs, regardless of
1379      ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
1380      handle to avoid handle leak.  */
1381   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
1382 
1383   /* `Poison' the entire allocated object, including any padding at
1384      the end.  */
1385   memset (result, 0xaf, object_size);
1386 
1387   /* Make the bytes after the end of the object inaccessible.  Discard the
1388      handle to avoid handle leak.  */
1389   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
1390 						object_size - size));
1391 #endif
1392 
1393   /* Tell Valgrind that the memory is there, but its content isn't
1394      defined.  The bytes at the end of the object are still marked
1395      inaccessible.  */
1396   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1397 
1398   /* Keep track of how many bytes are being allocated.  This
1399      information is used in deciding when to collect.  */
1400   G.allocated += object_size;
1401 
1402   /* For timevar statistics.  */
1403   timevar_ggc_mem_total += object_size;
1404 
1405   if (f)
1406     add_finalizer (result, f, s, n);
1407 
1408   if (GATHER_STATISTICS)
1409     {
1410       size_t overhead = object_size - size;
1411 
1412       G.stats.total_overhead += overhead;
1413       G.stats.total_allocated += object_size;
1414       G.stats.total_overhead_per_order[order] += overhead;
1415       G.stats.total_allocated_per_order[order] += object_size;
1416 
1417       if (size <= 32)
1418 	{
1419 	  G.stats.total_overhead_under32 += overhead;
1420 	  G.stats.total_allocated_under32 += object_size;
1421 	}
1422       if (size <= 64)
1423 	{
1424 	  G.stats.total_overhead_under64 += overhead;
1425 	  G.stats.total_allocated_under64 += object_size;
1426 	}
1427       if (size <= 128)
1428 	{
1429 	  G.stats.total_overhead_under128 += overhead;
1430 	  G.stats.total_allocated_under128 += object_size;
1431 	}
1432     }
1433 
1434   if (GGC_DEBUG_LEVEL >= 3)
1435     fprintf (G.debug_file,
1436 	     "Allocating object, requested size=%lu, actual=%lu at %p on %p\n",
1437 	     (unsigned long) size, (unsigned long) object_size, result,
1438 	     (void *) entry);
1439 
1440   return result;
1441 }
1442 
1443 /* Mark function for strings.  */
1444 
1445 void
1446 gt_ggc_m_S (const void *p)
1447 {
1448   page_entry *entry;
1449   unsigned bit, word;
1450   unsigned long mask;
1451   unsigned long offset;
1452 
1453   if (!p || !ggc_allocated_p (p))
1454     return;
1455 
1456   /* Look up the page on which the object is allocated.  */
1457   entry = lookup_page_table_entry (p);
1458   gcc_assert (entry);
1459 
1460   /* Calculate the index of the object on the page; this is its bit
1461      position in the in_use_p bitmap.  Note that because a char* might
1462      point to the middle of an object, we need special code here to
1463      make sure P points to the start of an object.  */
1464   offset = ((const char *) p - entry->page) % object_size_table[entry->order];
1465   if (offset)
1466     {
1467       /* Here we've seen a char* which does not point to the beginning
1468 	 of an allocated object.  We assume it points to the middle of
1469 	 a STRING_CST.  */
1470       gcc_assert (offset == offsetof (struct tree_string, str));
1471       p = ((const char *) p) - offset;
1472       gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
1473       return;
1474     }
1475 
1476   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1477   word = bit / HOST_BITS_PER_LONG;
1478   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1479 
1480   /* If the bit was previously set, skip it.  */
1481   if (entry->in_use_p[word] & mask)
1482     return;
1483 
1484   /* Otherwise set it, and decrement the free object count.  */
1485   entry->in_use_p[word] |= mask;
1486   entry->num_free_objects -= 1;
1487 
1488   if (GGC_DEBUG_LEVEL >= 4)
1489     fprintf (G.debug_file, "Marking %p\n", p);
1490 
1491   return;
1492 }
1493 
1494 
1495 /* User-callable entry points for marking string X.  */
1496 
1497 void
1498 gt_ggc_mx (const char *& x)
1499 {
1500   gt_ggc_m_S (x);
1501 }
1502 
1503 void
1504 gt_ggc_mx (unsigned char *& x)
1505 {
1506   gt_ggc_m_S (x);
1507 }
1508 
1509 void
1510 gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
1511 {
1512 }
1513 
1514 /* If P is not marked, mark it and return false.  Otherwise return true.
1515    P must have been allocated by the GC allocator; it mustn't point to
1516    static objects, stack variables, or memory allocated with malloc.  */
1517 
1518 int
1519 ggc_set_mark (const void *p)
1520 {
1521   page_entry *entry;
1522   unsigned bit, word;
1523   unsigned long mask;
1524 
1525   /* Look up the page on which the object is allocated.  If the object
1526      wasn't allocated by the collector, we'll probably die.  */
1527   entry = lookup_page_table_entry (p);
1528   gcc_assert (entry);
1529 
1530   /* Calculate the index of the object on the page; this is its bit
1531      position in the in_use_p bitmap.  */
1532   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1533   word = bit / HOST_BITS_PER_LONG;
1534   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1535 
1536   /* If the bit was previously set, skip it.  */
1537   if (entry->in_use_p[word] & mask)
1538     return 1;
1539 
1540   /* Otherwise set it, and decrement the free object count.  */
1541   entry->in_use_p[word] |= mask;
1542   entry->num_free_objects -= 1;
1543 
1544   if (GGC_DEBUG_LEVEL >= 4)
1545     fprintf (G.debug_file, "Marking %p\n", p);
1546 
1547   return 0;
1548 }
1549 
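/* A hedged sketch of how a mark routine typically uses ggc_set_mark
   (illustrative only; real markers are generated by gengtype, and the
   type and function names here are invented for the example).  */
#if 0
struct example_node { struct example_node *next; };

static void
example_mark_list (struct example_node *p)
{
  /* Stop at NULL or at an already-marked node; otherwise the node has
     just been marked, so continue with its successor.  */
  while (p && !ggc_set_mark (p))
    p = p->next;
}
#endif
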
1550 /* Return 1 if P has been marked, zero otherwise.
1551    P must have been allocated by the GC allocator; it mustn't point to
1552    static objects, stack variables, or memory allocated with malloc.  */
1553 
1554 int
1555 ggc_marked_p (const void *p)
1556 {
1557   page_entry *entry;
1558   unsigned bit, word;
1559   unsigned long mask;
1560 
1561   /* Look up the page on which the object is allocated.  If the object
1562      wasn't allocated by the collector, we'll probably die.  */
1563   entry = lookup_page_table_entry (p);
1564   gcc_assert (entry);
1565 
1566   /* Calculate the index of the object on the page; this is its bit
1567      position in the in_use_p bitmap.  */
1568   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1569   word = bit / HOST_BITS_PER_LONG;
1570   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1571 
1572   return (entry->in_use_p[word] & mask) != 0;
1573 }
1574 
1575 /* Return the size of the gc-able object P.  */
1576 
1577 size_t
1578 ggc_get_size (const void *p)
1579 {
1580   page_entry *pe = lookup_page_table_entry (p);
1581   return OBJECT_SIZE (pe->order);
1582 }
1583 
1584 /* Release the memory for object P.  */
1585 
1586 void
1587 ggc_free (void *p)
1588 {
1589   if (in_gc)
1590     return;
1591 
1592   page_entry *pe = lookup_page_table_entry (p);
1593   size_t order = pe->order;
1594   size_t size = OBJECT_SIZE (order);
1595 
1596   if (GATHER_STATISTICS)
1597     ggc_free_overhead (p);
1598 
1599   if (GGC_DEBUG_LEVEL >= 3)
1600     fprintf (G.debug_file,
1601 	     "Freeing object, actual size=%lu, at %p on %p\n",
1602 	     (unsigned long) size, p, (void *) pe);
1603 
1604 #ifdef ENABLE_GC_CHECKING
1605   /* Poison the data, to indicate the data is garbage.  */
1606   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
1607   memset (p, 0xa5, size);
1608 #endif
1609   /* Let valgrind know the object is free.  */
1610   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
1611 
1612 #ifdef ENABLE_GC_ALWAYS_COLLECT
1613   /* In the completely-anal-checking mode, we do *not* immediately free
1614      the data, but instead verify that the data is *actually* not
1615      reachable the next time we collect.  */
1616   {
1617     struct free_object *fo = XNEW (struct free_object);
1618     fo->object = p;
1619     fo->next = G.free_object_list;
1620     G.free_object_list = fo;
1621   }
1622 #else
1623   {
1624     unsigned int bit_offset, word, bit;
1625 
1626     G.allocated -= size;
1627 
1628     /* Mark the object not-in-use.  */
1629     bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1630     word = bit_offset / HOST_BITS_PER_LONG;
1631     bit = bit_offset % HOST_BITS_PER_LONG;
1632     pe->in_use_p[word] &= ~(1UL << bit);
1633 
1634     if (pe->num_free_objects++ == 0)
1635       {
1636 	page_entry *p, *q;
1637 
1638 	/* If the page is completely full, then it's supposed to
1639 	   be after all pages that aren't.  Since we've freed one
1640 	   object from a page that was full, we need to move the
1641 	   page to the head of the list.
1642 
1643 	   PE is the node we want to move.  Q is the previous node
1644 	   and P is the next node in the list.  */
1645 	q = pe->prev;
1646 	if (q && q->num_free_objects == 0)
1647 	  {
1648 	    p = pe->next;
1649 
1650 	    q->next = p;
1651 
1652 	    /* If PE was at the end of the list, then Q becomes the
1653 	       new end of the list.  If PE was not the end of the
1654 	       list, then we need to update the PREV field for P.  */
1655 	    if (!p)
1656 	      G.page_tails[order] = q;
1657 	    else
1658 	      p->prev = q;
1659 
1660 	    /* Move PE to the head of the list.  */
1661 	    pe->next = G.pages[order];
1662 	    pe->prev = NULL;
1663 	    G.pages[order]->prev = pe;
1664 	    G.pages[order] = pe;
1665 	  }
1666 
1667 	/* Reset the hint bit to point to the only free object.  */
1668 	pe->next_bit_hint = bit_offset;
1669       }
1670   }
1671 #endif
1672 }
1673 
1674 /* Subroutine of init_ggc which computes the pair of numbers used to
1675    perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1676 
1677    This algorithm is taken from Granlund and Montgomery's paper
1678    "Division by Invariant Integers using Multiplication"
1679    (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1680    constants).  */
1681 
1682 static void
1683 compute_inverse (unsigned order)
1684 {
1685   size_t size, inv;
1686   unsigned int e;
1687 
1688   size = OBJECT_SIZE (order);
1689   e = 0;
1690   while (size % 2 == 0)
1691     {
1692       e++;
1693       size >>= 1;
1694     }
1695 
1696   inv = size;
1697   while (inv * size != 1)
1698     inv = inv * (2 - inv*size);
1699 
1700   DIV_MULT (order) = inv;
1701   DIV_SHIFT (order) = e;
1702 }
1703 
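/* The loop above is Newton-Raphson iteration modulo a power of two:
   starting from INV == SIZE (every odd number is its own inverse mod 8),
   each step doubles the number of correct low-order bits, so only a few
   iterations are needed.  A hedged sanity-check sketch, not used by the
   allocator:  */
#if 0
static void
example_check_inverse (unsigned order)
{
  /* The odd part of the object size times its computed inverse must be
     1 when reduced modulo 2^(bits of size_t).  */
  size_t odd_part = OBJECT_SIZE (order) >> DIV_SHIFT (order);
  gcc_assert (odd_part * DIV_MULT (order) == 1);
}
#endif
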
1704 /* Initialize the ggc-mmap allocator.  */
1705 void
1706 init_ggc (void)
1707 {
1708   static bool init_p = false;
1709   unsigned order;
1710 
1711   if (init_p)
1712     return;
1713   init_p = true;
1714 
1715   G.pagesize = getpagesize ();
1716   G.lg_pagesize = exact_log2 (G.pagesize);
1717 
1718 #ifdef HAVE_MMAP_DEV_ZERO
1719   G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1720   if (G.dev_zero_fd == -1)
1721     internal_error ("open /dev/zero: %m");
1722 #endif
1723 
1724 #if 0
1725   G.debug_file = fopen ("ggc-mmap.debug", "w");
1726 #else
1727   G.debug_file = stdout;
1728 #endif
1729 
1730 #ifdef USING_MMAP
1731   /* StunOS has an amazing off-by-one error for the first mmap allocation
1732      after fiddling with RLIMIT_STACK.  The result, as hard as it is to
1733      believe, is an unaligned page allocation, which would cause us to
1734      hork badly if we tried to use it.  */
1735   {
1736     char *p = alloc_anon (NULL, G.pagesize, true);
1737     struct page_entry *e;
1738     if ((uintptr_t)p & (G.pagesize - 1))
1739       {
1740 	/* How losing.  Discard this one and try another.  If we still
1741 	   can't get something useful, give up.  */
1742 
1743 	p = alloc_anon (NULL, G.pagesize, true);
1744 	gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
1745       }
1746 
1747     /* We have a good page, might as well hold onto it...  */
1748     e = XCNEW (struct page_entry);
1749     e->bytes = G.pagesize;
1750     e->page = p;
1751     e->next = G.free_pages;
1752     G.free_pages = e;
1753   }
1754 #endif
1755 
1756   /* Initialize the object size table.  */
1757   for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1758     object_size_table[order] = (size_t) 1 << order;
1759   for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1760     {
1761       size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1762 
1763       /* If S is not a multiple of MAX_ALIGNMENT, then round it up
1764 	 so that we're sure of getting aligned memory.  */
1765       s = ROUND_UP (s, MAX_ALIGNMENT);
1766       object_size_table[order] = s;
1767     }
1768 
1769   /* Initialize the objects-per-page and inverse tables.  */
1770   for (order = 0; order < NUM_ORDERS; ++order)
1771     {
1772       objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1773       if (objects_per_page_table[order] == 0)
1774 	objects_per_page_table[order] = 1;
1775       compute_inverse (order);
1776     }
1777 
1778   /* Reset the size_lookup array to put appropriately sized objects in
1779      the special orders.  All objects bigger than the previous power
1780      of two, but no greater than the special size, should go in the
1781      new order.  */
1782   for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1783     {
1784       int o;
1785       int i;
1786 
1787       i = OBJECT_SIZE (order);
1788       if (i >= NUM_SIZE_LOOKUP)
1789 	continue;
1790 
1791       for (o = size_lookup[i]; o == size_lookup [i]; --i)
1792 	size_lookup[i] = order;
1793     }
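
  /* As a concrete illustration, with a hypothetical extra order whose
     rounded size is 72: size_lookup[72] initially holds the order for
     128-byte objects, so the loop above walks i down from 72 and stops at
     64, the largest power of two below 72.  Requests of 65..72 bytes then
     map to the 72-byte order, while 64-byte requests keep their original
     power-of-two order.  */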
1794 
1795   G.depth_in_use = 0;
1796   G.depth_max = 10;
1797   G.depth = XNEWVEC (unsigned int, G.depth_max);
1798 
1799   G.by_depth_in_use = 0;
1800   G.by_depth_max = INITIAL_PTE_COUNT;
1801   G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1802   G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1803 
1804   /* Allocate space for the depth 0 finalizers.  */
1805   G.finalizers.safe_push (vNULL);
1806   G.vec_finalizers.safe_push (vNULL);
1807   gcc_assert (G.finalizers.length() == 1);
1808 }
1809 
1810 /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1811    reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */
1812 
1813 static void
1814 ggc_recalculate_in_use_p (page_entry *p)
1815 {
1816   unsigned int i;
1817   size_t num_objects;
1818 
1819   /* Because the past-the-end bit in in_use_p is always set, we
1820      pretend there is one additional object.  */
1821   num_objects = OBJECTS_IN_PAGE (p) + 1;
1822 
1823   /* Reset the free object count.  */
1824   p->num_free_objects = num_objects;
1825 
1826   /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
1827   for (i = 0;
1828        i < CEIL (BITMAP_SIZE (num_objects),
1829 		 sizeof (*p->in_use_p));
1830        ++i)
1831     {
1832       unsigned long j;
1833 
1834       /* Something is in use if it is marked, or if it was in use in a
1835 	 context further down the context stack.  */
1836       p->in_use_p[i] |= save_in_use_p (p)[i];
1837 
1838       /* Decrement the free object count for every object allocated.  */
1839       for (j = p->in_use_p[i]; j; j >>= 1)
1840 	p->num_free_objects -= (j & 1);
1841     }
1842 
1843   gcc_assert (p->num_free_objects < num_objects);
1844 }
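
/* A small worked example of the merge above: if one word of IN_USE_P is 0x5
   (objects 0 and 2 marked during this collection) and the corresponding
   SAVE_IN_USE_P word is 0x3 (objects 0 and 1 live in an outer context), the
   merged word is 0x7, and three objects in that word are subtracted from
   NUM_FREE_OBJECTS.  */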
1845 
1846 /* Unmark all objects.  */
1847 
1848 static void
1849 clear_marks (void)
1850 {
1851   unsigned order;
1852 
1853   for (order = 2; order < NUM_ORDERS; order++)
1854     {
1855       page_entry *p;
1856 
1857       for (p = G.pages[order]; p != NULL; p = p->next)
1858 	{
1859 	  size_t num_objects = OBJECTS_IN_PAGE (p);
1860 	  size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1861 
1862 	  /* The data should be page-aligned.  */
1863 	  gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));
1864 
1865 	  /* Pages that aren't in the topmost context are not collected;
1866 	     nevertheless, we need their in-use bit vectors to store GC
1867 	     marks.  So, back them up first.  */
1868 	  if (p->context_depth < G.context_depth)
1869 	    {
1870 	      if (! save_in_use_p (p))
1871 		save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
1872 	      memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1873 	    }
1874 
1875 	  /* Reset the number of free objects and clear the in-use
1876 	     bits.  These will be adjusted by mark_obj.  */
1877 	  p->num_free_objects = num_objects;
1878 	  memset (p->in_use_p, 0, bitmap_size);
1879 
1880 	  /* Make sure the one-past-the-end bit is always set.  */
1881 	  p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1882 	    = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1883 	}
1884     }
1885 }
1886 
1887 /* Check if any blocks with a registered finalizer have become unmarked.  If
1888    so, run the finalizer and unregister it, because the block is about to be
1889    freed.  Note that no guarantee is made about the order in which finalizers
1890    run, so touching other objects in GC memory is extremely unwise.  */
1891 
1892 static void
1893 ggc_handle_finalizers ()
1894 {
1895   unsigned dlen = G.finalizers.length();
1896   for (unsigned d = G.context_depth; d < dlen; ++d)
1897     {
1898       vec<finalizer> &v = G.finalizers[d];
1899       unsigned length = v.length ();
1900       for (unsigned int i = 0; i < length;)
1901 	{
1902 	  finalizer &f = v[i];
1903 	  if (!ggc_marked_p (f.addr ()))
1904 	    {
1905 	      f.call ();
1906 	      v.unordered_remove (i);
1907 	      length--;
1908 	    }
1909 	  else
1910 	    i++;
1911 	}
1912     }
1913 
1914   gcc_assert (dlen == G.vec_finalizers.length());
1915   for (unsigned d = G.context_depth; d < dlen; ++d)
1916     {
1917       vec<vec_finalizer> &vv = G.vec_finalizers[d];
1918       unsigned length = vv.length ();
1919       for (unsigned int i = 0; i < length;)
1920 	{
1921 	  vec_finalizer &f = vv[i];
1922 	  if (!ggc_marked_p (f.addr ()))
1923 	    {
1924 	      f.call ();
1925 	      vv.unordered_remove (i);
1926 	      length--;
1927 	    }
1928 	  else
1929 	    i++;
1930 	}
1931     }
1932 }
1933 
1934 /* Free all empty pages.  Partially empty pages need no attention
1935    because the `mark' bit doubles as an `unused' bit.  */
1936 
1937 static void
1938 sweep_pages (void)
1939 {
1940   unsigned order;
1941 
1942   for (order = 2; order < NUM_ORDERS; order++)
1943     {
1944       /* The last page-entry to consider, regardless of entries
1945 	 placed at the end of the list.  */
1946       page_entry * const last = G.page_tails[order];
1947 
1948       size_t num_objects;
1949       size_t live_objects;
1950       page_entry *p, *previous;
1951       int done;
1952 
1953       p = G.pages[order];
1954       if (p == NULL)
1955 	continue;
1956 
1957       previous = NULL;
1958       do
1959 	{
1960 	  page_entry *next = p->next;
1961 
1962 	  /* Loop until all entries have been examined.  */
1963 	  done = (p == last);
1964 
1965 	  num_objects = OBJECTS_IN_PAGE (p);
1966 
1967 	  /* Add all live objects on this page to the count of
1968              allocated memory.  */
1969 	  live_objects = num_objects - p->num_free_objects;
1970 
1971 	  G.allocated += OBJECT_SIZE (order) * live_objects;
1972 
1973 	  /* Only objects on pages in the topmost context should get
1974 	     collected.  */
1975 	  if (p->context_depth < G.context_depth)
1976 	    ;
1977 
1978 	  /* Remove the page if it's empty.  */
1979 	  else if (live_objects == 0)
1980 	    {
1981 	      /* If P was the first page in the list, then NEXT
1982 		 becomes the new first page in the list, otherwise
1983 		 splice P out of the forward pointers.  */
1984 	      if (! previous)
1985 		G.pages[order] = next;
1986 	      else
1987 		previous->next = next;
1988 
1989 	      /* Splice P out of the back pointers too.  */
1990 	      if (next)
1991 		next->prev = previous;
1992 
1993 	      /* Are we removing the last element?  */
1994 	      if (p == G.page_tails[order])
1995 		G.page_tails[order] = previous;
1996 	      free_page (p);
1997 	      p = previous;
1998 	    }
1999 
2000 	  /* If the page is full, move it to the end.  */
2001 	  else if (p->num_free_objects == 0)
2002 	    {
2003 	      /* Don't move it if it's already at the end.  */
2004 	      if (p != G.page_tails[order])
2005 		{
2006 		  /* Move p to the end of the list.  */
2007 		  p->next = NULL;
2008 		  p->prev = G.page_tails[order];
2009 		  G.page_tails[order]->next = p;
2010 
2011 		  /* Update the tail pointer...  */
2012 		  G.page_tails[order] = p;
2013 
2014 		  /* ... and the head pointer, if necessary.  */
2015 		  if (! previous)
2016 		    G.pages[order] = next;
2017 		  else
2018 		    previous->next = next;
2019 
2020 		  /* And update the backpointer in NEXT if necessary.  */
2021 		  if (next)
2022 		    next->prev = previous;
2023 
2024 		  p = previous;
2025 		}
2026 	    }
2027 
2028 	  /* If we've fallen through to here, it's a page in the
2029 	     topmost context that is neither full nor empty.  Such a
2030 	     page must precede pages at lesser context depth in the
2031 	     list, so move it to the head.  */
2032 	  else if (p != G.pages[order])
2033 	    {
2034 	      previous->next = p->next;
2035 
2036 	      /* Update the backchain in the next node if it exists.  */
2037 	      if (p->next)
2038 		p->next->prev = previous;
2039 
2040 	      /* Move P to the head of the list.  */
2041 	      p->next = G.pages[order];
2042 	      p->prev = NULL;
2043 	      G.pages[order]->prev = p;
2044 
2045 	      /* Update the head pointer.  */
2046 	      G.pages[order] = p;
2047 
2048 	      /* Are we moving the last element?  */
2049 	      if (G.page_tails[order] == p)
2050 	        G.page_tails[order] = previous;
2051 	      p = previous;
2052 	    }
2053 
2054 	  previous = p;
2055 	  p = next;
2056 	}
2057       while (! done);
2058 
2059       /* Now, restore the in_use_p vectors for any pages from contexts
2060          other than the current one.  */
2061       for (p = G.pages[order]; p; p = p->next)
2062 	if (p->context_depth != G.context_depth)
2063 	  ggc_recalculate_in_use_p (p);
2064     }
2065 }
2066 
2067 #ifdef ENABLE_GC_CHECKING
2068 /* Clobber all free objects.  */
2069 
2070 static void
2071 poison_pages (void)
2072 {
2073   unsigned order;
2074 
2075   for (order = 2; order < NUM_ORDERS; order++)
2076     {
2077       size_t size = OBJECT_SIZE (order);
2078       page_entry *p;
2079 
2080       for (p = G.pages[order]; p != NULL; p = p->next)
2081 	{
2082 	  size_t num_objects;
2083 	  size_t i;
2084 
2085 	  if (p->context_depth != G.context_depth)
2086 	    /* Since we don't do any collection for pages in pushed
2087 	       contexts, there's no need to do any poisoning.  And
2088 	       besides, the IN_USE_P array isn't valid until we pop
2089 	       contexts.  */
2090 	    continue;
2091 
2092 	  num_objects = OBJECTS_IN_PAGE (p);
2093 	  for (i = 0; i < num_objects; i++)
2094 	    {
2095 	      size_t word, bit;
2096 	      word = i / HOST_BITS_PER_LONG;
2097 	      bit = i % HOST_BITS_PER_LONG;
2098 	      if (((p->in_use_p[word] >> bit) & 1) == 0)
2099 		{
2100 		  char *object = p->page + i * size;
2101 
2102 		  /* Keep poison-by-write when we expect to use Valgrind,
2103 		     so the exact same memory semantics is kept, in case
2104 		     there are memory errors.  We override this request
2105 		     below.  */
2106 		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
2107 								 size));
2108 		  memset (object, 0xa5, size);
2109 
2110 		  /* Drop the handle to avoid handle leak.  */
2111 		  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
2112 		}
2113 	    }
2114 	}
2115     }
2116 }
2117 #else
2118 #define poison_pages()
2119 #endif
2120 
2121 #ifdef ENABLE_GC_ALWAYS_COLLECT
2122 /* Validate that the reportedly free objects actually are.  */
2123 
2124 static void
2125 validate_free_objects (void)
2126 {
2127   struct free_object *f, *next, *still_free = NULL;
2128 
2129   for (f = G.free_object_list; f ; f = next)
2130     {
2131       page_entry *pe = lookup_page_table_entry (f->object);
2132       size_t bit, word;
2133 
2134       bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
2135       word = bit / HOST_BITS_PER_LONG;
2136       bit = bit % HOST_BITS_PER_LONG;
2137       next = f->next;
2138 
2139       /* Make certain it isn't visible from any root.  Notice that we
2140 	 do this check before sweep_pages merges save_in_use_p.  */
2141       gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
2142 
2143       /* If the object comes from an outer context, then retain the
2144 	 free_object entry, so that we can verify that the address
2145 	 isn't live on the stack in some outer context.  */
2146       if (pe->context_depth != G.context_depth)
2147 	{
2148 	  f->next = still_free;
2149 	  still_free = f;
2150 	}
2151       else
2152 	free (f);
2153     }
2154 
2155   G.free_object_list = still_free;
2156 }
2157 #else
2158 #define validate_free_objects()
2159 #endif
2160 
2161 /* Top level mark-and-sweep routine.  */
2162 
2163 void
2164 ggc_collect (void)
2165 {
2166   /* Avoid frequent unnecessary work by skipping collection if the
2167      total allocations haven't expanded much since the last
2168      collection.  */
2169   float allocated_last_gc =
2170     MAX (G.allocated_last_gc, (size_t)PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
2171 
2172   float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
2173   if (G.allocated < allocated_last_gc + min_expand && !ggc_force_collect)
2174     return;
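
  /* For example, with illustrative parameter values (not necessarily the
     defaults), --param ggc-min-heapsize=4096 and --param ggc-min-expand=30:
     allocated_last_gc is at least 4096k, min_expand is about 1229k, and
     collection is skipped until G.allocated reaches roughly 5325k.  */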
2175 
2176   timevar_push (TV_GC);
2177   if (!quiet_flag)
2178     fprintf (stderr, " {GC %luk -> ", (unsigned long) G.allocated / 1024);
2179   if (GGC_DEBUG_LEVEL >= 2)
2180     fprintf (G.debug_file, "BEGIN COLLECTING\n");
2181 
2182   /* Zero the total allocated bytes.  This will be recalculated in the
2183      sweep phase.  */
2184   G.allocated = 0;
2185 
2186   /* Release the pages we freed the last time we collected, but didn't
2187      reuse in the interim.  */
2188   release_pages ();
2189 
2190   /* Indicate that we've seen collections at this context depth.  */
2191   G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
2192 
2193   invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2194 
2195   in_gc = true;
2196   clear_marks ();
2197   ggc_mark_roots ();
2198   ggc_handle_finalizers ();
2199 
2200   if (GATHER_STATISTICS)
2201     ggc_prune_overhead_list ();
2202 
2203   poison_pages ();
2204   validate_free_objects ();
2205   sweep_pages ();
2206 
2207   in_gc = false;
2208   G.allocated_last_gc = G.allocated;
2209 
2210   invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2211 
2212   timevar_pop (TV_GC);
2213 
2214   if (!quiet_flag)
2215     fprintf (stderr, "%luk}", (unsigned long) G.allocated / 1024);
2216   if (GGC_DEBUG_LEVEL >= 2)
2217     fprintf (G.debug_file, "END COLLECTING\n");
2218 }
2219 
2220 /* Assume that all GGC memory is reachable and grow the limits for the next
2221    collection.  With checking, trigger GGC so -Q compilation outputs how much
2222    memory really is reachable.  */
2223 
2224 void
2225 ggc_grow (void)
2226 {
2227   if (!flag_checking)
2228     G.allocated_last_gc = MAX (G.allocated_last_gc,
2229 			       G.allocated);
2230   else
2231     ggc_collect ();
2232   if (!quiet_flag)
2233     fprintf (stderr, " {GC start %luk} ", (unsigned long) G.allocated / 1024);
2234 }
2235 
2236 /* Print allocation statistics.  */
2237 #define SCALE(x) ((unsigned long) ((x) < 1024*10 \
2238 		  ? (x) \
2239 		  : ((x) < 1024*1024*10 \
2240 		     ? (x) / 1024 \
2241 		     : (x) / (1024*1024))))
2242 #define STAT_LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
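
/* For instance, SCALE and STAT_LABEL print 8000 bytes as "8000 ", 3000000
   bytes as "2929k" (3000000 / 1024), and 50000000 bytes as "47M"
   (50000000 / 1048576), keeping the columns readable without losing small
   values to rounding.  */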
2243 
2244 void
2245 ggc_print_statistics (void)
2246 {
2247   struct ggc_statistics stats;
2248   unsigned int i;
2249   size_t total_overhead = 0;
2250 
2251   /* Clear the statistics.  */
2252   memset (&stats, 0, sizeof (stats));
2253 
2254   /* Make sure collection will really occur.  */
2255   G.allocated_last_gc = 0;
2256 
2257   /* Collect and print the statistics common across collectors.  */
2258   ggc_print_common_statistics (stderr, &stats);
2259 
2260   /* Release free pages so that we will not count the bytes allocated
2261      there as part of the total allocated memory.  */
2262   release_pages ();
2263 
2264   /* Collect some information about the various sizes of
2265      allocation.  */
2266   fprintf (stderr,
2267            "Memory still allocated at the end of the compilation process\n");
2268   fprintf (stderr, "%-8s %10s  %10s  %10s\n",
2269 	   "Size", "Allocated", "Used", "Overhead");
2270   for (i = 0; i < NUM_ORDERS; ++i)
2271     {
2272       page_entry *p;
2273       size_t allocated;
2274       size_t in_use;
2275       size_t overhead;
2276 
2277       /* Skip empty entries.  */
2278       if (!G.pages[i])
2279 	continue;
2280 
2281       overhead = allocated = in_use = 0;
2282 
2283       /* Figure out the total number of bytes allocated for objects of
2284 	 this size, and how many of them are actually in use.  Also figure
2285 	 out how much memory the page table is using.  */
2286       for (p = G.pages[i]; p; p = p->next)
2287 	{
2288 	  allocated += p->bytes;
2289 	  in_use +=
2290 	    (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2291 
2292 	  overhead += (sizeof (page_entry) - sizeof (long)
2293 		       + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
2294 	}
2295       fprintf (stderr, "%-8lu %10lu%c %10lu%c %10lu%c\n",
2296 	       (unsigned long) OBJECT_SIZE (i),
2297 	       SCALE (allocated), STAT_LABEL (allocated),
2298 	       SCALE (in_use), STAT_LABEL (in_use),
2299 	       SCALE (overhead), STAT_LABEL (overhead));
2300       total_overhead += overhead;
2301     }
2302   fprintf (stderr, "%-8s %10lu%c %10lu%c %10lu%c\n", "Total",
2303 	   SCALE (G.bytes_mapped), STAT_LABEL (G.bytes_mapped),
2304 	   SCALE (G.allocated), STAT_LABEL (G.allocated),
2305 	   SCALE (total_overhead), STAT_LABEL (total_overhead));
2306 
2307   if (GATHER_STATISTICS)
2308     {
2309       fprintf (stderr, "\nTotal allocations and overheads during "
2310 	       "the compilation process\n");
2311 
2312       fprintf (stderr, "Total Overhead:                          %10"
2313 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead);
2314       fprintf (stderr, "Total Allocated:                         %10"
2315 	       HOST_LONG_LONG_FORMAT "d\n",
2316 	       G.stats.total_allocated);
2317 
2318       fprintf (stderr, "Total Overhead  under  32B:              %10"
2319 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under32);
2320       fprintf (stderr, "Total Allocated under  32B:              %10"
2321 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under32);
2322       fprintf (stderr, "Total Overhead  under  64B:              %10"
2323 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under64);
2324       fprintf (stderr, "Total Allocated under  64B:              %10"
2325 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under64);
2326       fprintf (stderr, "Total Overhead  under 128B:              %10"
2327 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_overhead_under128);
2328       fprintf (stderr, "Total Allocated under 128B:              %10"
2329 	       HOST_LONG_LONG_FORMAT "d\n", G.stats.total_allocated_under128);
2330 
2331       for (i = 0; i < NUM_ORDERS; i++)
2332 	if (G.stats.total_allocated_per_order[i])
2333 	  {
2334 	    fprintf (stderr, "Total Overhead  page size %9lu:     %10"
2335 		     HOST_LONG_LONG_FORMAT "d\n",
2336 		     (unsigned long) OBJECT_SIZE (i),
2337 		     G.stats.total_overhead_per_order[i]);
2338 	    fprintf (stderr, "Total Allocated page size %9lu:     %10"
2339 		     HOST_LONG_LONG_FORMAT "d\n",
2340 		     (unsigned long) OBJECT_SIZE (i),
2341 		     G.stats.total_allocated_per_order[i]);
2342 	  }
2343   }
2344 }
2345 
2346 struct ggc_pch_ondisk
2347 {
2348   unsigned totals[NUM_ORDERS];
2349 };
2350 
2351 struct ggc_pch_data
2352 {
2353   struct ggc_pch_ondisk d;
2354   uintptr_t base[NUM_ORDERS];
2355   size_t written[NUM_ORDERS];
2356 };
2357 
2358 struct ggc_pch_data *
2359 init_ggc_pch (void)
2360 {
2361   return XCNEW (struct ggc_pch_data);
2362 }
2363 
2364 void
2365 ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2366 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
2367 {
2368   unsigned order;
2369 
2370   if (size < NUM_SIZE_LOOKUP)
2371     order = size_lookup[size];
2372   else
2373     {
2374       order = 10;
2375       while (size > OBJECT_SIZE (order))
2376 	order++;
2377     }
2378 
2379   d->d.totals[order]++;
2380 }
2381 
2382 size_t
2383 ggc_pch_total_size (struct ggc_pch_data *d)
2384 {
2385   size_t a = 0;
2386   unsigned i;
2387 
2388   for (i = 0; i < NUM_ORDERS; i++)
2389     a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2390   return a;
2391 }
2392 
2393 void
2394 ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2395 {
2396   uintptr_t a = (uintptr_t) base;
2397   unsigned i;
2398 
2399   for (i = 0; i < NUM_ORDERS; i++)
2400     {
2401       d->base[i] = a;
2402       a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2403     }
2404 }
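
/* An illustration of the layout computed above, assuming a 4k page size and
   two non-empty orders: if order A holds 1000 objects of 8 bytes and order B
   holds 10 objects of 512 bytes, order A occupies PAGE_ALIGN (8000) == 8192
   bytes starting at BASE, and order B starts at BASE + 8192 and occupies
   PAGE_ALIGN (5120) == 8192 bytes, matching the total returned by
   ggc_pch_total_size.  */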
2405 
2406 
2407 char *
2408 ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2409 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
2410 {
2411   unsigned order;
2412   char *result;
2413 
2414   if (size < NUM_SIZE_LOOKUP)
2415     order = size_lookup[size];
2416   else
2417     {
2418       order = 10;
2419       while (size > OBJECT_SIZE (order))
2420 	order++;
2421     }
2422 
2423   result = (char *) d->base[order];
2424   d->base[order] += OBJECT_SIZE (order);
2425   return result;
2426 }
2427 
2428 void
2429 ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2430 		       FILE *f ATTRIBUTE_UNUSED)
2431 {
2432   /* Nothing to do.  */
2433 }
2434 
2435 void
2436 ggc_pch_write_object (struct ggc_pch_data *d,
2437 		      FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2438 		      size_t size, bool is_string ATTRIBUTE_UNUSED)
2439 {
2440   unsigned order;
2441   static const char emptyBytes[256] = { 0 };
2442 
2443   if (size < NUM_SIZE_LOOKUP)
2444     order = size_lookup[size];
2445   else
2446     {
2447       order = 10;
2448       while (size > OBJECT_SIZE (order))
2449 	order++;
2450     }
2451 
2452   if (fwrite (x, size, 1, f) != 1)
2453     fatal_error (input_location, "can%'t write PCH file: %m");
2454 
2455   /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2456      object out to OBJECT_SIZE(order).  This happens for strings.  */
2457 
2458   if (size != OBJECT_SIZE (order))
2459     {
2460       unsigned padding = OBJECT_SIZE (order) - size;
2461 
2462       /* To speed small writes, we use a nulled-out array that's larger
2463          than most padding requests as the source for our null bytes.  This
2464          permits us to do the padding with fwrite() rather than fseek(), and
2465          limits the chance the OS may try to flush any outstanding writes.  */
2466       if (padding <= sizeof (emptyBytes))
2467         {
2468           if (fwrite (emptyBytes, 1, padding, f) != padding)
2469             fatal_error (input_location, "can%'t write PCH file");
2470         }
2471       else
2472         {
2473           /* Larger than our buffer?  Just default to fseek.  */
2474           if (fseek (f, padding, SEEK_CUR) != 0)
2475             fatal_error (input_location, "can%'t write PCH file");
2476         }
2477     }
2478 
2479   d->written[order]++;
2480   if (d->written[order] == d->d.totals[order]
2481       && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2482 				   G.pagesize),
2483 		SEEK_CUR) != 0)
2484     fatal_error (input_location, "can%'t write PCH file: %m");
2485 }
2486 
2487 void
2488 ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2489 {
2490   if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2491     fatal_error (input_location, "can%'t write PCH file: %m");
2492   free (d);
2493 }
2494 
2495 /* Move the PCH PTE entries that were just added to the end of by_depth
2496    to the front.  */
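/* Roughly, if by_depth held [O1 .. Om, N1 .. Nn] before the call, where the
   Oi are the pre-existing (context 1) entries and the Ni are the newly
   pushed PCH (context 0) entries, it holds [N1 .. Nn, O1 .. Om] afterwards;
   save_in_use is permuted the same way, every index_by_depth field is
   refreshed, and G.depth records that context 1 now begins at index n.  */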
2497 
2498 static void
2499 move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2500 {
2501   unsigned i;
2502 
2503   /* First, we swap the new entries to the front of the varrays.  */
2504   page_entry **new_by_depth;
2505   unsigned long **new_save_in_use;
2506 
2507   new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2508   new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2509 
2510   memcpy (&new_by_depth[0],
2511 	  &G.by_depth[count_old_page_tables],
2512 	  count_new_page_tables * sizeof (void *));
2513   memcpy (&new_by_depth[count_new_page_tables],
2514 	  &G.by_depth[0],
2515 	  count_old_page_tables * sizeof (void *));
2516   memcpy (&new_save_in_use[0],
2517 	  &G.save_in_use[count_old_page_tables],
2518 	  count_new_page_tables * sizeof (void *));
2519   memcpy (&new_save_in_use[count_new_page_tables],
2520 	  &G.save_in_use[0],
2521 	  count_old_page_tables * sizeof (void *));
2522 
2523   free (G.by_depth);
2524   free (G.save_in_use);
2525 
2526   G.by_depth = new_by_depth;
2527   G.save_in_use = new_save_in_use;
2528 
2529   /* Now update all the index_by_depth fields.  */
2530   for (i = G.by_depth_in_use; i > 0; --i)
2531     {
2532       page_entry *p = G.by_depth[i-1];
2533       p->index_by_depth = i-1;
2534     }
2535 
2536   /* And last, we update the depth pointers in G.depth.  The first
2537      entry is already 0, and context 0 entries always start at index
2538      0, so there is nothing to update in the first slot.  We need a
2539 	     second slot only if we have old ptes, and if we do, they start
2540      at index count_new_page_tables.  */
2541   if (count_old_page_tables)
2542     push_depth (count_new_page_tables);
2543 }
2544 
2545 void
2546 ggc_pch_read (FILE *f, void *addr)
2547 {
2548   struct ggc_pch_ondisk d;
2549   unsigned i;
2550   char *offs = (char *) addr;
2551   unsigned long count_old_page_tables;
2552   unsigned long count_new_page_tables;
2553 
2554   count_old_page_tables = G.by_depth_in_use;
2555 
2556   /* We've just read in a PCH file.  So, every object that used to be
2557      allocated is now free.  */
2558   clear_marks ();
2559 #ifdef ENABLE_GC_CHECKING
2560   poison_pages ();
2561 #endif
2562   /* Since we free all the allocated objects, the free list becomes
2563      useless.  Validate it now, which will also clear it.  */
2564   validate_free_objects ();
2565 
2566   /* No object read from a PCH file should ever be freed.  So, set the
2567      context depth to 1, and set the depth of all the currently-allocated
2568      pages to be 1 too.  PCH pages will have depth 0.  */
2569   gcc_assert (!G.context_depth);
2570   G.context_depth = 1;
2571   /* Allocate space for the depth 1 finalizers.  */
2572   G.finalizers.safe_push (vNULL);
2573   G.vec_finalizers.safe_push (vNULL);
2574   gcc_assert (G.finalizers.length() == 2);
2575   for (i = 0; i < NUM_ORDERS; i++)
2576     {
2577       page_entry *p;
2578       for (p = G.pages[i]; p != NULL; p = p->next)
2579 	p->context_depth = G.context_depth;
2580     }
2581 
2582   /* Allocate the appropriate page-table entries for the pages read from
2583      the PCH file.  */
2584   if (fread (&d, sizeof (d), 1, f) != 1)
2585     fatal_error (input_location, "can%'t read PCH file: %m");
2586 
2587   for (i = 0; i < NUM_ORDERS; i++)
2588     {
2589       struct page_entry *entry;
2590       char *pte;
2591       size_t bytes;
2592       size_t num_objs;
2593       size_t j;
2594 
2595       if (d.totals[i] == 0)
2596 	continue;
2597 
2598       bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
2599       num_objs = bytes / OBJECT_SIZE (i);
2600       entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2601 					    - sizeof (long)
2602 					    + BITMAP_SIZE (num_objs + 1)));
2603       entry->bytes = bytes;
2604       entry->page = offs;
2605       entry->context_depth = 0;
2606       offs += bytes;
2607       entry->num_free_objects = 0;
2608       entry->order = i;
2609 
2610       for (j = 0;
2611 	   j + HOST_BITS_PER_LONG <= num_objs + 1;
2612 	   j += HOST_BITS_PER_LONG)
2613 	entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2614       for (; j < num_objs + 1; j++)
2615 	entry->in_use_p[j / HOST_BITS_PER_LONG]
2616 	  |= 1L << (j % HOST_BITS_PER_LONG);
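
      /* For example, with num_objs == 70 and 64-bit longs, the first loop
	 fills in_use_p[0] with all ones, and the second sets bits 0..6 of
	 in_use_p[1]: objects 64..69 plus the one-past-the-end bit at
	 position 70, so every PCH object is treated as live.  */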
2617 
2618       for (pte = entry->page;
2619 	   pte < entry->page + entry->bytes;
2620 	   pte += G.pagesize)
2621 	set_page_table_entry (pte, entry);
2622 
2623       if (G.page_tails[i] != NULL)
2624 	G.page_tails[i]->next = entry;
2625       else
2626 	G.pages[i] = entry;
2627       G.page_tails[i] = entry;
2628 
2629       /* We start off by just adding all the new information to the
2630 	 end of the varrays; later, we will move the new information
2631 	 to the front of the varrays, since the PCH page tables are at
2632 	 context 0.  */
2633       push_by_depth (entry, 0);
2634     }
2635 
2636   /* Now, we update the various data structures that speed page table
2637      handling.  */
2638   count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2639 
2640   move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2641 
2642   /* Update the statistics.  */
2643   G.allocated = G.allocated_last_gc = offs - (char *)addr;
2644 }
2645