xref: /netbsd-src/external/gpl3/gcc.old/dist/libphobos/libdruntime/gc/impl/conservative/gc.d (revision 627f7eb200a4419d89b531d55fccd2ee3ffdcde0)
1 /**
2  * Contains the garbage collector implementation.
3  *
4  * Copyright: Copyright Digital Mars 2001 - 2016.
5  * License:   $(WEB www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
6  * Authors:   Walter Bright, David Friedman, Sean Kelly
7  */
8 
9 /*          Copyright Digital Mars 2005 - 2016.
10  * Distributed under the Boost Software License, Version 1.0.
11  *    (See accompanying file LICENSE or copy at
12  *          http://www.boost.org/LICENSE_1_0.txt)
13  */
14 module gc.impl.conservative.gc;
15 
16 // D Programming Language Garbage Collector implementation
17 
18 /************** Debugging ***************************/
19 
20 //debug = PRINTF;               // turn on printf's
21 //debug = COLLECT_PRINTF;       // turn on printf's
22 //debug = PRINTF_TO_FILE;       // redirect printf's ouptut to file "gcx.log"
23 //debug = LOGGING;              // log allocations / frees
24 //debug = MEMSTOMP;             // stomp on memory
25 //debug = SENTINEL;             // add underrun/overrrun protection
26                                 // NOTE: this needs to be enabled globally in the makefiles
27                                 // (-debug=SENTINEL) to pass druntime's unittests.
28 //debug = PTRCHECK;             // more pointer checking
29 //debug = PTRCHECK2;            // thorough but slow pointer checking
30 //debug = INVARIANT;            // enable invariants
31 //debug = PROFILE_API;          // profile API calls for config.profile > 1
32 
33 /***************************************************/
34 
35 import gc.bits;
36 import gc.os;
37 import gc.config;
38 import gc.gcinterface;
39 
40 import rt.util.container.treap;
41 
42 import cstdlib = core.stdc.stdlib : calloc, free, malloc, realloc;
43 import core.stdc.string : memcpy, memset, memmove;
44 import core.bitop;
45 import core.thread;
46 static import core.memory;
47 
48 version (GNU) import gcc.builtins;
49 
50 debug (PRINTF_TO_FILE) import core.stdc.stdio : sprintf, fprintf, fopen, fflush, FILE;
51 else                   import core.stdc.stdio : sprintf, printf; // needed to output profiling results
52 
53 import core.time;
54 alias currTime = MonoTime.currTime;
55 
debug(PRINTF_TO_FILE)
{
    private __gshared MonoTime gcStartTick;
    private __gshared FILE* gcx_fh;

    // Replacement for printf that prefixes each message with the elapsed
    // time since the first logged message and appends it to "gcx.log"
    // (opened lazily on first use).
    private int printf(ARGS...)(const char* fmt, ARGS args) nothrow
    {
        if (!gcx_fh)
            gcx_fh = fopen("gcx.log", "w");
        if (!gcx_fh)
            return 0;

        int written;
        if (MonoTime.ticksPerSecond == 0)
        {
            // The monotonic clock is not yet initialized; no timestamp possible.
            written = fprintf(gcx_fh, "before init: ");
        }
        else
        {
            if (gcStartTick == MonoTime.init)
                gcStartTick = MonoTime.currTime;
            immutable elapsed = MonoTime.currTime - gcStartTick;
            immutable seconds = elapsed.total!"hnsecs" / cast(double) convert!("seconds", "hnsecs")(1);
            written = fprintf(gcx_fh, "%10.6lf: ", seconds);
        }
        written += fprintf(gcx_fh, fmt, args);
        fflush(gcx_fh);
        return written;
    }
}
86 
// Debug helper: compare the number of pages the pagetable marks free
// against the pool's cached freepages counter.
debug(PRINTF) void printFreeInfo(Pool* pool) nothrow
{
    uint trulyFree = 0;
    for (size_t i = 0; i < pool.npages; i++)
    {
        if (pool.pagetable[i] >= B_FREE)
            trulyFree++;
    }

    printf("Pool %p:  %d really free, %d supposedly free\n", pool, trulyFree, pool.freepages);
}
96 
// Track total time spent preparing for GC,
// marking, sweeping and recovering pages.
__gshared Duration prepTime;
__gshared Duration markTime;
__gshared Duration sweepTime;
__gshared Duration recoverTime;
__gshared Duration maxPauseTime;   // longest single pause observed
__gshared size_t numCollections;   // number of collections performed
__gshared size_t maxPoolMemory;    // high-water mark of pool memory

// Per-API-call counters and timers, updated only for config.profile > 1.
__gshared long numMallocs;
__gshared long numFrees;
__gshared long numReallocs;
__gshared long numExtends;
__gshared long numOthers;
__gshared long mallocTime; // using ticks instead of MonoTime for better performance
__gshared long freeTime;
__gshared long reallocTime;
__gshared long extendTime;
__gshared long otherTime;
__gshared long lockTime;   // time spent waiting on the global GC lock
118 
private
{
    extern (C)
    {
        // to allow compilation of this module without access to the rt package,
        //  make these functions available from rt.lifetime
        void rt_finalizeFromGC(void* p, size_t size, uint attr) nothrow;
        int rt_hasFinalizerInSegment(void* p, size_t size, uint attr, in void[] segment) nothrow;

        // Declared as an extern instead of importing core.exception
        // to avoid inlining - see issue 13725.
        void onInvalidMemoryOperationError() @nogc nothrow;
        void onOutOfMemoryErrorNoGC() @nogc nothrow;
    }

    enum
    {
        // Sentinel value returned by searches (e.g. LogArray.find) on failure.
        OPFAIL = ~cast(size_t)0
    }
}
139 
140 
// Legacy name for the GC interface type.
alias gc_t = GC; // modern alias syntax; `alias GC gc_t;` is deprecated
142 
143 
144 /* ======================= Leak Detector =========================== */
145 
146 
debug (LOGGING)
{
    // One allocation record kept by the leak detector.
    struct Log
    {
        void*  p;      // allocated address
        size_t size;   // requested size
        size_t line;   // allocation site line (0 if unknown)
        char*  file;   // allocation site file (null if unknown)
        void*  parent; // allocation that references this one

        // Print this record via the module's (possibly redirected) printf.
        void print() nothrow
        {
            printf("    p = %p, size = %zd, parent = %p ", p, size, parent);
            if (file)
            {
                printf("%s(%u)", file, line);
            }
            printf("\n");
        }
    }


    // Growable array of Log records, stored on the C heap so the leak
    // detector does not disturb the GC heap it is observing.
    struct LogArray
    {
        size_t dim;      // records in use
        size_t allocdim; // records allocated
        Log *data;

        void Dtor() nothrow
        {
            if (data)
                cstdlib.free(data);
            data = null;
        }

        // Ensure capacity for at least nentries additional records.
        void reserve(size_t nentries) nothrow
        {
            assert(dim <= allocdim);
            if (allocdim - dim < nentries)
            {
                allocdim = (dim + nentries) * 2;
                assert(dim + nentries <= allocdim);
                if (!data)
                {
                    data = cast(Log*)cstdlib.malloc(allocdim * Log.sizeof);
                    if (!data && allocdim)
                        onOutOfMemoryErrorNoGC();
                }
                else
                {   Log *newdata;

                    newdata = cast(Log*)cstdlib.malloc(allocdim * Log.sizeof);
                    if (!newdata && allocdim)
                        onOutOfMemoryErrorNoGC();
                    memcpy(newdata, data, dim * Log.sizeof);
                    cstdlib.free(data);
                    data = newdata;
                }
            }
        }


        void push(Log log) nothrow
        {
            reserve(1);
            data[dim++] = log;
        }

        // Remove the record at index i, keeping the remainder in order.
        void remove(size_t i) nothrow
        {
            // Only dim - i - 1 records follow slot i; moving dim - i
            // records would read one Log past the end of the live data.
            memmove(data + i, data + i + 1, (dim - i - 1) * Log.sizeof);
            dim--;
        }


        // Return the index of the record for p, or OPFAIL if not found.
        size_t find(void *p) nothrow
        {
            for (size_t i = 0; i < dim; i++)
            {
                if (data[i].p == p)
                    return i;
            }
            return OPFAIL; // not found
        }


        // Overwrite this array with a copy of *from.
        // NOTE(review): assumes from.dim >= dim, otherwise the reserve
        // argument underflows -- confirm against callers.
        void copy(LogArray *from) nothrow
        {
            reserve(from.dim - dim);
            assert(from.dim <= allocdim);
            memcpy(data, from.data, from.dim * Log.sizeof);
            dim = from.dim;
        }
    }
}
242 
243 
244 /* ============================ GC =============================== */
245 
246 class ConservativeGC : GC
247 {
    // For passing to debug code (not thread safe)
    __gshared size_t line;
    __gshared char*  file;

    Gcx *gcx;                   // implementation

    import core.internal.spinlock;
    // Global GC lock; public entry points acquire it via lockNR/runLocked.
    static gcLock = shared(AlignedSpinLock)(SpinLock.Contention.lengthy);
    // Set while finalizers run; guards against re-entering the GC then.
    static bool _inFinalizer;
257 
    // lock GC, throw InvalidMemoryOperationError on recursive locking during finalization
    static void lockNR() @nogc nothrow
    {
        // Re-entering the GC from a finalizer would deadlock on gcLock,
        // so report it as an invalid memory operation instead.
        if (_inFinalizer)
            onInvalidMemoryOperationError();
        gcLock.lock();
    }
265 
266 
267     static void initialize(ref GC gc)
268     {
269         import core.stdc.string: memcpy;
270 
271         if (config.gc != "conservative")
272               return;
273 
274         auto p = cstdlib.malloc(__traits(classInstanceSize,ConservativeGC));
275 
276         if (!p)
277             onOutOfMemoryErrorNoGC();
278 
279         auto init = typeid(ConservativeGC).initializer();
280         assert(init.length == __traits(classInstanceSize, ConservativeGC));
281         auto instance = cast(ConservativeGC) memcpy(p, init.ptr, init.length);
282         instance.__ctor();
283 
284         gc = instance;
285     }
286 
287 
288     static void finalize(ref GC gc)
289     {
290         if (config.gc != "conservative")
291               return;
292 
293         auto instance = cast(ConservativeGC) gc;
294         instance.Dtor();
295         cstdlib.free(cast(void*)instance);
296     }
297 
298 
299     this()
300     {
301         //config is assumed to have already been initialized
302 
303         gcx = cast(Gcx*)cstdlib.calloc(1, Gcx.sizeof);
304         if (!gcx)
305             onOutOfMemoryErrorNoGC();
306         gcx.initialize();
307 
308         if (config.initReserve)
309             gcx.reserve(config.initReserve << 20);
310         if (config.disable)
311             gcx.disabled++;
312     }
313 
314 
315     void Dtor()
316     {
317         version (linux)
318         {
319             //debug(PRINTF) printf("Thread %x ", pthread_self());
320             //debug(PRINTF) printf("GC.Dtor()\n");
321         }
322 
323         if (gcx)
324         {
325             gcx.Dtor();
326             cstdlib.free(gcx);
327             gcx = null;
328         }
329     }
330 
331 
332     void enable()
333     {
334         static void go(Gcx* gcx) nothrow
335         {
336             assert(gcx.disabled > 0);
337             gcx.disabled--;
338         }
339         runLocked!(go, otherTime, numOthers)(gcx);
340     }
341 
342 
343     void disable()
344     {
345         static void go(Gcx* gcx) nothrow
346         {
347             gcx.disabled++;
348         }
349         runLocked!(go, otherTime, numOthers)(gcx);
350     }
351 
352 
    // Run func(args) while holding the global GC lock. When API profiling
    // is active (config.profile > 1), the time spent acquiring the lock is
    // added to lockTime.
    auto runLocked(alias func, Args...)(auto ref Args args)
    {
        debug(PROFILE_API) immutable tm = (config.profile > 1 ? currTime.ticks : 0);
        lockNR();
        // Unlock on exception; the success path unlocks explicitly below.
        scope (failure) gcLock.unlock();
        debug(PROFILE_API) immutable tm2 = (config.profile > 1 ? currTime.ticks : 0);

        static if (is(typeof(func(args)) == void))
            func(args);
        else
            auto res = func(args);

        debug(PROFILE_API) if (config.profile > 1)
            lockTime += tm2 - tm;
        gcLock.unlock();

        static if (!is(typeof(func(args)) == void))
            return res;
    }
372 
373 
    // Like runLocked above, but additionally accumulates the time spent in
    // func into `time` and bumps `count` when profiling (config.profile > 1).
    auto runLocked(alias func, alias time, alias count, Args...)(auto ref Args args)
    {
        debug(PROFILE_API) immutable tm = (config.profile > 1 ? currTime.ticks : 0);
        lockNR();
        // Unlock on exception; the success path unlocks explicitly below.
        scope (failure) gcLock.unlock();
        debug(PROFILE_API) immutable tm2 = (config.profile > 1 ? currTime.ticks : 0);

        static if (is(typeof(func(args)) == void))
            func(args);
        else
            auto res = func(args);

        debug(PROFILE_API) if (config.profile > 1)
        {
            count++;
            immutable now = currTime.ticks;
            lockTime += tm2 - tm;   // time spent waiting for the lock
            time += now - tm2;      // time spent inside func
        }
        gcLock.unlock();

        static if (!is(typeof(func(args)) == void))
            return res;
    }
398 
399 
400     uint getAttr(void* p) nothrow
401     {
402         if (!p)
403         {
404             return 0;
405         }
406 
407         static uint go(Gcx* gcx, void* p) nothrow
408         {
409             Pool* pool = gcx.findPool(p);
410             uint  oldb = 0;
411 
412             if (pool)
413             {
414                 p = sentinel_sub(p);
415                 auto biti = cast(size_t)(p - pool.baseAddr) >> pool.shiftBy;
416 
417                 oldb = pool.getBits(biti);
418             }
419             return oldb;
420         }
421 
422         return runLocked!(go, otherTime, numOthers)(gcx, p);
423     }
424 
425 
426     uint setAttr(void* p, uint mask) nothrow
427     {
428         if (!p)
429         {
430             return 0;
431         }
432 
433         static uint go(Gcx* gcx, void* p, uint mask) nothrow
434         {
435             Pool* pool = gcx.findPool(p);
436             uint  oldb = 0;
437 
438             if (pool)
439             {
440                 p = sentinel_sub(p);
441                 auto biti = cast(size_t)(p - pool.baseAddr) >> pool.shiftBy;
442 
443                 oldb = pool.getBits(biti);
444                 pool.setBits(biti, mask);
445             }
446             return oldb;
447         }
448 
449         return runLocked!(go, otherTime, numOthers)(gcx, p, mask);
450     }
451 
452 
453     uint clrAttr(void* p, uint mask) nothrow
454     {
455         if (!p)
456         {
457             return 0;
458         }
459 
460         static uint go(Gcx* gcx, void* p, uint mask) nothrow
461         {
462             Pool* pool = gcx.findPool(p);
463             uint  oldb = 0;
464 
465             if (pool)
466             {
467                 p = sentinel_sub(p);
468                 auto biti = cast(size_t)(p - pool.baseAddr) >> pool.shiftBy;
469 
470                 oldb = pool.getBits(biti);
471                 pool.clrBits(biti, mask);
472             }
473             return oldb;
474         }
475 
476         return runLocked!(go, otherTime, numOthers)(gcx, p, mask);
477     }
478 
479 
480     void *malloc(size_t size, uint bits, const TypeInfo ti) nothrow
481     {
482         if (!size)
483         {
484             return null;
485         }
486 
487         size_t localAllocSize = void;
488 
489         auto p = runLocked!(mallocNoSync, mallocTime, numMallocs)(size, bits, localAllocSize, ti);
490 
491         if (!(bits & BlkAttr.NO_SCAN))
492         {
493             memset(p + size, 0, localAllocSize - size);
494         }
495 
496         return p;
497     }
498 
499 
    //
    // Implementation of malloc; caller must hold the GC lock.
    // Stores the actual usable size of the block in alloc_size and calls
    // onOutOfMemoryErrorNoGC if the allocation fails.
    //
    private void *mallocNoSync(size_t size, uint bits, ref size_t alloc_size, const TypeInfo ti = null) nothrow
    {
        assert(size != 0);

        //debug(PRINTF) printf("GC::malloc(size = %d, gcx = %p)\n", size, gcx);
        assert(gcx);
        //debug(PRINTF) printf("gcx.self = %x, pthread_self() = %x\n", gcx.self, pthread_self());

        // SENTINEL_EXTRA is 0 unless underrun/overrun guards are compiled in.
        auto p = gcx.alloc(size + SENTINEL_EXTRA, alloc_size, bits);
        if (!p)
            onOutOfMemoryErrorNoGC();

        debug (SENTINEL)
        {
            // Skip past the leading guard and report only the user size.
            p = sentinel_add(p);
            sentinel_init(p, size);
            alloc_size = size;
        }
        gcx.log_malloc(p, size);

        return p;
    }
525 
526 
527     BlkInfo qalloc( size_t size, uint bits, const TypeInfo ti) nothrow
528     {
529 
530         if (!size)
531         {
532             return BlkInfo.init;
533         }
534 
535         BlkInfo retval;
536 
537         retval.base = runLocked!(mallocNoSync, mallocTime, numMallocs)(size, bits, retval.size, ti);
538 
539         if (!(bits & BlkAttr.NO_SCAN))
540         {
541             memset(retval.base + size, 0, retval.size - size);
542         }
543 
544         retval.attr = bits;
545         return retval;
546     }
547 
548 
549     void *calloc(size_t size, uint bits, const TypeInfo ti) nothrow
550     {
551         if (!size)
552         {
553             return null;
554         }
555 
556         size_t localAllocSize = void;
557 
558         auto p = runLocked!(mallocNoSync, mallocTime, numMallocs)(size, bits, localAllocSize, ti);
559 
560         memset(p, 0, size);
561         if (!(bits & BlkAttr.NO_SCAN))
562         {
563             memset(p + size, 0, localAllocSize - size);
564         }
565 
566         return p;
567     }
568 
569 
570     void *realloc(void *p, size_t size, uint bits, const TypeInfo ti) nothrow
571     {
572         size_t localAllocSize = void;
573         auto oldp = p;
574 
575         p = runLocked!(reallocNoSync, mallocTime, numMallocs)(p, size, bits, localAllocSize, ti);
576 
577         if (p !is oldp && !(bits & BlkAttr.NO_SCAN))
578         {
579             memset(p + size, 0, localAllocSize - size);
580         }
581 
582         return p;
583     }
584 
585 
    //
    // Implementation of realloc; caller must hold the GC lock.
    // bits will be set to the resulting bits of the new block.
    // Semantics: size 0 frees; null p allocates; otherwise large blocks are
    // grown/shrunk in place when possible, else the data is copied into a
    // fresh allocation (Lmalloc path).
    //
    private void *reallocNoSync(void *p, size_t size, ref uint bits, ref size_t alloc_size, const TypeInfo ti = null) nothrow
    {
        if (!size)
        {   if (p)
            {   freeNoSync(p);
                p = null;
            }
            alloc_size = 0;
        }
        else if (!p)
        {
            p = mallocNoSync(size, bits, alloc_size, ti);
        }
        else
        {   void *p2;
            size_t psize;

            //debug(PRINTF) printf("GC::realloc(p = %p, size = %zu)\n", p, size);
            debug (SENTINEL)
            {
                // Sentinel builds always reallocate on any size change.
                sentinel_Invariant(p);
                psize = *sentinel_size(p);
                if (psize != size)
                {
                    if (psize)
                    {
                        Pool *pool = gcx.findPool(p);

                        if (pool)
                        {
                            auto biti = cast(size_t)(sentinel_sub(p) - pool.baseAddr) >> pool.shiftBy;

                            if (bits)
                            {
                                pool.clrBits(biti, ~BlkAttr.NONE);
                                pool.setBits(biti, bits);
                            }
                            else
                            {
                                // Carry the old block's attributes over.
                                bits = pool.getBits(biti);
                            }
                        }
                    }
                    p2 = mallocNoSync(size, bits, alloc_size, ti);
                    if (psize < size)
                        size = psize;
                    //debug(PRINTF) printf("\tcopying %d bytes\n",size);
                    memcpy(p2, p, size);
                    p = p2;
                }
            }
            else
            {
                auto pool = gcx.findPool(p);
                if (pool.isLargeObject)
                {
                    auto lpool = cast(LargeObjectPool*) pool;
                    psize = lpool.getSize(p);     // get allocated size

                    if (size <= PAGESIZE / 2)
                        goto Lmalloc; // switching from large object pool to small object pool

                    auto psz = psize / PAGESIZE;
                    auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
                    if (newsz == psz)
                    {
                        // Same page count: nothing to do.
                        alloc_size = psize;
                        return p;
                    }

                    auto pagenum = lpool.pagenumOf(p);

                    if (newsz < psz)
                    {   // Shrink in place
                        debug (MEMSTOMP) memset(p + size, 0xF2, psize - size);
                        lpool.freePages(pagenum + newsz, psz - newsz);
                    }
                    else if (pagenum + newsz <= pool.npages)
                    {   // Attempt to expand in place
                        foreach (binsz; lpool.pagetable[pagenum + psz .. pagenum + newsz])
                            if (binsz != B_FREE)
                                goto Lmalloc;

                        debug (MEMSTOMP) memset(p + psize, 0xF0, size - psize);
                        debug(PRINTF) printFreeInfo(pool);
                        // Claim the following free pages as continuation pages.
                        memset(&lpool.pagetable[pagenum + psz], B_PAGEPLUS, newsz - psz);
                        gcx.usedLargePages += newsz - psz;
                        lpool.freepages -= (newsz - psz);
                        debug(PRINTF) printFreeInfo(pool);
                    }
                    else
                        goto Lmalloc; // does not fit into current pool

                    lpool.updateOffsets(pagenum);
                    if (bits)
                    {
                        immutable biti = cast(size_t)(p - pool.baseAddr) >> pool.shiftBy;
                        pool.clrBits(biti, ~BlkAttr.NONE);
                        pool.setBits(biti, bits);
                    }
                    alloc_size = newsz * PAGESIZE;
                    return p;
                }

                psize = (cast(SmallObjectPool*) pool).getSize(p);   // get allocated size
                if (psize < size ||             // if new size is bigger
                    psize > size * 2)           // or less than half
                {
                Lmalloc:
                    if (psize && pool)
                    {
                        auto biti = cast(size_t)(p - pool.baseAddr) >> pool.shiftBy;

                        if (bits)
                        {
                            pool.clrBits(biti, ~BlkAttr.NONE);
                            pool.setBits(biti, bits);
                        }
                        else
                        {
                            // Carry the old block's attributes over.
                            bits = pool.getBits(biti);
                        }
                    }
                    p2 = mallocNoSync(size, bits, alloc_size, ti);
                    if (psize < size)
                        size = psize;
                    //debug(PRINTF) printf("\tcopying %d bytes\n",size);
                    memcpy(p2, p, size);
                    p = p2;
                }
                else
                    alloc_size = psize;  // keep the block; report its real size
            }
        }
        return p;
    }
725 
726 
    // Attempt to grow the block at p in place by at least minsize and at
    // most maxsize bytes; returns the new full size, or 0 if not possible.
    // Locked wrapper around extendNoSync.
    size_t extend(void* p, size_t minsize, size_t maxsize, const TypeInfo ti) nothrow
    {
        return runLocked!(extendNoSync, extendTime, numExtends)(p, minsize, maxsize, ti);
    }
731 
732 
    //
    // Implementation of extend; caller must hold the GC lock.
    // Only page-aligned large objects can be extended, by claiming free
    // pages that directly follow the block inside the same pool.
    //
    private size_t extendNoSync(void* p, size_t minsize, size_t maxsize, const TypeInfo ti = null) nothrow
    in
    {
        assert(minsize <= maxsize);
    }
    body
    {
        //debug(PRINTF) printf("GC::extend(p = %p, minsize = %zu, maxsize = %zu)\n", p, minsize, maxsize);
        debug (SENTINEL)
        {
            // Guard bytes sit right after the block, so in-place extension
            // is never possible in sentinel builds.
            return 0;
        }
        else
        {
            auto pool = gcx.findPool(p);
            if (!pool || !pool.isLargeObject)
                return 0;

            auto lpool = cast(LargeObjectPool*) pool;
            auto psize = lpool.getSize(p);   // get allocated size
            if (psize < PAGESIZE)
                return 0;                   // cannot extend buckets

            auto psz = psize / PAGESIZE;
            auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
            auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;

            auto pagenum = lpool.pagenumOf(p);

            // Count how many contiguous free pages follow the block,
            // stopping at maxsz, the pool end, or the first used page.
            size_t sz;
            for (sz = 0; sz < maxsz; sz++)
            {
                auto i = pagenum + psz + sz;
                if (i == lpool.npages)
                    break;
                if (lpool.pagetable[i] != B_FREE)
                {   if (sz < minsz)
                        return 0;
                    break;
                }
            }
            if (sz < minsz)
                return 0;
            debug (MEMSTOMP) memset(pool.baseAddr + (pagenum + psz) * PAGESIZE, 0xF0, sz * PAGESIZE);
            // Claim the pages as continuation pages of this block.
            memset(lpool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
            lpool.updateOffsets(pagenum);
            lpool.freepages -= sz;
            gcx.usedLargePages += sz;
            return (psz + sz) * PAGESIZE;
        }
    }
787 
788 
789     size_t reserve(size_t size) nothrow
790     {
791         if (!size)
792         {
793             return 0;
794         }
795 
796         return runLocked!(reserveNoSync, otherTime, numOthers)(size);
797     }
798 
799 
    //
    // Implementation of reserve; caller must hold the GC lock.
    //
    private size_t reserveNoSync(size_t size) nothrow
    {
        assert(size != 0);
        assert(gcx);

        return gcx.reserve(size);
    }
810 
811 
812     void free(void *p) nothrow
813     {
814         if (!p || _inFinalizer)
815         {
816             return;
817         }
818 
819         return runLocked!(freeNoSync, freeTime, numFrees)(p);
820     }
821 
822 
    //
    // Implementation of free; caller must hold the GC lock.
    // Pointers outside the GC heap and interior pointers are ignored.
    //
    private void freeNoSync(void *p) nothrow
    {
        debug(PRINTF) printf("Freeing %p\n", cast(size_t) p);
        assert (p);

        Pool*  pool;
        size_t pagenum;
        Bins   bin;
        size_t biti;

        // Find which page it is in
        pool = gcx.findPool(p);
        if (!pool)                              // if not one of ours
            return;                             // ignore

        pagenum = pool.pagenumOf(p);

        debug(PRINTF) printf("pool base = %p, PAGENUM = %d of %d, bin = %d\n", pool.baseAddr, pagenum, pool.npages, pool.pagetable[pagenum]);
        debug(PRINTF) if (pool.isLargeObject) printf("Block size = %d\n", pool.bPageOffsets[pagenum]);

        bin = cast(Bins)pool.pagetable[pagenum];

        // Verify that the pointer is at the beginning of a block,
        //  no action should be taken if p is an interior pointer
        if (bin > B_PAGE) // B_PAGEPLUS or B_FREE
            return;
        if ((sentinel_sub(p) - pool.baseAddr) & (binsize[bin] - 1))
            return;

        sentinel_Invariant(p);
        p = sentinel_sub(p);
        biti = cast(size_t)(p - pool.baseAddr) >> pool.shiftBy;

        // Drop all attribute bits (including FINALIZE) before releasing.
        pool.clrBits(biti, ~BlkAttr.NONE);

        if (pool.isLargeObject)              // if large alloc
        {
            assert(bin == B_PAGE);
            auto lpool = cast(LargeObjectPool*) pool;

            // Free pages
            size_t npages = lpool.bPageOffsets[pagenum];
            debug (MEMSTOMP) memset(p, 0xF2, npages * PAGESIZE);
            lpool.freePages(pagenum, npages);
        }
        else
        {   // Add to free list
            List *list = cast(List*)p;

            debug (MEMSTOMP) memset(p, 0xF2, binsize[bin]);

            list.next = gcx.bucket[bin];
            list.pool = pool;
            gcx.bucket[bin] = list;
        }

        gcx.log_free(sentinel_add(p));
    }
884 
885 
886     void* addrOf(void *p) nothrow
887     {
888         if (!p)
889         {
890             return null;
891         }
892 
893         return runLocked!(addrOfNoSync, otherTime, numOthers)(p);
894     }
895 
896 
897     //
898     //
899     //
900     void* addrOfNoSync(void *p) nothrow
901     {
902         if (!p)
903         {
904             return null;
905         }
906 
907         auto q = gcx.findBase(p);
908         if (q)
909             q = sentinel_add(q);
910         return q;
911     }
912 
913 
914     size_t sizeOf(void *p) nothrow
915     {
916         if (!p)
917         {
918             return 0;
919         }
920 
921         return runLocked!(sizeOfNoSync, otherTime, numOthers)(p);
922     }
923 
924 
    //
    // Implementation of sizeOf; caller must hold the GC lock.
    // Interior pointers yield 0.
    //
    private size_t sizeOfNoSync(void *p) nothrow
    {
        assert (p);

        debug (SENTINEL)
        {
            p = sentinel_sub(p);
            size_t size = gcx.findSize(p);

            // Check for interior pointer
            // This depends on:
            // 1) size is a power of 2 for less than PAGESIZE values
            // 2) base of memory pool is aligned on PAGESIZE boundary
            if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
                size = 0;
            // Report the user size, excluding the sentinel guard bytes.
            return size ? size - SENTINEL_EXTRA : 0;
        }
        else
        {
            size_t size = gcx.findSize(p);

            // Check for interior pointer
            // This depends on:
            // 1) size is a power of 2 for less than PAGESIZE values
            // 2) base of memory pool is aligned on PAGESIZE boundary
            if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
                return 0;
            return size;
        }
    }
958 
959 
960     BlkInfo query(void *p) nothrow
961     {
962         if (!p)
963         {
964             BlkInfo i;
965             return  i;
966         }
967 
968         return runLocked!(queryNoSync, otherTime, numOthers)(p);
969     }
970 
    //
    // Implementation of query; caller must hold the GC lock.
    //
    BlkInfo queryNoSync(void *p) nothrow
    {
        assert(p);

        BlkInfo info = gcx.getInfo(p);
        debug(SENTINEL)
        {
            // Translate to the user-visible view: base past the guard,
            // size as originally requested.
            if (info.base)
            {
                info.base = sentinel_add(info.base);
                info.size = *sentinel_size(info.base);
            }
        }
        return info;
    }
989 
990 
991     /**
992      * Verify that pointer p:
993      *  1) belongs to this memory pool
994      *  2) points to the start of an allocated piece of memory
995      *  3) is not on a free list
996      */
997     void check(void *p) nothrow
998     {
999         if (!p)
1000         {
1001             return;
1002         }
1003 
1004         return runLocked!(checkNoSync, otherTime, numOthers)(p);
1005     }
1006 
1007 
    //
    // Implementation of check; caller must hold the GC lock.
    // Most checks are only compiled in with debug=PTRCHECK / PTRCHECK2.
    //
    private void checkNoSync(void *p) nothrow
    {
        assert(p);

        sentinel_Invariant(p);
        debug (PTRCHECK)
        {
            Pool*  pool;
            size_t pagenum;
            Bins   bin;
            size_t size;

            p = sentinel_sub(p);
            pool = gcx.findPool(p);
            assert(pool);
            pagenum = pool.pagenumOf(p);
            bin = cast(Bins)pool.pagetable[pagenum];
            assert(bin <= B_PAGE);
            size = binsize[bin];
            // p must sit on a bin-size boundary (i.e. be a block start).
            assert((cast(size_t)p & (size - 1)) == 0);

            debug (PTRCHECK2)
            {
                if (bin < B_PAGE)
                {
                    // Check that p is not on a free list
                    List *list;

                    for (list = gcx.bucket[bin]; list; list = list.next)
                    {
                        assert(cast(void*)list != p);
                    }
                }
            }
        }
    }
1047 
1048 
1049     void addRoot(void *p) nothrow @nogc
1050     {
1051         if (!p)
1052         {
1053             return;
1054         }
1055 
1056         gcx.addRoot(p);
1057     }
1058 
1059 
1060     void removeRoot(void *p) nothrow @nogc
1061     {
1062         if (!p)
1063         {
1064             return;
1065         }
1066 
1067         gcx.removeRoot(p);
1068     }
1069 
1070 
    /// Returns: a delegate that iterates over all registered GC roots.
    @property RootIterator rootIter() @nogc
    {
        return &gcx.rootsApply;
    }
1075 
1076 
1077     void addRange(void *p, size_t sz, const TypeInfo ti = null) nothrow @nogc
1078     {
1079         if (!p || !sz)
1080         {
1081             return;
1082         }
1083 
1084         gcx.addRange(p, p + sz, ti);
1085     }
1086 
1087 
1088     void removeRange(void *p) nothrow @nogc
1089     {
1090         if (!p)
1091         {
1092             return;
1093         }
1094 
1095         gcx.removeRange(p);
1096     }
1097 
1098 
    /// Returns: a delegate that iterates over all registered GC ranges.
    @property RangeIterator rangeIter() @nogc
    {
        return &gcx.rangesApply;
    }
1103 
1104 
1105     void runFinalizers(in void[] segment) nothrow
1106     {
1107         static void go(Gcx* gcx, in void[] segment) nothrow
1108         {
1109             gcx.runFinalizers(segment);
1110         }
1111         return runLocked!(go, otherTime, numOthers)(gcx, segment);
1112     }
1113 
1114 
    /// Returns: true while the GC is currently executing finalizers
    /// (set/cleared by Gcx.runFinalizers).
    bool inFinalizer() nothrow
    {
        return _inFinalizer;
    }
1119 
1120 
    /// Trigger a full collection (delegates to fullCollect, result discarded).
    void collect() nothrow
    {
        fullCollect();
    }
1125 
1126 
    /// Trigger a full collection that does not scan the stack
    /// (delegates to fullCollectNoStack).
    void collectNoStack() nothrow
    {
        fullCollectNoStack();
    }
1131 
1132 
1133     /**
1134      * Do full garbage collection.
1135      * Return number of pages free'd.
1136      */
1137     size_t fullCollect() nothrow
1138     {
1139         debug(PRINTF) printf("GC.fullCollect()\n");
1140 
1141         // Since a finalizer could launch a new thread, we always need to lock
1142         // when collecting.
1143         static size_t go(Gcx* gcx) nothrow
1144         {
1145             return gcx.fullcollect();
1146         }
1147         immutable result = runLocked!go(gcx);
1148 
1149         version (none)
1150         {
1151             GCStats stats;
1152 
1153             getStats(stats);
1154             debug(PRINTF) printf("heapSize = %zx, freeSize = %zx\n",
1155                 stats.heapSize, stats.freeSize);
1156         }
1157 
1158         gcx.log_collect();
1159         return result;
1160     }
1161 
1162 
1163     /**
1164      * do full garbage collection ignoring roots
1165      */
1166     void fullCollectNoStack() nothrow
1167     {
1168         // Since a finalizer could launch a new thread, we always need to lock
1169         // when collecting.
1170         static size_t go(Gcx* gcx) nothrow
1171         {
1172             return gcx.fullcollect(true);
1173         }
1174         runLocked!go(gcx);
1175     }
1176 
1177 
1178     void minimize() nothrow
1179     {
1180         static void go(Gcx* gcx) nothrow
1181         {
1182             gcx.minimize();
1183         }
1184         runLocked!(go, otherTime, numOthers)(gcx);
1185     }
1186 
1187 
    /// Returns: heap usage statistics, computed under the GC lock.
    core.memory.GC.Stats stats() nothrow
    {
        typeof(return) ret;

        runLocked!(getStatsNoSync, otherTime, numOthers)(ret);

        return ret;
    }
1196 
1197 
1198     //
1199     //
1200     //
1201     private void getStatsNoSync(out core.memory.GC.Stats stats) nothrow
1202     {
1203         foreach (pool; gcx.pooltable[0 .. gcx.npools])
1204         {
1205             foreach (bin; pool.pagetable[0 .. pool.npages])
1206             {
1207                 if (bin == B_FREE)
1208                     stats.freeSize += PAGESIZE;
1209                 else
1210                     stats.usedSize += PAGESIZE;
1211             }
1212         }
1213 
1214         size_t freeListSize;
1215         foreach (n; 0 .. B_PAGE)
1216         {
1217             immutable sz = binsize[n];
1218             for (List *list = gcx.bucket[n]; list; list = list.next)
1219                 freeListSize += sz;
1220         }
1221 
1222         stats.usedSize -= freeListSize;
1223         stats.freeSize += freeListSize;
1224     }
1225 }
1226 
1227 
1228 /* ============================ Gcx =============================== */
1229 
// Fundamental GC size constants.
enum
{   PAGESIZE =    4096,         // size of one GC page in bytes
    POOLSIZE =   (4096*256),    // default pool size: 256 pages
}
1234 
1235 
// Bin classification for each GC page. B_16 .. B_2048 are small-object
// buckets (the number is the block size in bytes); the rest describe
// whole-page states in a pool's pagetable.
enum
{
    B_16,
    B_32,
    B_64,
    B_128,
    B_256,
    B_512,
    B_1024,
    B_2048,
    B_PAGE,             // start of large alloc
    B_PAGEPLUS,         // continuation of large alloc
    B_FREE,             // free page
    B_MAX
}
1251 
1252 
// A bin index (one of the B_* values), stored compactly as a byte.
alias ubyte Bins;
1254 
1255 
// Node of a small-object free list. The node is stored in the free block
// itself; `pool` records which pool the block belongs to.
struct List
{
    List *next;
    Pool *pool;
}
1261 
1262 
// binsize[b]: allocation size in bytes for bin b.
// notbinsize[b]: mask that rounds an offset down to the start of its block.
immutable uint[B_MAX] binsize = [ 16,32,64,128,256,512,1024,2048,4096 ];
immutable size_t[B_MAX] notbinsize = [ ~(16-1),~(32-1),~(64-1),~(128-1),~(256-1),
                                ~(512-1),~(1024-1),~(2048-1),~(4096-1) ];
1266 
// Bitmap covering one page at 16-byte granularity, packed into GCBits words.
alias PageBits = GCBits.wordtype[PAGESIZE / 16 / GCBits.BITS_PER_WORD];
static assert(PAGESIZE % (GCBits.BITS_PER_WORD * 16) == 0);
1269 
// Set bit i in a page bitmap (asserts i is within the bitmap in debug builds).
private void set(ref PageBits bits, size_t i) @nogc pure nothrow
{
    assert(i < PageBits.sizeof * 8);
    bts(bits.ptr, i);
}
1275 
1276 /* ============================ Gcx =============================== */
1277 
1278 struct Gcx
1279 {
    import core.internal.spinlock;
    // Spin locks protecting the root and range sets (finer-grained than the
    // global GC lock so they can be taken @nogc).
    auto rootsLock = shared(AlignedSpinLock)(SpinLock.Contention.brief);
    auto rangesLock = shared(AlignedSpinLock)(SpinLock.Contention.brief);
    Treap!Root roots;       // registered root pointers
    Treap!Range ranges;     // registered scan ranges

    bool log; // turn on logging
    debug(INVARIANT) bool initialized;  // set once initialize() has run
    uint disabled; // turn off collections if >0

    import gc.pooltable;
    @property size_t npools() pure const nothrow { return pooltable.length; }
    PoolTable!Pool pooltable;   // sorted table of all memory pools

    List*[B_PAGE] bucket; // free list for each small size

    // run a collection when reaching those thresholds (number of used pages)
    float smallCollectThreshold, largeCollectThreshold;
    uint usedSmallPages, usedLargePages;
    // total number of mapped pages
    uint mappedPages;
1301 
    // One-time setup: zero the entire struct, then initialize the containers
    // and reset thresholds/counters. Must run before any other Gcx method.
    void initialize()
    {
        // Wholesale zero-fill first; fields needing non-zero state follow.
        (cast(byte*)&this)[0 .. Gcx.sizeof] = 0;
        log_init();
        roots.initialize();
        ranges.initialize();
        smallCollectThreshold = largeCollectThreshold = 0.0f;
        usedSmallPages = usedLargePages = 0;
        mappedPages = 0;
        //printf("gcx = %p, self = %x\n", &this, self);
        debug(INVARIANT) initialized = true;
    }
1314 
1315 
    // Tear down the collector: optionally print profiling statistics, then
    // destroy every pool and release all bookkeeping structures.
    void Dtor()
    {
        if (config.profile)
        {
            printf("\tNumber of collections:  %llu\n", cast(ulong)numCollections);
            printf("\tTotal GC prep time:  %lld milliseconds\n",
                   prepTime.total!("msecs"));
            printf("\tTotal mark time:  %lld milliseconds\n",
                   markTime.total!("msecs"));
            printf("\tTotal sweep time:  %lld milliseconds\n",
                   sweepTime.total!("msecs"));
            printf("\tTotal page recovery time:  %lld milliseconds\n",
                   recoverTime.total!("msecs"));
            long maxPause = maxPauseTime.total!("msecs");
            printf("\tMax Pause Time:  %lld milliseconds\n", maxPause);
            long gcTime = (recoverTime + sweepTime + markTime + prepTime).total!("msecs");
            printf("\tGrand total GC time:  %lld milliseconds\n", gcTime);
            long pauseTime = (markTime + prepTime).total!("msecs");

            char[30] apitxt;
            apitxt[0] = 0;
            // Per-API-call timing is only gathered at profile level > 1.
            debug(PROFILE_API) if (config.profile > 1)
            {
                static Duration toDuration(long dur)
                {
                    return MonoTime(dur) - MonoTime(0);
                }

                printf("\n");
                printf("\tmalloc:  %llu calls, %lld ms\n", cast(ulong)numMallocs, toDuration(mallocTime).total!"msecs");
                printf("\trealloc: %llu calls, %lld ms\n", cast(ulong)numReallocs, toDuration(reallocTime).total!"msecs");
                printf("\tfree:    %llu calls, %lld ms\n", cast(ulong)numFrees, toDuration(freeTime).total!"msecs");
                printf("\textend:  %llu calls, %lld ms\n", cast(ulong)numExtends, toDuration(extendTime).total!"msecs");
                printf("\tother:   %llu calls, %lld ms\n", cast(ulong)numOthers, toDuration(otherTime).total!"msecs");
                printf("\tlock time: %lld ms\n", toDuration(lockTime).total!"msecs");

                long apiTime = mallocTime + reallocTime + freeTime + extendTime + otherTime + lockTime;
                printf("\tGC API: %lld ms\n", toDuration(apiTime).total!"msecs");
                sprintf(apitxt.ptr, " API%5ld ms", toDuration(apiTime).total!"msecs");
            }

            printf("GC summary:%5lld MB,%5lld GC%5lld ms, Pauses%5lld ms <%5lld ms%s\n",
                   cast(long) maxPoolMemory >> 20, cast(ulong)numCollections, gcTime,
                   pauseTime, maxPause, apitxt.ptr);
        }

        debug(INVARIANT) initialized = false;

        // Release every pool and keep the mapped-page accounting balanced.
        for (size_t i = 0; i < npools; i++)
        {
            Pool *pool = pooltable[i];
            mappedPages -= pool.npages;
            pool.Dtor();
            cstdlib.free(pool);
        }
        assert(!mappedPages);   // all mapped memory must have been returned
        pooltable.Dtor();

        roots.removeAll();
        ranges.removeAll();
        toscan.reset();
    }
1378 
1379 
    // No-op; the debug(INVARIANT) invariant block below does the real checks.
    void Invariant() const { }
1381 
    // Structural sanity checks, enabled only in debug(INVARIANT) builds.
    debug(INVARIANT)
    invariant()
    {
        if (initialized)
        {
            //printf("Gcx.invariant(): this = %p\n", &this);
            pooltable.Invariant();

            // Every registered range must be non-null and well-ordered.
            rangesLock.lock();
            foreach (range; ranges)
            {
                assert(range.pbot);
                assert(range.ptop);
                assert(range.pbot <= range.ptop);
            }
            rangesLock.unlock();

            // Walk each free list; traversal itself faults on corruption.
            for (size_t i = 0; i < B_PAGE; i++)
            {
                for (auto list = cast(List*)bucket[i]; list; list = list.next)
                {
                }
            }
        }
    }
1407 
1408 
1409     /**
1410      *
1411      */
1412     void addRoot(void *p) nothrow @nogc
1413     {
1414         rootsLock.lock();
1415         scope (failure) rootsLock.unlock();
1416         roots.insert(Root(p));
1417         rootsLock.unlock();
1418     }
1419 
1420 
1421     /**
1422      *
1423      */
1424     void removeRoot(void *p) nothrow @nogc
1425     {
1426         rootsLock.lock();
1427         scope (failure) rootsLock.unlock();
1428         roots.remove(Root(p));
1429         rootsLock.unlock();
1430     }
1431 
1432 
1433     /**
1434      *
1435      */
1436     int rootsApply(scope int delegate(ref Root) nothrow dg) nothrow
1437     {
1438         rootsLock.lock();
1439         scope (failure) rootsLock.unlock();
1440         auto ret = roots.opApply(dg);
1441         rootsLock.unlock();
1442         return ret;
1443     }
1444 
1445 
1446     /**
1447      *
1448      */
1449     void addRange(void *pbot, void *ptop, const TypeInfo ti) nothrow @nogc
1450     {
1451         //debug(PRINTF) printf("Thread %x ", pthread_self());
1452         debug(PRINTF) printf("%p.Gcx::addRange(%p, %p)\n", &this, pbot, ptop);
1453         rangesLock.lock();
1454         scope (failure) rangesLock.unlock();
1455         ranges.insert(Range(pbot, ptop));
1456         rangesLock.unlock();
1457     }
1458 
1459 
1460     /**
1461      *
1462      */
1463     void removeRange(void *pbot) nothrow @nogc
1464     {
1465         //debug(PRINTF) printf("Thread %x ", pthread_self());
1466         debug(PRINTF) printf("Gcx.removeRange(%p)\n", pbot);
1467         rangesLock.lock();
1468         scope (failure) rangesLock.unlock();
1469         ranges.remove(Range(pbot, pbot)); // only pbot is used, see Range.opCmp
1470         rangesLock.unlock();
1471 
1472         // debug(PRINTF) printf("Wrong thread\n");
1473         // This is a fatal error, but ignore it.
1474         // The problem is that we can get a Close() call on a thread
1475         // other than the one the range was allocated on.
1476         //assert(zero);
1477     }
1478 
1479     /**
1480      *
1481      */
1482     int rangesApply(scope int delegate(ref Range) nothrow dg) nothrow
1483     {
1484         rangesLock.lock();
1485         scope (failure) rangesLock.unlock();
1486         auto ret = ranges.opApply(dg);
1487         rangesLock.unlock();
1488         return ret;
1489     }
1490 
1491 
1492     /**
1493      *
1494      */
1495     void runFinalizers(in void[] segment) nothrow
1496     {
1497         ConservativeGC._inFinalizer = true;
1498         scope (failure) ConservativeGC._inFinalizer = false;
1499 
1500         foreach (pool; pooltable[0 .. npools])
1501         {
1502             if (!pool.finals.nbits) continue;
1503 
1504             if (pool.isLargeObject)
1505             {
1506                 auto lpool = cast(LargeObjectPool*) pool;
1507                 lpool.runFinalizers(segment);
1508             }
1509             else
1510             {
1511                 auto spool = cast(SmallObjectPool*) pool;
1512                 spool.runFinalizers(segment);
1513             }
1514         }
1515         ConservativeGC._inFinalizer = false;
1516     }
1517 
    // Return the pool containing p, or null if p is not in any GC pool.
    Pool* findPool(void* p) pure nothrow
    {
        return pooltable.findPool(p);
    }
1522 
1523     /**
1524      * Find base address of block containing pointer p.
1525      * Returns null if not a gc'd pointer
1526      */
1527     void* findBase(void *p) nothrow
1528     {
1529         Pool *pool;
1530 
1531         pool = findPool(p);
1532         if (pool)
1533         {
1534             size_t offset = cast(size_t)(p - pool.baseAddr);
1535             size_t pn = offset / PAGESIZE;
1536             Bins   bin = cast(Bins)pool.pagetable[pn];
1537 
1538             // Adjust bit to be at start of allocated memory block
1539             if (bin <= B_PAGE)
1540             {
1541                 return pool.baseAddr + (offset & notbinsize[bin]);
1542             }
1543             else if (bin == B_PAGEPLUS)
1544             {
1545                 auto pageOffset = pool.bPageOffsets[pn];
1546                 offset -= pageOffset * PAGESIZE;
1547                 pn -= pageOffset;
1548 
1549                 return pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1550             }
1551             else
1552             {
1553                 // we are in a B_FREE page
1554                 assert(bin == B_FREE);
1555                 return null;
1556             }
1557         }
1558         return null;
1559     }
1560 
1561 
1562     /**
1563      * Find size of pointer p.
1564      * Returns 0 if not a gc'd pointer
1565      */
1566     size_t findSize(void *p) nothrow
1567     {
1568         Pool* pool = findPool(p);
1569         if (pool)
1570             return pool.slGetSize(p);
1571         return 0;
1572     }
1573 
1574     /**
1575      *
1576      */
1577     BlkInfo getInfo(void* p) nothrow
1578     {
1579         Pool* pool = findPool(p);
1580         if (pool)
1581             return pool.slGetInfo(p);
1582         return BlkInfo();
1583     }
1584 
1585     /**
1586      * Computes the bin table using CTFE.
1587      */
1588     static byte[2049] ctfeBins() nothrow
1589     {
1590         byte[2049] ret;
1591         size_t p = 0;
1592         for (Bins b = B_16; b <= B_2048; b++)
1593             for ( ; p <= binsize[b]; p++)
1594                 ret[p] = b;
1595 
1596         return ret;
1597     }
1598 
    // size -> bin lookup table for small allocations (valid for sizes 0..2048).
    static const byte[2049] binTable = ctfeBins();
1600 
1601     /**
1602      * Allocate a new pool of at least size bytes.
1603      * Sort it into pooltable[].
1604      * Mark all memory in the pool as B_FREE.
1605      * Return the actual number of bytes reserved or 0 on error.
1606      */
1607     size_t reserve(size_t size) nothrow
1608     {
1609         size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
1610 
1611         // Assume reserve() is for small objects.
1612         Pool*  pool = newPool(npages, false);
1613 
1614         if (!pool)
1615             return 0;
1616         return pool.npages * PAGESIZE;
1617     }
1618 
1619     /**
1620      * Update the thresholds for when to collect the next time
1621      */
1622     void updateCollectThresholds() nothrow
1623     {
1624         static float max(float a, float b) nothrow
1625         {
1626             return a >= b ? a : b;
1627         }
1628 
1629         // instantly increases, slowly decreases
1630         static float smoothDecay(float oldVal, float newVal) nothrow
1631         {
1632             // decay to 63.2% of newVal over 5 collections
1633             // http://en.wikipedia.org/wiki/Low-pass_filter#Simple_infinite_impulse_response_filter
1634             enum alpha = 1.0 / (5 + 1);
1635             immutable decay = (newVal - oldVal) * alpha + oldVal;
1636             return max(newVal, decay);
1637         }
1638 
1639         immutable smTarget = usedSmallPages * config.heapSizeFactor;
1640         smallCollectThreshold = smoothDecay(smallCollectThreshold, smTarget);
1641         immutable lgTarget = usedLargePages * config.heapSizeFactor;
1642         largeCollectThreshold = smoothDecay(largeCollectThreshold, lgTarget);
1643     }
1644 
1645     /**
1646      * Minimizes physical memory usage by returning free pools to the OS.
1647      */
1648     void minimize() nothrow
1649     {
1650         debug(PRINTF) printf("Minimizing.\n");
1651 
1652         foreach (pool; pooltable.minimize())
1653         {
1654             debug(PRINTF) printFreeInfo(pool);
1655             mappedPages -= pool.npages;
1656             pool.Dtor();
1657             cstdlib.free(pool);
1658         }
1659 
1660         debug(PRINTF) printf("Done minimizing.\n");
1661     }
1662 
    // True when the total mapped GC memory is considered low for this system.
    private @property bool lowMem() const nothrow
    {
        return isLowOnMem(mappedPages * PAGESIZE);
    }
1667 
1668     void* alloc(size_t size, ref size_t alloc_size, uint bits) nothrow
1669     {
1670         return size <= 2048 ? smallAlloc(binTable[size], alloc_size, bits)
1671                             : bigAlloc(size, alloc_size, bits);
1672     }
1673 
    // Allocate one block from the given small-object bin, collecting and/or
    // growing the heap as needed. alloc_size receives the bin's block size.
    void* smallAlloc(Bins bin, ref size_t alloc_size, uint bits) nothrow
    {
        alloc_size = binsize[bin];

        void* p;
        // Pop the head of the bin's free list, refilling it from a pool page
        // first if it is empty. Returns false only when no page is available.
        bool tryAlloc() nothrow
        {
            if (!bucket[bin])
            {
                bucket[bin] = allocPage(bin);
                if (!bucket[bin])
                    return false;
            }
            p = bucket[bin];
            return true;
        }

        if (!tryAlloc())
        {
            if (!lowMem && (disabled || usedSmallPages < smallCollectThreshold))
            {
                // disabled or threshold not reached => allocate a new pool instead of collecting
                if (!newPool(1, false))
                {
                    // out of memory => try to free some memory
                    fullcollect();
                    if (lowMem) minimize();
                }
            }
            else
            {
                fullcollect();
                if (lowMem) minimize();
            }
            // tryAlloc will succeed if a new pool was allocated above, if it fails allocate a new pool now
            if (!tryAlloc() && (!newPool(1, false) || !tryAlloc()))
                // out of luck or memory
                onOutOfMemoryErrorNoGC();
        }
        assert(p !is null);

        // Return next item from free list
        bucket[bin] = (cast(List*)p).next;
        auto pool = (cast(List*)p).pool;
        // Record the requested attribute bits for this block in its pool.
        if (bits)
            pool.setBits((p - pool.baseAddr) >> pool.shiftBy, bits);
        //debug(PRINTF) printf("\tmalloc => %p\n", p);
        debug (MEMSTOMP) memset(p, 0xF0, alloc_size);
        return p;
    }
1724 
1725     /**
1726      * Allocate a chunk of memory that is larger than a page.
1727      * Return null if out of memory.
1728      */
1729     void* bigAlloc(size_t size, ref size_t alloc_size, uint bits, const TypeInfo ti = null) nothrow
1730     {
1731         debug(PRINTF) printf("In bigAlloc.  Size:  %d\n", size);
1732 
1733         LargeObjectPool* pool;
1734         size_t pn;
1735         immutable npages = (size + PAGESIZE - 1) / PAGESIZE;
1736         if (npages == 0)
1737             onOutOfMemoryErrorNoGC(); // size just below size_t.max requested
1738 
1739         bool tryAlloc() nothrow
1740         {
1741             foreach (p; pooltable[0 .. npools])
1742             {
1743                 if (!p.isLargeObject || p.freepages < npages)
1744                     continue;
1745                 auto lpool = cast(LargeObjectPool*) p;
1746                 if ((pn = lpool.allocPages(npages)) == OPFAIL)
1747                     continue;
1748                 pool = lpool;
1749                 return true;
1750             }
1751             return false;
1752         }
1753 
1754         bool tryAllocNewPool() nothrow
1755         {
1756             pool = cast(LargeObjectPool*) newPool(npages, true);
1757             if (!pool) return false;
1758             pn = pool.allocPages(npages);
1759             assert(pn != OPFAIL);
1760             return true;
1761         }
1762 
1763         if (!tryAlloc())
1764         {
1765             if (!lowMem && (disabled || usedLargePages < largeCollectThreshold))
1766             {
1767                 // disabled or threshold not reached => allocate a new pool instead of collecting
1768                 if (!tryAllocNewPool())
1769                 {
1770                     // disabled but out of memory => try to free some memory
1771                     fullcollect();
1772                     minimize();
1773                 }
1774             }
1775             else
1776             {
1777                 fullcollect();
1778                 minimize();
1779             }
1780             // If alloc didn't yet succeed retry now that we collected/minimized
1781             if (!pool && !tryAlloc() && !tryAllocNewPool())
1782                 // out of luck or memory
1783                 return null;
1784         }
1785         assert(pool);
1786 
1787         debug(PRINTF) printFreeInfo(&pool.base);
1788         pool.pagetable[pn] = B_PAGE;
1789         if (npages > 1)
1790             memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
1791         pool.updateOffsets(pn);
1792         usedLargePages += npages;
1793         pool.freepages -= npages;
1794 
1795         debug(PRINTF) printFreeInfo(&pool.base);
1796 
1797         auto p = pool.baseAddr + pn * PAGESIZE;
1798         debug(PRINTF) printf("Got large alloc:  %p, pt = %d, np = %d\n", p, pool.pagetable[pn], npages);
1799         debug (MEMSTOMP) memset(p, 0xF1, size);
1800         alloc_size = npages * PAGESIZE;
1801         //debug(PRINTF) printf("\tp = %p\n", p);
1802 
1803         if (bits)
1804             pool.setBits(pn, bits);
1805         return p;
1806     }
1807 
1808 
1809     /**
1810      * Allocate a new pool with at least npages in it.
1811      * Sort it into pooltable[].
1812      * Return null if failed.
1813      */
1814     Pool *newPool(size_t npages, bool isLargeObject) nothrow
1815     {
1816         //debug(PRINTF) printf("************Gcx::newPool(npages = %d)****************\n", npages);
1817 
1818         // Minimum of POOLSIZE
1819         size_t minPages = (config.minPoolSize << 20) / PAGESIZE;
1820         if (npages < minPages)
1821             npages = minPages;
1822         else if (npages > minPages)
1823         {   // Give us 150% of requested size, so there's room to extend
1824             auto n = npages + (npages >> 1);
1825             if (n < size_t.max/PAGESIZE)
1826                 npages = n;
1827         }
1828 
1829         // Allocate successively larger pools up to 8 megs
1830         if (npools)
1831         {   size_t n;
1832 
1833             n = config.minPoolSize + config.incPoolSize * npools;
1834             if (n > config.maxPoolSize)
1835                 n = config.maxPoolSize;                 // cap pool size
1836             n *= (1 << 20) / PAGESIZE;                     // convert MB to pages
1837             if (npages < n)
1838                 npages = n;
1839         }
1840 
1841         //printf("npages = %d\n", npages);
1842 
1843         auto pool = cast(Pool *)cstdlib.calloc(1, isLargeObject ? LargeObjectPool.sizeof : SmallObjectPool.sizeof);
1844         if (pool)
1845         {
1846             pool.initialize(npages, isLargeObject);
1847             if (!pool.baseAddr || !pooltable.insert(pool))
1848             {
1849                 pool.Dtor();
1850                 cstdlib.free(pool);
1851                 return null;
1852             }
1853         }
1854 
1855         mappedPages += npages;
1856 
1857         if (config.profile)
1858         {
1859             if (mappedPages * PAGESIZE > maxPoolMemory)
1860                 maxPoolMemory = mappedPages * PAGESIZE;
1861         }
1862         return pool;
1863     }
1864 
1865     /**
1866     * Allocate a page of bin's.
1867     * Returns:
1868     *           head of a single linked list of new entries
1869     */
1870     List* allocPage(Bins bin) nothrow
1871     {
1872         //debug(PRINTF) printf("Gcx::allocPage(bin = %d)\n", bin);
1873         for (size_t n = 0; n < npools; n++)
1874         {
1875             Pool* pool = pooltable[n];
1876             if (pool.isLargeObject)
1877                 continue;
1878             if (List* p = (cast(SmallObjectPool*)pool).allocPage(bin))
1879             {
1880                 ++usedSmallPages;
1881                 return p;
1882             }
1883         }
1884         return null;
1885     }
1886 
    // A growable LIFO stack of Range values backed directly by os_mem_map,
    // so pushing work during marking never allocates through the GC itself.
    static struct ToScanStack
    {
    nothrow:
        @disable this(this);

        // Release the backing memory and return to the empty state.
        void reset()
        {
            _length = 0;
            os_mem_unmap(_p, _cap * Range.sizeof);
            _p = null;
            _cap = 0;
        }

        // Push rng, growing the backing storage if it is full.
        void push(Range rng)
        {
            if (_length == _cap) grow();
            _p[_length++] = rng;
        }

        // Pop and return the most recently pushed range.
        Range pop()
        in { assert(!empty); }
        body
        {
            return _p[--_length];
        }

        // Random access to a stored range by index.
        ref inout(Range) opIndex(size_t idx) inout
        in { assert(idx < _length); }
        body
        {
            return _p[idx];
        }

        @property size_t length() const { return _length; }
        @property bool empty() const { return !length; }

    private:
        // Double the capacity (or allocate the initial chunk), copying the
        // existing entries into the new mapping and unmapping the old one.
        void grow()
        {
            enum initSize = 64 * 1024; // Windows VirtualAlloc granularity
            immutable ncap = _cap ? 2 * _cap : initSize / Range.sizeof;
            auto p = cast(Range*)os_mem_map(ncap * Range.sizeof);
            if (p is null) onOutOfMemoryErrorNoGC();
            if (_p !is null)
            {
                p[0 .. _length] = _p[0 .. _length];
                os_mem_unmap(_p, _cap * Range.sizeof);
            }
            _p = p;
            _cap = ncap;
        }

        size_t _length; // number of ranges currently stored
        Range* _p;      // os_mem_map'ed backing array
        size_t _cap;    // capacity of the backing array, in ranges
    }
1943 
    ToScanStack toscan; // work list of memory ranges still to be scanned
1945 
1946     /**
1947      * Search a range of memory values and mark any pointers into the GC pool.
1948      */
1949     void mark(void *pbot, void *ptop) scope nothrow
1950     {
1951         void **p1 = cast(void **)pbot;
1952         void **p2 = cast(void **)ptop;
1953 
1954         // limit the amount of ranges added to the toscan stack
1955         enum FANOUT_LIMIT = 32;
1956         size_t stackPos;
1957         Range[FANOUT_LIMIT] stack = void;
1958 
1959     Lagain:
1960         size_t pcache = 0;
1961 
1962         // let dmd allocate a register for this.pools
1963         auto pools = pooltable.pools;
1964         const highpool = pooltable.npools - 1;
1965         const minAddr = pooltable.minAddr;
1966         const maxAddr = pooltable.maxAddr;
1967 
1968         //printf("marking range: [%p..%p] (%#zx)\n", p1, p2, cast(size_t)p2 - cast(size_t)p1);
1969     Lnext: for (; p1 < p2; p1++)
1970         {
1971             auto p = *p1;
1972 
1973             //if (log) debug(PRINTF) printf("\tmark %p\n", p);
1974             if (p >= minAddr && p < maxAddr)
1975             {
1976                 if ((cast(size_t)p & ~cast(size_t)(PAGESIZE-1)) == pcache)
1977                     continue;
1978 
1979                 Pool* pool = void;
1980                 size_t low = 0;
1981                 size_t high = highpool;
1982                 while (true)
1983                 {
1984                     size_t mid = (low + high) >> 1;
1985                     pool = pools[mid];
1986                     if (p < pool.baseAddr)
1987                         high = mid - 1;
1988                     else if (p >= pool.topAddr)
1989                         low = mid + 1;
1990                     else break;
1991 
1992                     if (low > high)
1993                         continue Lnext;
1994                 }
1995                 size_t offset = cast(size_t)(p - pool.baseAddr);
1996                 size_t biti = void;
1997                 size_t pn = offset / PAGESIZE;
1998                 Bins   bin = cast(Bins)pool.pagetable[pn];
1999                 void* base = void;
2000 
2001                 //debug(PRINTF) printf("\t\tfound pool %p, base=%p, pn = %zd, bin = %d, biti = x%x\n", pool, pool.baseAddr, pn, bin, biti);
2002 
2003                 // Adjust bit to be at start of allocated memory block
2004                 if (bin < B_PAGE)
2005                 {
2006                     // We don't care abou setting pointsToBase correctly
2007                     // because it's ignored for small object pools anyhow.
2008                     auto offsetBase = offset & notbinsize[bin];
2009                     biti = offsetBase >> pool.shiftBy;
2010                     base = pool.baseAddr + offsetBase;
2011                     //debug(PRINTF) printf("\t\tbiti = x%x\n", biti);
2012 
2013                     if (!pool.mark.set(biti) && !pool.noscan.test(biti)) {
2014                         stack[stackPos++] = Range(base, base + binsize[bin]);
2015                         if (stackPos == stack.length)
2016                             break;
2017                     }
2018                 }
2019                 else if (bin == B_PAGE)
2020                 {
2021                     auto offsetBase = offset & notbinsize[bin];
2022                     base = pool.baseAddr + offsetBase;
2023                     biti = offsetBase >> pool.shiftBy;
2024                     //debug(PRINTF) printf("\t\tbiti = x%x\n", biti);
2025 
2026                     pcache = cast(size_t)p & ~cast(size_t)(PAGESIZE-1);
2027 
2028                     // For the NO_INTERIOR attribute.  This tracks whether
2029                     // the pointer is an interior pointer or points to the
2030                     // base address of a block.
2031                     bool pointsToBase = (base == sentinel_sub(p));
2032                     if (!pointsToBase && pool.nointerior.nbits && pool.nointerior.test(biti))
2033                         continue;
2034 
2035                     if (!pool.mark.set(biti) && !pool.noscan.test(biti)) {
2036                         stack[stackPos++] = Range(base, base + pool.bPageOffsets[pn] * PAGESIZE);
2037                         if (stackPos == stack.length)
2038                             break;
2039                     }
2040                 }
2041                 else if (bin == B_PAGEPLUS)
2042                 {
2043                     pn -= pool.bPageOffsets[pn];
2044                     base = pool.baseAddr + (pn * PAGESIZE);
2045                     biti = pn * (PAGESIZE >> pool.shiftBy);
2046 
2047                     pcache = cast(size_t)p & ~cast(size_t)(PAGESIZE-1);
2048                     if (pool.nointerior.nbits && pool.nointerior.test(biti))
2049                         continue;
2050 
2051                     if (!pool.mark.set(biti) && !pool.noscan.test(biti)) {
2052                         stack[stackPos++] = Range(base, base + pool.bPageOffsets[pn] * PAGESIZE);
2053                         if (stackPos == stack.length)
2054                             break;
2055                     }
2056                 }
2057                 else
2058                 {
2059                     // Don't mark bits in B_FREE pages
2060                     assert(bin == B_FREE);
2061                     continue;
2062                 }
2063             }
2064         }
2065 
2066         Range next=void;
2067         if (p1 < p2)
2068         {
2069             // local stack is full, push it to the global stack
2070             assert(stackPos == stack.length);
2071             toscan.push(Range(p1, p2));
2072             // reverse order for depth-first-order traversal
2073             foreach_reverse (ref rng; stack[0 .. $ - 1])
2074                 toscan.push(rng);
2075             stackPos = 0;
2076             next = stack[$-1];
2077         }
2078         else if (stackPos)
2079         {
2080             // pop range from local stack and recurse
2081             next = stack[--stackPos];
2082         }
2083         else if (!toscan.empty)
2084         {
2085             // pop range from global stack and recurse
2086             next = toscan.pop();
2087         }
2088         else
2089         {
2090             // nothing more to do
2091             return;
2092         }
2093         p1 = cast(void**)next.pbot;
2094         p2 = cast(void**)next.ptop;
2095         // printf("  pop [%p..%p] (%#zx)\n", p1, p2, cast(size_t)p2 - cast(size_t)p1);
2096         goto Lagain;
2097     }
2098 
2099     // collection step 1: prepare freebits and mark bits
2100     void prepare() nothrow
2101     {
2102         size_t n;
2103         Pool*  pool;
2104 
2105         for (n = 0; n < npools; n++)
2106         {
2107             pool = pooltable[n];
2108             pool.mark.zero();
2109             if (!pool.isLargeObject) pool.freebits.zero();
2110         }
2111 
2112         debug(COLLECT_PRINTF) printf("Set bits\n");
2113 
2114         // Mark each free entry, so it doesn't get scanned
2115         for (n = 0; n < B_PAGE; n++)
2116         {
2117             for (List *list = bucket[n]; list; list = list.next)
2118             {
2119                 pool = list.pool;
2120                 assert(pool);
2121                 pool.freebits.set(cast(size_t)(cast(void*)list - pool.baseAddr) / 16);
2122             }
2123         }
2124 
2125         debug(COLLECT_PRINTF) printf("Marked free entries.\n");
2126 
2127         for (n = 0; n < npools; n++)
2128         {
2129             pool = pooltable[n];
2130             if (!pool.isLargeObject)
2131             {
2132                 pool.mark.copy(&pool.freebits);
2133             }
2134         }
2135     }
2136 
2137     // collection step 2: mark roots and heap
2138     void markAll(bool nostack) nothrow
2139     {
2140         if (!nostack)
2141         {
2142             debug(COLLECT_PRINTF) printf("\tscan stacks.\n");
2143             // Scan stacks and registers for each paused thread
2144             thread_scanAll(&mark);
2145         }
2146 
2147         // Scan roots[]
2148         debug(COLLECT_PRINTF) printf("\tscan roots[]\n");
2149         foreach (root; roots)
2150         {
2151             mark(cast(void*)&root.proot, cast(void*)(&root.proot + 1));
2152         }
2153 
2154         // Scan ranges[]
2155         debug(COLLECT_PRINTF) printf("\tscan ranges[]\n");
2156         //log++;
2157         foreach (range; ranges)
2158         {
2159             debug(COLLECT_PRINTF) printf("\t\t%p .. %p\n", range.pbot, range.ptop);
2160             mark(range.pbot, range.ptop);
2161         }
2162         //log--;
2163     }
2164 
    // collection step 3: free all unreferenced objects
    size_t sweep() nothrow
    {
        // Free up everything not marked
        debug(COLLECT_PRINTF) printf("\tfree'ing\n");
        size_t freedLargePages;
        size_t freed;
        for (size_t n = 0; n < npools; n++)
        {
            size_t pn;
            Pool* pool = pooltable[n];

            if (pool.isLargeObject)
            {
                // Large object pools: one bit per page.  Only B_PAGE entries
                // (allocation heads) are examined; their B_PAGEPLUS tail
                // pages are freed together with the head below.
                for (pn = 0; pn < pool.npages; pn++)
                {
                    Bins bin = cast(Bins)pool.pagetable[pn];
                    if (bin > B_PAGE) continue;
                    size_t biti = pn;

                    if (!pool.mark.test(biti))
                    {
                        void *p = pool.baseAddr + pn * PAGESIZE;
                        // q skips the sentinel header (a no-op unless
                        // debug=SENTINEL is enabled).
                        void* q = sentinel_add(p);
                        sentinel_Invariant(q);

                        // clear() both tests and resets the FINALIZE bit.
                        if (pool.finals.nbits && pool.finals.clear(biti))
                        {
                            size_t size = pool.bPageOffsets[pn] * PAGESIZE - SENTINEL_EXTRA;
                            uint attr = pool.getBits(biti);
                            rt_finalizeFromGC(q, size, attr);
                        }

                        // Clear every attribute bit except FINALIZE, which
                        // was already consumed by the clear() call above.
                        pool.clrBits(biti, ~BlkAttr.NONE ^ BlkAttr.FINALIZE);

                        debug(COLLECT_PRINTF) printf("\tcollecting big %p\n", p);
                        log_free(q);
                        pool.pagetable[pn] = B_FREE;
                        if (pn < pool.searchStart) pool.searchStart = pn;
                        freedLargePages++;
                        pool.freepages++;

                        debug (MEMSTOMP) memset(p, 0xF3, PAGESIZE);
                        // Release the B_PAGEPLUS continuation pages of this
                        // allocation.
                        while (pn + 1 < pool.npages && pool.pagetable[pn + 1] == B_PAGEPLUS)
                        {
                            pn++;
                            pool.pagetable[pn] = B_FREE;

                            // Don't need to update searchStart here because
                            // pn is guaranteed to be greater than last time
                            // we updated it.

                            pool.freepages++;
                            freedLargePages++;

                            debug (MEMSTOMP)
                            {   p += PAGESIZE;
                                memset(p, 0xF3, PAGESIZE);
                            }
                        }
                        pool.largestFree = pool.freepages; // invalidate
                    }
                }
            }
            else
            {

                // Small object pools: walk every slot of every bucket page
                // and free the unmarked objects.
                for (pn = 0; pn < pool.npages; pn++)
                {
                    Bins bin = cast(Bins)pool.pagetable[pn];

                    if (bin < B_PAGE)
                    {
                        immutable size = binsize[bin];
                        void *p = pool.baseAddr + pn * PAGESIZE;
                        void *ptop = p + PAGESIZE;
                        immutable base = pn * (PAGESIZE/16);
                        immutable bitstride = size / 16;

                        bool freeBits;
                        PageBits toFree;

                        for (size_t i; p < ptop; p += size, i += bitstride)
                        {
                            immutable biti = base + i;

                            if (!pool.mark.test(biti))
                            {
                                void* q = sentinel_add(p);
                                sentinel_Invariant(q);

                                if (pool.finals.nbits && pool.finals.test(biti))
                                    rt_finalizeFromGC(q, size - SENTINEL_EXTRA, pool.getBits(biti));

                                // Batch the per-object bit updates; they are
                                // applied once per page in freePageBits below.
                                freeBits = true;
                                toFree.set(i);

                                debug(COLLECT_PRINTF) printf("\tcollecting %p\n", p);
                                log_free(sentinel_add(p));

                                debug (MEMSTOMP) memset(p, 0xF3, size);

                                freed += size;
                            }
                        }

                        if (freeBits)
                            pool.freePageBits(pn, toFree);
                    }
                }
            }
        }

        assert(freedLargePages <= usedLargePages);
        usedLargePages -= freedLargePages;
        debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedLargePages, npools);
        return freedLargePages;
    }
2283 
2284     // collection step 4: recover pages with no live objects, rebuild free lists
2285     size_t recover() nothrow
2286     {
2287         // init tail list
2288         List**[B_PAGE] tail = void;
2289         foreach (i, ref next; tail)
2290             next = &bucket[i];
2291 
2292         // Free complete pages, rebuild free list
2293         debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
2294         size_t freedSmallPages;
2295         for (size_t n = 0; n < npools; n++)
2296         {
2297             size_t pn;
2298             Pool* pool = pooltable[n];
2299 
2300             if (pool.isLargeObject)
2301                 continue;
2302 
2303             for (pn = 0; pn < pool.npages; pn++)
2304             {
2305                 Bins   bin = cast(Bins)pool.pagetable[pn];
2306                 size_t biti;
2307                 size_t u;
2308 
2309                 if (bin < B_PAGE)
2310                 {
2311                     size_t size = binsize[bin];
2312                     size_t bitstride = size / 16;
2313                     size_t bitbase = pn * (PAGESIZE / 16);
2314                     size_t bittop = bitbase + (PAGESIZE / 16);
2315                     void*  p;
2316 
2317                     biti = bitbase;
2318                     for (biti = bitbase; biti < bittop; biti += bitstride)
2319                     {
2320                         if (!pool.freebits.test(biti))
2321                             goto Lnotfree;
2322                     }
2323                     pool.pagetable[pn] = B_FREE;
2324                     if (pn < pool.searchStart) pool.searchStart = pn;
2325                     pool.freepages++;
2326                     freedSmallPages++;
2327                     continue;
2328 
2329                 Lnotfree:
2330                     p = pool.baseAddr + pn * PAGESIZE;
2331                     for (u = 0; u < PAGESIZE; u += size)
2332                     {
2333                         biti = bitbase + u / 16;
2334                         if (!pool.freebits.test(biti))
2335                             continue;
2336                         auto elem = cast(List *)(p + u);
2337                         elem.pool = pool;
2338                         *tail[bin] = elem;
2339                         tail[bin] = &elem.next;
2340                     }
2341                 }
2342             }
2343         }
2344         // terminate tail list
2345         foreach (ref next; tail)
2346             *next = null;
2347 
2348         assert(freedSmallPages <= usedSmallPages);
2349         usedSmallPages -= freedSmallPages;
2350         debug(COLLECT_PRINTF) printf("\trecovered pages = %d\n", freedSmallPages);
2351         return freedSmallPages;
2352     }
2353 
2354     /**
2355      * Return number of full pages free'd.
2356      */
2357     size_t fullcollect(bool nostack = false) nothrow
2358     {
2359         MonoTime start, stop, begin;
2360 
2361         if (config.profile)
2362         {
2363             begin = start = currTime;
2364         }
2365 
2366         debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
2367         //printf("\tpool address range = %p .. %p\n", minAddr, maxAddr);
2368 
2369         {
2370             // lock roots and ranges around suspending threads b/c they're not reentrant safe
2371             rangesLock.lock();
2372             rootsLock.lock();
2373             scope (exit)
2374             {
2375                 rangesLock.unlock();
2376                 rootsLock.unlock();
2377             }
2378             thread_suspendAll();
2379 
2380             prepare();
2381 
2382             if (config.profile)
2383             {
2384                 stop = currTime;
2385                 prepTime += (stop - start);
2386                 start = stop;
2387             }
2388 
2389             markAll(nostack);
2390 
2391             thread_processGCMarks(&isMarked);
2392             thread_resumeAll();
2393         }
2394 
2395         if (config.profile)
2396         {
2397             stop = currTime;
2398             markTime += (stop - start);
2399             Duration pause = stop - begin;
2400             if (pause > maxPauseTime)
2401                 maxPauseTime = pause;
2402             start = stop;
2403         }
2404 
2405         ConservativeGC._inFinalizer = true;
2406         size_t freedLargePages=void;
2407         {
2408             scope (failure) ConservativeGC._inFinalizer = false;
2409             freedLargePages = sweep();
2410             ConservativeGC._inFinalizer = false;
2411         }
2412 
2413         if (config.profile)
2414         {
2415             stop = currTime;
2416             sweepTime += (stop - start);
2417             start = stop;
2418         }
2419 
2420         immutable freedSmallPages = recover();
2421 
2422         if (config.profile)
2423         {
2424             stop = currTime;
2425             recoverTime += (stop - start);
2426             ++numCollections;
2427         }
2428 
2429         updateCollectThresholds();
2430 
2431         return freedLargePages + freedSmallPages;
2432     }
2433 
2434     /**
2435      * Returns true if the addr lies within a marked block.
2436      *
2437      * Warning! This should only be called while the world is stopped inside
2438      * the fullcollect function.
2439      */
2440     int isMarked(void *addr) scope nothrow
2441     {
2442         // first, we find the Pool this block is in, then check to see if the
2443         // mark bit is clear.
2444         auto pool = findPool(addr);
2445         if (pool)
2446         {
2447             auto offset = cast(size_t)(addr - pool.baseAddr);
2448             auto pn = offset / PAGESIZE;
2449             auto bins = cast(Bins)pool.pagetable[pn];
2450             size_t biti = void;
2451             if (bins <= B_PAGE)
2452             {
2453                 biti = (offset & notbinsize[bins]) >> pool.shiftBy;
2454             }
2455             else if (bins == B_PAGEPLUS)
2456             {
2457                 pn -= pool.bPageOffsets[pn];
2458                 biti = pn * (PAGESIZE >> pool.shiftBy);
2459             }
2460             else // bins == B_FREE
2461             {
2462                 assert(bins == B_FREE);
2463                 return IsMarked.no;
2464             }
2465             return pool.mark.test(biti) ? IsMarked.yes : IsMarked.no;
2466         }
2467         return IsMarked.unknown;
2468     }
2469 
2470 
2471     /***** Leak Detector ******/
2472 
2473 
    // Leak-detector bookkeeping, compiled in only with -debug=LOGGING.
    debug (LOGGING)
    {
        // Allocation log for the current collection cycle and the previous one.
        LogArray current;
        LogArray prev;


        // Reserve room for the allocation logs up front.
        void log_init()
        {
            //debug(PRINTF) printf("+log_init()\n");
            current.reserve(1000);
            prev.reserve(1000);
            //debug(PRINTF) printf("-log_init()\n");
        }


        // Record an allocation of `size` bytes at `p`, tagged with the
        // file/line stashed in GC.file/GC.line (both cleared after use).
        void log_malloc(void *p, size_t size) nothrow
        {
            //debug(PRINTF) printf("+log_malloc(p = %p, size = %zd)\n", p, size);
            Log log;

            log.p = p;
            log.size = size;
            log.line = GC.line;
            log.file = GC.file;
            log.parent = null;

            GC.line = 0;
            GC.file = null;

            current.push(log);
            //debug(PRINTF) printf("-log_malloc()\n");
        }


        // Drop the log entry for `p`; reports if `p` was never logged.
        void log_free(void *p) nothrow
        {
            //debug(PRINTF) printf("+log_free(%p)\n", p);
            auto i = current.find(p);
            if (i == OPFAIL)
            {
                debug(PRINTF) printf("free'ing unallocated memory %p\n", p);
            }
            else
                current.remove(i);
            //debug(PRINTF) printf("-log_free()\n");
        }


        // Print allocations new this cycle and all logged roots, then roll
        // `current` over into `prev` for the next cycle.
        void log_collect() nothrow
        {
            //debug(PRINTF) printf("+log_collect()\n");
            // Print everything in current that is not in prev

            debug(PRINTF) printf("New pointers this cycle: --------------------------------\n");
            size_t used = 0;
            for (size_t i = 0; i < current.dim; i++)
            {
                auto j = prev.find(current.data[i].p);
                if (j == OPFAIL)
                    current.data[i].print();
                else
                    used++;
            }

            debug(PRINTF) printf("All roots this cycle: --------------------------------\n");
            for (size_t i = 0; i < current.dim; i++)
            {
                // Entries whose parent lies outside every pool are roots.
                void* p = current.data[i].p;
                if (!findPool(current.data[i].parent))
                {
                    auto j = prev.find(current.data[i].p);
                    debug(PRINTF) printf(j == OPFAIL ? "N" : " ");
                    current.data[i].print();
                }
            }

            debug(PRINTF) printf("Used = %d-------------------------------------------------\n", used);
            prev.copy(&current);

            debug(PRINTF) printf("-log_collect()\n");
        }


        // Record `parent` as the owner of the logged allocation `p`.
        void log_parent(void *p, void *parent) nothrow
        {
            //debug(PRINTF) printf("+log_parent()\n");
            auto i = current.find(p);
            if (i == OPFAIL)
            {
                debug(PRINTF) printf("parent'ing unallocated memory %p, parent = %p\n", p, parent);
                Pool *pool;
                pool = findPool(p);
                assert(pool);
                size_t offset = cast(size_t)(p - pool.baseAddr);
                size_t biti;
                size_t pn = offset / PAGESIZE;
                Bins bin = cast(Bins)pool.pagetable[pn];
                biti = (offset & notbinsize[bin]);
                debug(PRINTF) printf("\tbin = %d, offset = x%x, biti = x%x\n", bin, offset, biti);
            }
            else
            {
                current.data[i].parent = parent;
            }
            //debug(PRINTF) printf("-log_parent()\n");
        }

    }
    else
    {
        // Logging disabled: all hooks compile away to empty functions.
        void log_init() nothrow { }
        void log_malloc(void *p, size_t size) nothrow { }
        void log_free(void *p) nothrow { }
        void log_collect() nothrow { }
        void log_parent(void *p, void *parent) nothrow { }
    }
2590 }
2591 
2592 /* ============================ Pool  =============================== */
2593 
struct Pool
{
    void* baseAddr;
    void* topAddr;
    GCBits mark;        // entries already scanned, or should not be scanned
    GCBits freebits;    // entries that are on the free list
    GCBits finals;      // entries that need finalizer run on them
    GCBits structFinals;// struct entries that need a finalizer run on them
    GCBits noscan;      // entries that should not be scanned
    GCBits appendable;  // entries that are appendable
    GCBits nointerior;  // interior pointers should be ignored.
                        // Only implemented for large object pools.
    size_t npages;
    size_t freepages;     // The number of pages not in use.
    ubyte* pagetable;     // one Bins value per page

    bool isLargeObject;

    uint shiftBy;    // shift count for the divisor used for determining bit indices.

    // This tracks how far back we have to go to find the nearest B_PAGE at
    // a smaller address than a B_PAGEPLUS.  To save space, we use a uint.
    // This limits individual allocations to 16 terabytes, assuming a 4k
    // pagesize.
    uint* bPageOffsets;

    // This variable tracks a conservative estimate of where the first free
    // page in this pool is, so that if a lot of pages towards the beginning
    // are occupied, we can bypass them in O(1).
    size_t searchStart;
    size_t largestFree; // upper limit for largest free chunk in large object pool

    /**
     * Map npages pages of memory and allocate the bookkeeping bit arrays
     * and page table.  If the OS mapping fails the pool is left empty
     * (npages == 0); a failed page table allocation is fatal.
     */
    void initialize(size_t npages, bool isLargeObject) nothrow
    {
        this.isLargeObject = isLargeObject;
        size_t poolsize;

        // Large object pools track one bit per page (4k), small object
        // pools one bit per 16-byte slot.
        shiftBy = isLargeObject ? 12 : 4;

        //debug(PRINTF) printf("Pool::Pool(%u)\n", npages);
        poolsize = npages * PAGESIZE;
        assert(poolsize >= POOLSIZE);
        baseAddr = cast(byte *)os_mem_map(poolsize);

        // Some of the code depends on page alignment of memory pools
        assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);

        if (!baseAddr)
        {
            //debug(PRINTF) printf("GC fail: poolsize = x%zx, errno = %d\n", poolsize, errno);
            //debug(PRINTF) printf("message = '%s'\n", sys_errlist[errno]);

            npages = 0;
            poolsize = 0;
        }
        //assert(baseAddr);
        topAddr = baseAddr + poolsize;
        auto nbits = cast(size_t)poolsize >> shiftBy;

        mark.alloc(nbits);

        // pagetable already keeps track of what's free for the large object
        // pool.
        if (!isLargeObject)
        {
            freebits.alloc(nbits);
        }

        noscan.alloc(nbits);
        appendable.alloc(nbits);

        pagetable = cast(ubyte*)cstdlib.malloc(npages);
        if (!pagetable)
            onOutOfMemoryErrorNoGC();

        if (isLargeObject)
        {
            bPageOffsets = cast(uint*)cstdlib.malloc(npages * uint.sizeof);
            if (!bPageOffsets)
                onOutOfMemoryErrorNoGC();
        }

        memset(pagetable, B_FREE, npages);

        this.npages = npages;
        this.freepages = npages;
        this.searchStart = 0;
        this.largestFree = npages;
    }


    /**
     * Unmap the pool's memory and release all bookkeeping allocations.
     * Safe to call on a partially initialized pool.
     */
    void Dtor() nothrow
    {
        if (baseAddr)
        {
            int result;

            if (npages)
            {
                result = os_mem_unmap(baseAddr, npages * PAGESIZE);
                assert(result == 0);
                npages = 0;
            }

            baseAddr = null;
            topAddr = null;
        }
        if (pagetable)
        {
            cstdlib.free(pagetable);
            pagetable = null;
        }

        if (bPageOffsets)
        {
            cstdlib.free(bPageOffsets);
            // Null the pointer like pagetable above so a second Dtor (or a
            // later inspection) never sees a dangling pointer.
            bPageOffsets = null;
        }

        mark.Dtor();
        if (isLargeObject)
        {
            nointerior.Dtor();
        }
        else
        {
            freebits.Dtor();
        }
        finals.Dtor();
        structFinals.Dtor();
        noscan.Dtor();
        appendable.Dtor();
    }

    /**
     * Collect the BlkAttr bits currently set for the block at bit index
     * biti.  Lazily-allocated bit arrays (finals, structFinals,
     * nointerior) are only consulted if they exist.
     */
    uint getBits(size_t biti) nothrow
    {
        uint bits;

        if (finals.nbits && finals.test(biti))
            bits |= BlkAttr.FINALIZE;
        if (structFinals.nbits && structFinals.test(biti))
            bits |= BlkAttr.STRUCTFINAL;
        if (noscan.test(biti))
            bits |= BlkAttr.NO_SCAN;
        if (nointerior.nbits && nointerior.test(biti))
            bits |= BlkAttr.NO_INTERIOR;
        if (appendable.test(biti))
            bits |= BlkAttr.APPENDABLE;
        return bits;
    }

    /**
     * Clear the BlkAttr bits selected by mask for the block at bit index
     * biti.
     */
    void clrBits(size_t biti, uint mask) nothrow
    {
        // Compute word index and the and-mask once; apply to each array.
        immutable dataIndex =  biti >> GCBits.BITS_SHIFT;
        immutable bitOffset = biti & GCBits.BITS_MASK;
        immutable keep = ~(GCBits.BITS_1 << bitOffset);

        if (mask & BlkAttr.FINALIZE && finals.nbits)
            finals.data[dataIndex] &= keep;

        if (structFinals.nbits && (mask & BlkAttr.STRUCTFINAL))
            structFinals.data[dataIndex] &= keep;

        if (mask & BlkAttr.NO_SCAN)
            noscan.data[dataIndex] &= keep;
        if (mask & BlkAttr.APPENDABLE)
            appendable.data[dataIndex] &= keep;
        if (nointerior.nbits && (mask & BlkAttr.NO_INTERIOR))
            nointerior.data[dataIndex] &= keep;
    }

    /**
     * Set the BlkAttr bits selected by mask for the block at bit index
     * biti.  The finals, structFinals and nointerior arrays are allocated
     * on first use.
     */
    void setBits(size_t biti, uint mask) nothrow
    {
        // Calculate the mask and bit offset once and then use it to
        // set all of the bits we need to set.
        immutable dataIndex = biti >> GCBits.BITS_SHIFT;
        immutable bitOffset = biti & GCBits.BITS_MASK;
        immutable orWith = GCBits.BITS_1 << bitOffset;

        if (mask & BlkAttr.STRUCTFINAL)
        {
            if (!structFinals.nbits)
                structFinals.alloc(mark.nbits);
            structFinals.data[dataIndex] |= orWith;
        }

        if (mask & BlkAttr.FINALIZE)
        {
            if (!finals.nbits)
                finals.alloc(mark.nbits);
            finals.data[dataIndex] |= orWith;
        }

        if (mask & BlkAttr.NO_SCAN)
        {
            noscan.data[dataIndex] |= orWith;
        }
//        if (mask & BlkAttr.NO_MOVE)
//        {
//            if (!nomove.nbits)
//                nomove.alloc(mark.nbits);
//            nomove.data[dataIndex] |= orWith;
//        }
        if (mask & BlkAttr.APPENDABLE)
        {
            appendable.data[dataIndex] |= orWith;
        }

        // NO_INTERIOR is only implemented for large object pools.
        if (isLargeObject && (mask & BlkAttr.NO_INTERIOR))
        {
            if (!nointerior.nbits)
                nointerior.alloc(mark.nbits);
            nointerior.data[dataIndex] |= orWith;
        }
    }

    /**
     * Apply the per-page batch of freed-slot bits collected during sweep:
     * set freebits and clear noscan/appendable (and finals/structFinals
     * where allocated) for every slot flagged in toFree.
     */
    void freePageBits(size_t pagenum, in ref PageBits toFree) nothrow
    {
        assert(!isLargeObject);
        assert(!nointerior.nbits); // only for large objects

        import core.internal.traits : staticIota;
        immutable beg = pagenum * (PAGESIZE / 16 / GCBits.BITS_PER_WORD);
        foreach (i; staticIota!(0, PageBits.length))
        {
            immutable w = toFree[i];
            if (!w) continue;

            immutable wi = beg + i;
            freebits.data[wi] |= w;
            noscan.data[wi] &= ~w;
            appendable.data[wi] &= ~w;
        }

        if (finals.nbits)
        {
            foreach (i; staticIota!(0, PageBits.length))
                if (toFree[i])
                    finals.data[beg + i] &= ~toFree[i];
        }

        if (structFinals.nbits)
        {
            foreach (i; staticIota!(0, PageBits.length))
                if (toFree[i])
                    structFinals.data[beg + i] &= ~toFree[i];
        }
    }

    /**
     * Given a pointer p inside the pool, return the pagenum.
     */
    size_t pagenumOf(void *p) const nothrow
    in
    {
        assert(p >= baseAddr);
        assert(p < topAddr);
    }
    body
    {
        return cast(size_t)(p - baseAddr) / PAGESIZE;
    }

    // True when no page of the pool is in use.
    @property bool isFree() const pure nothrow
    {
        return npages == freepages;
    }

    // Dispatch getSize to the large- or small-object implementation.
    size_t slGetSize(void* p) nothrow
    {
        if (isLargeObject)
            return (cast(LargeObjectPool*)&this).getSize(p);
        else
            return (cast(SmallObjectPool*)&this).getSize(p);
    }

    // Dispatch getInfo to the large- or small-object implementation.
    BlkInfo slGetInfo(void* p) nothrow
    {
        if (isLargeObject)
            return (cast(LargeObjectPool*)&this).getInfo(p);
        else
            return (cast(SmallObjectPool*)&this).getInfo(p);
    }


    void Invariant() const {}

    debug(INVARIANT)
    invariant()
    {
        //mark.Invariant();
        //scan.Invariant();
        //freebits.Invariant();
        //finals.Invariant();
        //structFinals.Invariant();
        //noscan.Invariant();
        //appendable.Invariant();
        //nointerior.Invariant();

        if (baseAddr)
        {
            //if (baseAddr + npages * PAGESIZE != topAddr)
                //printf("baseAddr = %p, npages = %d, topAddr = %p\n", baseAddr, npages, topAddr);
            assert(baseAddr + npages * PAGESIZE == topAddr);
        }

        if (pagetable !is null)
        {
            for (size_t i = 0; i < npages; i++)
            {
                Bins bin = cast(Bins)pagetable[i];
                assert(bin < B_MAX);
            }
        }
    }
}
2916 
struct LargeObjectPool
{
    Pool base;
    alias base this;

    /**
     * Rebuild the bPageOffsets bookkeeping for the large block whose head
     * page is `fromWhere`:
     *  - each trailing B_PAGEPLUS page records its distance back to the head,
     *  - the head (B_PAGE) page records the block's total length in pages.
     */
    void updateOffsets(size_t fromWhere) nothrow
    {
        assert(pagetable[fromWhere] == B_PAGE);
        size_t pn = fromWhere + 1;
        for (uint offset = 1; pn < npages; pn++, offset++)
        {
            if (pagetable[pn] != B_PAGEPLUS) break;
            bPageOffsets[pn] = offset;
        }

        // Store the size of the block in bPageOffsets[fromWhere].
        bPageOffsets[fromWhere] = cast(uint) (pn - fromWhere);
    }

    /**
     * Allocate n pages from Pool.
     * Returns OPFAIL on failure.
     */
    size_t allocPages(size_t n) nothrow
    {
        // Fast reject: the cached largest free run (an upper bound) is too
        // small, or there is no room at/after the cached search position.
        if (largestFree < n || searchStart + n > npages)
            return OPFAIL;

        //debug(PRINTF) printf("Pool::allocPages(n = %d)\n", n);
        size_t largest = 0;
        // Normalize searchStart: if it points into the middle of a live
        // block, hop back to its B_PAGE head, then skip the whole block.
        if (pagetable[searchStart] == B_PAGEPLUS)
        {
            searchStart -= bPageOffsets[searchStart]; // jump to B_PAGE
            searchStart += bPageOffsets[searchStart];
        }
        // Skip any further allocated blocks so searchStart lands on the
        // first free page (or runs off the end).
        while (searchStart < npages && pagetable[searchStart] == B_PAGE)
            searchStart += bPageOffsets[searchStart];

        // First-fit scan alternating between free runs and allocated blocks.
        for (size_t i = searchStart; i < npages; )
        {
            assert(pagetable[i] == B_FREE);
            // Measure the free run starting at i, capped at n pages.
            size_t p = 1;
            while (p < n && i + p < npages && pagetable[i + p] == B_FREE)
                p++;

            if (p == n)
                return i;

            if (p > largest)
                largest = p;

            i += p;
            while (i < npages && pagetable[i] == B_PAGE)
            {
                // we have the size information, so we skip a whole bunch of pages.
                i += bPageOffsets[i];
            }
        }

        // not enough free pages found, remember largest free chunk
        largestFree = largest;
        return OPFAIL;
    }

    /**
     * Free npages pages starting with pagenum.
     */
    void freePages(size_t pagenum, size_t npages) nothrow
    {
        //memset(&pagetable[pagenum], B_FREE, npages);
        // Let the next allocation scan consider the newly freed run.
        if (pagenum < searchStart)
            searchStart = pagenum;

        for (size_t i = pagenum; i < npages + pagenum; i++)
        {
            // Only pages that were actually in use (entries ordered below
            // B_FREE) bump the free-page counter.
            if (pagetable[i] < B_FREE)
            {
                freepages++;
            }

            pagetable[i] = B_FREE;
        }
        // freepages is an upper bound on any contiguous free run; using it
        // invalidates the cached value without an immediate rescan.
        largestFree = freepages; // invalidate
    }

    /**
     * Get size of pointer p in pool.
     */
    size_t getSize(void *p) const nothrow
    in
    {
        assert(p >= baseAddr);
        assert(p < topAddr);
    }
    body
    {
        size_t pagenum = pagenumOf(p);
        Bins bin = cast(Bins)pagetable[pagenum];
        // p must point into the head page of a large block; the head page
        // stores the block's length (in pages) in bPageOffsets.
        assert(bin == B_PAGE);
        return bPageOffsets[pagenum] * PAGESIZE;
    }

    /**
    * Return base address, size and attribute bits of the large block
    * containing p; an empty BlkInfo if p points at a free page.
    */
    BlkInfo getInfo(void* p) nothrow
    {
        BlkInfo info;

        size_t offset = cast(size_t)(p - baseAddr);
        size_t pn = offset / PAGESIZE;
        Bins bin = cast(Bins)pagetable[pn];

        // Interior pointers land on B_PAGEPLUS pages; step back to the head.
        if (bin == B_PAGEPLUS)
            pn -= bPageOffsets[pn];
        else if (bin != B_PAGE)
            return info;           // no info for free pages

        info.base = baseAddr + pn * PAGESIZE;
        info.size = bPageOffsets[pn] * PAGESIZE;

        info.attr = getBits(pn);
        return info;
    }

    /// Run the finalizer of every large object whose finalizer function lies
    /// in `segment` (an unloading binary), then free the object's pages.
    void runFinalizers(in void[] segment) nothrow
    {
        foreach (pn; 0 .. npages)
        {
            // Only head pages (B_PAGE and below) can carry a finals bit.
            Bins bin = cast(Bins)pagetable[pn];
            if (bin > B_PAGE)
                continue;
            size_t biti = pn;

            if (!finals.test(biti))
                continue;

            auto p = sentinel_add(baseAddr + pn * PAGESIZE);
            size_t size = bPageOffsets[pn] * PAGESIZE - SENTINEL_EXTRA;
            uint attr = getBits(biti);

            if (!rt_hasFinalizerInSegment(p, size, attr, segment))
                continue;

            rt_finalizeFromGC(p, size, attr);

            // Object is dead now: drop all of its attribute bits.
            clrBits(biti, ~BlkAttr.NONE);

            if (pn < searchStart)
                searchStart = pn;

            debug(COLLECT_PRINTF) printf("\tcollecting big %p\n", p);
            //log_free(sentinel_add(p));

            // Count the B_PAGEPLUS continuation pages to free the whole block.
            size_t n = 1;
            for (; pn + n < npages; ++n)
                if (pagetable[pn + n] != B_PAGEPLUS)
                    break;
            debug (MEMSTOMP) memset(baseAddr + pn * PAGESIZE, 0xF3, n * PAGESIZE);
            freePages(pn, n);
        }
    }
}
3080 
3081 
struct SmallObjectPool
{
    Pool base;
    alias base this;

    /**
    * Get size of pointer p in pool.
    */
    size_t getSize(void *p) const nothrow
    in
    {
        assert(p >= baseAddr);
        assert(p < topAddr);
    }
    body
    {
        size_t pagenum = pagenumOf(p);
        Bins bin = cast(Bins)pagetable[pagenum];
        // Small-object pages are always tagged with a size-class bin.
        assert(bin < B_PAGE);
        return binsize[bin];
    }

    /// Return base address, size and attribute bits of the small object
    /// containing p; an empty BlkInfo if the page holds no small objects.
    BlkInfo getInfo(void* p) nothrow
    {
        BlkInfo info;
        size_t offset = cast(size_t)(p - baseAddr);
        size_t pn = offset / PAGESIZE;
        Bins   bin = cast(Bins)pagetable[pn];

        if (bin >= B_PAGE)
            return info;

        // Masking with notbinsize rounds the (possibly interior) pointer
        // down to the start of its fixed-size slot.
        info.base = cast(void*)((cast(size_t)p) & notbinsize[bin]);
        info.size = binsize[bin];
        // Attribute bits are indexed by the slot's offset scaled by shiftBy.
        offset = info.base - baseAddr;
        info.attr = getBits(cast(size_t)(offset >> shiftBy));

        return info;
    }

    /// Run the finalizer of every live small object whose finalizer function
    /// lies in `segment` (an unloading binary), then release its slot.
    void runFinalizers(in void[] segment) nothrow
    {
        foreach (pn; 0 .. npages)
        {
            Bins bin = cast(Bins)pagetable[pn];
            if (bin >= B_PAGE)
                continue;

            immutable size = binsize[bin];
            auto p = baseAddr + pn * PAGESIZE;
            const ptop = p + PAGESIZE;
            // Bit indices advance in 16-byte units across the pool.
            immutable base = pn * (PAGESIZE/16);
            immutable bitstride = size / 16;

            bool freeBits;
            PageBits toFree;

            for (size_t i; p < ptop; p += size, i += bitstride)
            {
                immutable biti = base + i;

                if (!finals.test(biti))
                    continue;

                auto q = sentinel_add(p);
                uint attr = getBits(biti);

                if (!rt_hasFinalizerInSegment(q, size, attr, segment))
                    continue;

                rt_finalizeFromGC(q, size, attr);

                // Defer the actual free until the page has been fully
                // scanned; remember the slot in a page-local bitmap.
                freeBits = true;
                toFree.set(i);

                debug(COLLECT_PRINTF) printf("\tcollecting %p\n", p);
                //log_free(sentinel_add(p));

                debug (MEMSTOMP) memset(p, 0xF3, size);
            }

            if (freeBits)
                freePageBits(pn, toFree);
        }
    }

    /**
    * Allocate a page of bin's.
    * Returns:
    *           head of a single linked list of new entries
    */
    List* allocPage(Bins bin) nothrow
    {
        // Find the first free page at or after the cached search position.
        size_t pn;
        for (pn = searchStart; pn < npages; pn++)
            if (pagetable[pn] == B_FREE)
                goto L1;

        return null;

    L1:
        searchStart = pn + 1;
        pagetable[pn] = cast(ubyte)bin;
        freepages--;

        // Convert page to free list
        size_t size = binsize[bin];
        void* p = baseAddr + pn * PAGESIZE;
        void* ptop = p + PAGESIZE - size;
        auto first = cast(List*) p;

        // Thread every slot of the page onto a singly linked free list;
        // each node also remembers its owning pool.
        for (; p < ptop; p += size)
        {
            (cast(List *)p).next = cast(List *)(p + size);
            (cast(List *)p).pool = &base;
        }
        // Terminate the list at the page's last slot.
        (cast(List *)p).next = null;
        (cast(List *)p).pool = &base;
        return first;
    }
}
3203 
unittest // bugzilla 14467
{
    // An empty tail slice of an array must still report nonzero capacity.
    auto a = new int[](10);
    assert(a.capacity);
    a = a[$ .. $];
    assert(a.capacity);
}
3211 
unittest // bugzilla 15353
{
    import core.memory : GC;

    static struct Foo
    {
        ~this()
        {
            GC.free(buf); // ignored in finalizer
        }

        void* buf;
    }
    // The Foo instance is unreferenced, so the collection below finalizes
    // it; GC.free called from inside a finalizer must be silently ignored
    // rather than corrupting the collector's state.
    new Foo(GC.malloc(10));
    GC.collect();
}
3228 
unittest // bugzilla 15822
{
    import core.memory : GC;

    ubyte[16] buf;
    static struct Foo
    {
        ~this()
        {
            // Removing a registered range/root from inside a finalizer
            // (i.e. during a collection) must be safe.
            GC.removeRange(ptr);
            GC.removeRoot(ptr);
        }

        ubyte* ptr;
    }
    GC.addRoot(buf.ptr);
    GC.addRange(buf.ptr, buf.length);
    // The Foo instance is unreferenced and gets finalized by the collection.
    new Foo(buf.ptr);
    GC.collect();
}
3249 
unittest // bugzilla 1180
{
    import core.exception;

    // A near-size_t.max allocation must raise OutOfMemoryError rather
    // than crash or overflow internal size computations.
    try
    {
        byte[] hugeBuf = new byte[size_t.max - 100];
    }
    catch (OutOfMemoryError)
    {
        // expected: an allocation this large can never succeed
    }
}
3262 
3263 /* ============================ SENTINEL =============================== */
3264 
3265 
debug (SENTINEL)
{
    // Sentinel layout around every allocation (p is the user pointer):
    //   [size_t size][size_t SENTINEL_PRE][user data ...][ubyte SENTINEL_POST]
    // A corrupted pre/post guard indicates a buffer under-/overrun.
    const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
    const ubyte SENTINEL_POST = 0xF5;           // 8 bits
    const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;


    // Accessors for the hidden words surrounding the user pointer p.
    inout(size_t*) sentinel_size(inout void *p) nothrow { return &(cast(inout size_t *)p)[-2]; }
    inout(size_t*) sentinel_pre(inout void *p)  nothrow { return &(cast(inout size_t *)p)[-1]; }
    inout(ubyte*) sentinel_post(inout void *p)  nothrow { return &(cast(inout ubyte *)p)[*sentinel_size(p)]; }


    // Record the requested size and write both guard values for a fresh
    // allocation.
    void sentinel_init(void *p, size_t size) nothrow
    {
        *sentinel_size(p) = size;
        *sentinel_pre(p) = SENTINEL_PRE;
        *sentinel_post(p) = SENTINEL_POST;
    }


    // Verify both guards; in non-debug (release) builds report corruption
    // via onInvalidMemoryOperationError instead of assert.
    void sentinel_Invariant(const void *p) nothrow
    {
        debug
        {
            assert(*sentinel_pre(p) == SENTINEL_PRE);
            assert(*sentinel_post(p) == SENTINEL_POST);
        }
        else if (*sentinel_pre(p) != SENTINEL_PRE || *sentinel_post(p) != SENTINEL_POST)
            onInvalidMemoryOperationError(); // also trigger in release build
    }


    // Translate from the raw block start to the user pointer, which sits
    // past the hidden size and pre-guard words.
    void *sentinel_add(void *p) nothrow
    {
        return p + 2 * size_t.sizeof;
    }


    // Inverse of sentinel_add: user pointer back to the raw block start.
    void *sentinel_sub(void *p) nothrow
    {
        return p - 2 * size_t.sizeof;
    }
}
else
{
    // Sentinel checking disabled: all helpers collapse to no-ops so call
    // sites need no conditional compilation.
    const uint SENTINEL_EXTRA = 0;


    void sentinel_init(void *p, size_t size) nothrow
    {
    }


    void sentinel_Invariant(const void *p) nothrow
    {
    }


    void *sentinel_add(void *p) nothrow
    {
        return p;
    }


    void *sentinel_sub(void *p) nothrow
    {
        return p;
    }
}
3335 
debug (MEMSTOMP)
unittest
{
    import core.memory;
    // Fresh allocations are stomped with 0xF0 ...
    auto p = cast(uint*)GC.malloc(uint.sizeof*3);
    assert(*p == 0xF0F0F0F0);
    p[2] = 0; // First two will be used for free list
    // ... and freed memory (past the free-list words) with 0xF2.
    GC.free(p);
    assert(p[2] == 0xF2F2F2F2);
}
3346 
debug (SENTINEL)
unittest
{
    import core.memory;
    // A 1-byte allocation is bracketed by the pre guard (last byte of
    // SENTINEL_PRE) and the post guard byte.
    auto p = cast(ubyte*)GC.malloc(1);
    assert(p[-1] == 0xF4);
    assert(p[ 1] == 0xF5);
/*
    p[1] = 0;
    bool thrown;
    try
        GC.free(p);
    catch (Error e)
        thrown = true;
    p[1] = 0xF5;
    assert(thrown);
*/
}
3365 
unittest
{
    import core.memory;

    // https://issues.dlang.org/show_bug.cgi?id=9275
    // removeRoot on a pointer that was never registered (null or bogus)
    // must be a harmless no-op.
    foreach (bogus; [null, cast(void*) 13])
        GC.removeRoot(bogus);
}
3374 
// improve predictability of coverage of code that is eventually not hit by other tests
unittest
{
    import core.memory;
    // The exact sizes below are chosen to land all allocations in one pool
    // and to steer execution into rarely exercised branches; do not change
    // them without re-checking which paths they cover.
    auto p = GC.malloc(260 << 20); // new pool has 390 MB
    auto q = GC.malloc(65 << 20);  // next chunk (larger than 64MB to ensure the same pool is used)
    auto r = GC.malloc(65 << 20);  // another chunk in same pool
    assert(p + (260 << 20) == q);
    assert(q + (65 << 20) == r);
    GC.free(q);
    // should trigger "assert(bin == B_FREE);" in mark due to dangling pointer q:
    GC.collect();
    // should trigger "break;" in extendNoSync:
    size_t sz = GC.extend(p, 64 << 20, 66 << 20); // trigger size after p large enough (but limited)
    assert(sz == 325 << 20);
    GC.free(p);
    GC.free(r);
    r = q; // ensure q is not trashed before collection above

    p = GC.malloc(70 << 20); // from the same pool
    q = GC.malloc(70 << 20);
    r = GC.malloc(70 << 20);
    auto s = GC.malloc(70 << 20);
    auto t = GC.malloc(70 << 20); // 350 MB of 390 MB used
    assert(p + (70 << 20) == q);
    assert(q + (70 << 20) == r);
    assert(r + (70 << 20) == s);
    assert(s + (70 << 20) == t);
    GC.free(r); // ensure recalculation of largestFree in next allocPages
    auto z = GC.malloc(75 << 20); // needs new pool

    GC.free(p);
    GC.free(q);
    GC.free(s);
    GC.free(t);
    GC.free(z);
    GC.minimize(); // release huge pool
}
3413 
3414