1 // Written in the D programming language.
2 /**
3 Source: $(PHOBOSSRC std/experimental/allocator/building_blocks/region.d)
4 */
5 module std.experimental.allocator.building_blocks.region;
6 
7 import std.experimental.allocator.building_blocks.null_allocator;
8 import std.experimental.allocator.common;
9 import std.typecons : Flag, Yes, No;
10 
11 version (OSX)
12     version = Darwin;
13 else version (iOS)
14     version = Darwin;
15 else version (TVOS)
16     version = Darwin;
17 else version (WatchOS)
18     version = Darwin;
19 
20 /**
21 A `Region` allocator allocates memory straight from one contiguous chunk.
22 There is no deallocation, and once the region is full, allocation requests
23 return `null`. Therefore, `Region`s are often used (a) in conjunction with
24 more sophisticated allocators; or (b) for batch-style very fast allocations
25 that deallocate everything at once.
26 
27 The region only stores three pointers, corresponding to the current position in
28 the store and the limits. One allocation entails rounding up the allocation
29 size for alignment purposes, bumping the current pointer, and comparing it
30 against the limit.
31 
32 If `ParentAllocator` is different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator), `Region`
33 deallocates the chunk of memory during destruction.
34 
35 The `minAlign` parameter establishes alignment. If $(D minAlign > 1), the
36 sizes of all allocation requests are rounded up to a multiple of `minAlign`.
37 Applications aiming at maximum speed may want to choose $(D minAlign = 1) and
38 control alignment externally.
39 
40 */
41 struct Region(ParentAllocator = NullAllocator,
42     uint minAlign = platformAlignment,
43     Flag!"growDownwards" growDownwards = No.growDownwards)
44 {
45     static assert(minAlign.isGoodStaticAlignment);
46     static assert(ParentAllocator.alignment >= minAlign);
47 
48     import std.traits : hasMember;
49     import std.typecons : Ternary;
50 
51     // state
52     /**
53     The _parent allocator. Depending on whether `ParentAllocator` holds state
54     or not, this is a member variable or an alias for
55     `ParentAllocator.instance`.
56     */
57     static if (stateSize!ParentAllocator)
58     {
59         ParentAllocator parent;
60     }
61     else
62     {
63         alias parent = ParentAllocator.instance;
64     }
65 
66     private void* _current, _begin, _end;
67 
68     private void* roundedBegin() const pure nothrow @trusted @nogc
69     {
70         return cast(void*) roundUpToAlignment(cast(size_t) _begin, alignment);
71     }
72 
73     private void* roundedEnd() const pure nothrow @trusted @nogc
74     {
75         return cast(void*) roundDownToAlignment(cast(size_t) _end, alignment);
76     }
77     /**
78     Constructs a region backed by a user-provided store.
79     Assumes the memory was allocated with `ParentAllocator`
80     (if different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator)).
81 
82     Params:
83         store = User-provided store backing up the region. If $(D
84         ParentAllocator) is different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator), memory is assumed to
85         have been allocated with `ParentAllocator`.
86         n = Bytes to allocate using `ParentAllocator`. This constructor is only
87         defined if `ParentAllocator` is different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator). If
88         `parent.allocate(n)` returns `null`, the region will be initialized
89         as empty (correctly initialized but unable to allocate).
90     */
91     this(ubyte[] store) pure nothrow @nogc
92     {
93         _begin = store.ptr;
94         _end = store.ptr + store.length;
95         static if (growDownwards)
96             _current = roundedEnd();
97         else
98             _current = roundedBegin();
99     }
100 
101     /// Ditto
102     static if (!is(ParentAllocator == NullAllocator) && !stateSize!ParentAllocator)
103     this(size_t n)
104     {
105         this(cast(ubyte[]) (parent.allocate(n.roundUpToAlignment(alignment))));
106     }
107 
108     /// Ditto
109     static if (!is(ParentAllocator == NullAllocator) && stateSize!ParentAllocator)
110     this(ParentAllocator parent, size_t n)
111     {
112         this.parent = parent;
113         this(cast(ubyte[]) (parent.allocate(n.roundUpToAlignment(alignment))));
114     }
115 
116     /*
117     TODO: The postblit of `BasicRegion` should be disabled because such objects
118     should not be copied around naively.
119     */
120 
121     /**
122     If `ParentAllocator` is not $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator) and defines `deallocate`,
123     the region defines a destructor that uses `ParentAllocator.deallocate` to free the
124     memory chunk.
125     */
126     static if (!is(ParentAllocator == NullAllocator)
127         && hasMember!(ParentAllocator, "deallocate"))
128     ~this()
129     {
130         parent.deallocate(_begin[0 .. _end - _begin]);
131     }
132 
133     /**
134     Rounds the given size to a multiple of the `alignment`
135     */
136     size_t goodAllocSize(size_t n) const pure nothrow @safe @nogc
137     {
138         return n.roundUpToAlignment(alignment);
139     }
140 
141     /**
142     Alignment offered.
143     */
144     alias alignment = minAlign;
145 
146     /**
147     Allocates `n` bytes of memory. The shortest path involves an alignment
148     adjustment (if $(D alignment > 1)), an increment, and a comparison.
149 
150     Params:
151         n = number of bytes to allocate
152 
153     Returns:
154         A properly-aligned buffer of size `n` or `null` if the request could not
155         be satisfied.
156     */
157     void[] allocate(size_t n) pure nothrow @trusted @nogc
158     {
159         const rounded = goodAllocSize(n);
160         if (n == 0 || rounded < n || available < rounded) return null;
161 
162         static if (growDownwards)
163         {
164             assert(available >= rounded);
165             auto result = (_current - rounded)[0 .. n];
166             assert(result.ptr >= _begin);
167             _current = result.ptr;
168             assert(owns(result) == Ternary.yes);
169         }
170         else
171         {
172             auto result = _current[0 .. n];
173             _current += rounded;
174         }
175 
176         return result;
177     }
178 
179     /**
180     Allocates `n` bytes of memory aligned at alignment `a`.
181 
182     Params:
183         n = number of bytes to allocate
184         a = alignment for the allocated block
185 
186     Returns:
187         Either a suitable block of `n` bytes aligned at `a`, or `null`.
188     */
189     void[] alignedAllocate(size_t n, uint a) pure nothrow @trusted @nogc
190     {
191         import std.math.traits : isPowerOf2;
192         assert(a.isPowerOf2);
193 
194         const rounded = goodAllocSize(n);
195         if (n == 0 || rounded < n || available < rounded) return null;
196 
197         static if (growDownwards)
198         {
199             auto tmpCurrent = _current - rounded;
200             auto result = tmpCurrent.alignDownTo(a);
201             if (result <= tmpCurrent && result >= _begin)
202             {
203                 _current = result;
204                 return cast(void[]) result[0 .. n];
205             }
206         }
207         else
208         {
209             // Just bump the pointer to the next good allocation
210             auto newCurrent = _current.alignUpTo(a);
211             if (newCurrent < _current || newCurrent > _end)
212                 return null;
213 
214             auto save = _current;
215             _current = newCurrent;
216             auto result = allocate(n);
217             if (result.ptr)
218             {
219                 assert(result.length == n);
220                 return result;
221             }
222             // Failed, rollback
223             _current = save;
224         }
225         return null;
226     }
227 
228     /// Allocates and returns all memory available to this region.
229     void[] allocateAll() pure nothrow @trusted @nogc
230     {
231         static if (growDownwards)
232         {
233             auto result = _begin[0 .. available];
234             _current = _begin;
235         }
236         else
237         {
238             auto result = _current[0 .. available];
239             _current = _end;
240         }
241         return result;
242     }
243 
244     /**
245     Expands an allocated block in place. Expansion will succeed only if the
246     block is the last allocated. Defined only if `growDownwards` is
247     `No.growDownwards`.
248     */
249     static if (growDownwards == No.growDownwards)
250     bool expand(ref void[] b, size_t delta) pure nothrow @safe @nogc
251     {
252         assert(owns(b) == Ternary.yes || b is null);
253         assert((() @trusted => b.ptr + b.length <= _current)() || b is null);
254         if (b is null || delta == 0) return delta == 0;
255         auto newLength = b.length + delta;
256         if ((() @trusted => _current < b.ptr + b.length + alignment)())
257         {
258             immutable currentGoodSize = this.goodAllocSize(b.length);
259             immutable newGoodSize = this.goodAllocSize(newLength);
260             immutable goodDelta = newGoodSize - currentGoodSize;
261             // This was the last allocation! Allocate some more and we're done.
262             if (goodDelta == 0
263                 || (() @trusted => allocate(goodDelta).length == goodDelta)())
264             {
265                 b = (() @trusted => b.ptr[0 .. newLength])();
266                 assert((() @trusted => _current < b.ptr + b.length + alignment)());
267                 return true;
268             }
269         }
270         return false;
271     }
272 
273     /**
274     Deallocates `b`. This works only if `b` was obtained as the last call
275     to `allocate`; otherwise (i.e. another allocation has occurred since) it
276     does nothing.
277 
278     Params:
279         b = Block previously obtained by a call to `allocate` against this
280         allocator (`null` is allowed).
281     */
282     bool deallocate(void[] b) pure nothrow @nogc
283     {
284         assert(owns(b) == Ternary.yes || b.ptr is null);
285         auto rounded = goodAllocSize(b.length);
286         static if (growDownwards)
287         {
288             if (b.ptr == _current)
289             {
290                 _current += rounded;
291                 return true;
292             }
293         }
294         else
295         {
296             if (b.ptr + rounded == _current)
297             {
298                 assert(b.ptr !is null || _current is null);
299                 _current = b.ptr;
300                 return true;
301             }
302         }
303         return false;
304     }
305 
306     /**
307     Deallocates all memory allocated by this region, which can be subsequently
308     reused for new allocations.
309     */
310     bool deallocateAll() pure nothrow @nogc
311     {
312         static if (growDownwards)
313         {
314             _current = roundedEnd();
315         }
316         else
317         {
318             _current = roundedBegin();
319         }
320         return true;
321     }
322 
323     /**
324     Queries whether `b` has been allocated with this region.
325 
326     Params:
327         b = Arbitrary block of memory (`null` is allowed; `owns(null)` returns
328         `false`).
329 
330     Returns:
331         `true` if `b` has been allocated with this region, `false` otherwise.
332     */
333     Ternary owns(const void[] b) const pure nothrow @trusted @nogc
334     {
335         return Ternary(b && (&b[0] >= _begin) && (&b[0] + b.length <= _end));
336     }
337 
338     /**
339     Returns `Ternary.yes` if no memory has been allocated in this region,
340     `Ternary.no` otherwise. (Never returns `Ternary.unknown`.)
341     */
342     Ternary empty() const pure nothrow @safe @nogc
343     {
344         static if (growDownwards)
345             return Ternary(_current == roundedEnd());
346         else
347             return Ternary(_current == roundedBegin());
348     }
349 
350     /// Nonstandard property that returns bytes available for allocation.
351     size_t available() const @safe pure nothrow @nogc
352     {
353         static if (growDownwards)
354         {
355             return _current - _begin;
356         }
357         else
358         {
359             return _end - _current;
360         }
361     }
362 }
363 
364 ///
365 @system nothrow unittest
366 {
367     import std.algorithm.comparison : max;
368     import std.experimental.allocator.building_blocks.allocator_list
369         : AllocatorList;
370     import std.experimental.allocator.mallocator : Mallocator;
371     import std.typecons : Ternary;
372     // Create a scalable list of regions. Each gets at least 1MB at a time by
373     // using malloc.
374     auto batchAllocator = AllocatorList!(
375         (size_t n) => Region!Mallocator(max(n, 1024 * 1024))
376     )();
377     assert(batchAllocator.empty ==  Ternary.yes);
378     auto b = batchAllocator.allocate(101);
379     assert(b.length == 101);
380     assert(batchAllocator.empty ==  Ternary.no);
381     // This will cause a second allocation
382     b = batchAllocator.allocate(2 * 1024 * 1024);
383     assert(b.length == 2 * 1024 * 1024);
384     // Destructor will free the memory
385 }
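
// An added illustrative sketch (not part of the upstream tests): a `Region`
// backed by a user-provided buffer instead of a parent allocator. The 4096-byte
// buffer size is an arbitrary choice for the example.
@system nothrow @nogc unittest
{
    import std.typecons : Ternary;

    ubyte[4096] store;
    auto reg = Region!(NullAllocator)(store[]);
    assert(reg.empty == Ternary.yes);

    auto b = reg.allocate(64);
    assert(b.length == 64);
    assert(reg.owns(b) == Ternary.yes);
    assert(reg.available <= 4096 - 64);

    // Individual interior blocks cannot be freed; reset everything at once.
    assert(reg.deallocateAll());
    assert(reg.empty == Ternary.yes);
}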
386 
387 @system nothrow @nogc unittest
388 {
389     import std.experimental.allocator.mallocator : Mallocator;
390     import std.typecons : Ternary;
391 
392     static void testAlloc(Allocator)(ref Allocator a)
393     {
394         assert((() pure nothrow @safe @nogc => a.empty)() ==  Ternary.yes);
395         const b = a.allocate(101);
396         assert(b.length == 101);
397         assert((() nothrow @safe @nogc => a.owns(b))() == Ternary.yes);
398 
399         // Ensure deallocate inherits from parent allocators
400         auto c = a.allocate(42);
401         assert(c.length == 42);
402         assert((() nothrow @nogc => a.deallocate(c))());
403         assert((() pure nothrow @safe @nogc => a.empty)() ==  Ternary.no);
404     }
405 
406     // Create a 64 KB region allocated with malloc
407     auto reg = Region!(Mallocator, Mallocator.alignment,
408         Yes.growDownwards)(1024 * 64);
409     testAlloc(reg);
410 
411     // Create a 64 KB shared region allocated with malloc
412     auto sharedReg = SharedRegion!(Mallocator, Mallocator.alignment,
413         Yes.growDownwards)(1024 * 64);
414     testAlloc(sharedReg);
415 }
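
// Added sketch of the `growDownwards` policy: blocks are carved from the high
// end of the buffer, so a later allocation ends up at a lower address than an
// earlier one. Buffer size and alignment below are arbitrary for illustration.
@system nothrow @nogc unittest
{
    ubyte[256] store;
    auto reg = Region!(NullAllocator, 16, Yes.growDownwards)(store[]);
    auto first = reg.allocate(16);
    auto second = reg.allocate(16);
    assert(first.length == 16 && second.length == 16);
    assert(second.ptr < first.ptr);
}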
416 
417 @system nothrow @nogc unittest
418 {
419     import std.experimental.allocator.mallocator : AlignedMallocator;
420     import std.typecons : Ternary;
421 
422     ubyte[] buf = cast(ubyte[]) AlignedMallocator.instance.alignedAllocate(64, 64);
423     auto reg = Region!(NullAllocator, 64, Yes.growDownwards)(buf);
424     assert(reg.alignedAllocate(10, 32).length == 10);
425     assert(!reg.available);
426 }
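
// Added sketch of `allocateAll`: whatever is left in the region is handed out
// as one contiguous block, after which `available` is zero until the region is
// reset with `deallocateAll`. The 1024-byte size is arbitrary.
@system nothrow @nogc unittest
{
    import std.experimental.allocator.mallocator : Mallocator;

    auto reg = Region!(Mallocator)(1024);
    auto first = reg.allocate(16);
    assert(first.length == 16);

    auto rest = reg.allocateAll();
    assert(rest.length > 0);
    assert(!reg.available);

    // Further requests fail until the region is reset.
    assert(reg.allocate(1) is null);
    assert(reg.deallocateAll());
    assert(reg.available > 0);
}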
427 
428 @system nothrow @nogc unittest
429 {
430     // test 'this(ubyte[] store)' constructed regions properly clean up
431     // their inner storage after destruction
432     import std.experimental.allocator.mallocator : Mallocator;
433 
434     static shared struct LocalAllocator
435     {
436     nothrow @nogc:
437         enum alignment = Mallocator.alignment;
438         void[] buf;
439         bool deallocate(void[] b)
440         {
441             assert(buf.ptr == b.ptr && buf.length == b.length);
442             return true;
443         }
444 
445         void[] allocate(size_t n)
446         {
447             return null;
448         }
449 
450     }
451 
452     enum bufLen = 10 * Mallocator.alignment;
453     void[] tmp = Mallocator.instance.allocate(bufLen);
454 
455     LocalAllocator a;
456     a.buf = cast(typeof(a.buf)) tmp[1 .. $];
457 
458     auto reg = Region!(LocalAllocator, Mallocator.alignment,
459         Yes.growDownwards)(cast(ubyte[]) a.buf);
460     auto sharedReg = SharedRegion!(LocalAllocator, Mallocator.alignment,
461         Yes.growDownwards)(cast(ubyte[]) a.buf);
462     reg.parent = a;
463     sharedReg.parent = a;
464 
465     Mallocator.instance.deallocate(tmp);
466 }
467 
468 version (StdUnittest)
469 @system unittest
470 {
471     import std.experimental.allocator.mallocator : Mallocator;
472 
473     testAllocator!(() => Region!(Mallocator)(1024 * 64));
474     testAllocator!(() => Region!(Mallocator, Mallocator.alignment, Yes.growDownwards)(1024 * 64));
475 
476     testAllocator!(() => SharedRegion!(Mallocator)(1024 * 64));
477     testAllocator!(() => SharedRegion!(Mallocator, Mallocator.alignment, Yes.growDownwards)(1024 * 64));
478 }
479 
480 @system nothrow @nogc unittest
481 {
482     import std.experimental.allocator.mallocator : Mallocator;
483 
484     auto reg = Region!(Mallocator)(1024 * 64);
485     auto b = reg.allocate(101);
486     assert(b.length == 101);
487     assert((() pure nothrow @safe @nogc => reg.expand(b, 20))());
488     assert((() pure nothrow @safe @nogc => reg.expand(b, 73))());
489     assert((() pure nothrow @safe @nogc => !reg.expand(b, 1024 * 64))());
490     assert((() nothrow @nogc => reg.deallocateAll())());
491 }
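
// Added sketch of the LIFO `deallocate` behaviour: only the block obtained by
// the most recent `allocate` can be rolled back; older blocks are refused.
@system nothrow @nogc unittest
{
    import std.experimental.allocator.mallocator : Mallocator;
    import std.typecons : Ternary;

    auto reg = Region!(Mallocator)(1024 * 64);
    auto a = reg.allocate(32);
    auto b = reg.allocate(32);
    assert(!reg.deallocate(a)); // not the most recent allocation
    assert(reg.deallocate(b));  // most recent allocation, pointer is bumped back
    assert(reg.deallocate(a));  // now `a` is the most recent one
    assert(reg.empty == Ternary.yes);
}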
492 
493 /**
494 
495 `InSituRegion` is a convenient region that carries its storage within itself
496 (in the form of a statically-sized array).
497 
498 The first template argument is the size of the region and the second is the
499 needed alignment. Depending on the alignment requested and platform details,
500 the actual available storage may be smaller than the compile-time parameter. To
501 make sure that at least `n` bytes are available in the region, use
502 $(D InSituRegion!(n + a - 1, a)).
503 
504 Given that the most frequent use of `InSituRegion` is as a stack allocator, it
505 allocates starting at the end on systems where the stack grows downwards, such that
506 hot memory is used first.
507 
508 */
509 struct InSituRegion(size_t size, size_t minAlign = platformAlignment)
510 {
511     import std.algorithm.comparison : max;
512     import std.conv : to;
513     import std.traits : hasMember;
514     import std.typecons : Ternary;
515 
516     static assert(minAlign.isGoodStaticAlignment);
517     static assert(size >= minAlign);
518 
519     version (X86) enum growDownwards = Yes.growDownwards;
520     else version (X86_64) enum growDownwards = Yes.growDownwards;
521     else version (ARM) enum growDownwards = Yes.growDownwards;
522     else version (AArch64) enum growDownwards = Yes.growDownwards;
523     else version (HPPA) enum growDownwards = No.growDownwards;
524     else version (PPC) enum growDownwards = Yes.growDownwards;
525     else version (PPC64) enum growDownwards = Yes.growDownwards;
526     else version (RISCV32) enum growDownwards = Yes.growDownwards;
527     else version (RISCV64) enum growDownwards = Yes.growDownwards;
528     else version (MIPS32) enum growDownwards = Yes.growDownwards;
529     else version (MIPS64) enum growDownwards = Yes.growDownwards;
530     else version (SPARC) enum growDownwards = Yes.growDownwards;
531     else version (SPARC64) enum growDownwards = Yes.growDownwards;
532     else version (SystemZ) enum growDownwards = Yes.growDownwards;
533     else static assert(0, "Dunno how the stack grows on this architecture.");
534 
535     @disable this(this);
536 
537     // state {
538     private Region!(NullAllocator, minAlign, growDownwards) _impl;
539     union
540     {
541         private ubyte[size] _store = void;
542         private double _forAlignmentOnly1;
543     }
544     // }
545 
546     /**
547     An alias for `minAlign`, which must be a valid alignment (nonzero power
548     of 2). The start of the region and all allocation requests will be rounded
549     up to a multiple of the alignment.
550 
551     ----
552     InSituRegion!(4096) a1;
553     assert(a1.alignment == platformAlignment);
554     InSituRegion!(4096, 64) a2;
555     assert(a2.alignment == 64);
556     ----
557     */
558     alias alignment = minAlign;
559 
560     private void lazyInit()
561     {
562         assert(!_impl._current);
563         _impl = typeof(_impl)(_store);
564         assert(_impl._current.alignedAt(alignment));
565     }
566 
567     /**
568     Allocates `n` bytes and returns them, or `null` if the region cannot
569     accommodate the request. For efficiency reasons, if $(D n == 0) the
570     function returns an empty non-null slice.
571     */
572     void[] allocate(size_t n)
573     {
574         // Fast path
575     entry:
576         auto result = _impl.allocate(n);
577         if (result.length == n) return result;
578         // Slow path
579         if (_impl._current) return null; // no more room
580         lazyInit;
581         assert(_impl._current);
582         goto entry;
583     }
584 
585     /**
586     As above, but the memory allocated is aligned at `a` bytes.
587     */
588     void[] alignedAllocate(size_t n, uint a)
589     {
590         // Fast path
591     entry:
592         auto result = _impl.alignedAllocate(n, a);
593         if (result.length == n) return result;
594         // Slow path
595         if (_impl._current) return null; // no more room
596         lazyInit;
597         assert(_impl._current);
598         goto entry;
599     }
600 
601     /**
602     Deallocates `b`. This works only if `b` was obtained as the last call
603     to `allocate`; otherwise (i.e. another allocation has occurred since) it
604     does nothing. As with `Region`, only the block most recently obtained
605     from `allocate` can be rolled back; for any other block the call simply
606     returns `false`.
607 
608     Params:
609         b = Block previously obtained by a call to `allocate` against this
610         allocator (`null` is allowed).
611     */
612     bool deallocate(void[] b)
613     {
614         if (!_impl._current) return b is null;
615         return _impl.deallocate(b);
616     }
617 
618     /**
619     Returns `Ternary.yes` if `b` is the result of a previous allocation,
620     `Ternary.no` otherwise.
621     */
622     Ternary owns(const void[] b) pure nothrow @safe @nogc
623     {
624         if (!_impl._current) return Ternary.no;
625         return _impl.owns(b);
626     }
627 
628     /**
629     Expands an allocated block in place. Expansion will succeed only if the
630     block is the last allocated.
631     */
632     static if (hasMember!(typeof(_impl), "expand"))
633     bool expand(ref void[] b, size_t delta)
634     {
635         if (!_impl._current) lazyInit;
636         return _impl.expand(b, delta);
637     }
638 
639     /**
640     Deallocates all memory allocated with this allocator.
641     */
642     bool deallocateAll()
643     {
644         // We don't care to lazily init the region
645         return _impl.deallocateAll;
646     }
647 
648     /**
649     Allocates all memory available with this allocator.
650     */
651     void[] allocateAll()
652     {
653         if (!_impl._current) lazyInit;
654         return _impl.allocateAll;
655     }
656 
657     /**
658     Nonstandard function that returns the bytes available for allocation.
659     */
660     size_t available()
661     {
662         if (!_impl._current) lazyInit;
663         return _impl.available;
664     }
665 }
666 
667 ///
668 @system unittest
669 {
670     // 128KB region, aligned to a 16-byte boundary
671     InSituRegion!(128 * 1024, 16) r1;
672     auto a1 = r1.allocate(101);
673     assert(a1.length == 101);
674 
675     // 128KB region, with fallback to the garbage collector.
676     import std.experimental.allocator.building_blocks.fallback_allocator
677         : FallbackAllocator;
678     import std.experimental.allocator.building_blocks.free_list
679         : FreeList;
680     import std.experimental.allocator.building_blocks.bitmapped_block
681         : BitmappedBlock;
682     import std.experimental.allocator.gc_allocator : GCAllocator;
683     FallbackAllocator!(InSituRegion!(128 * 1024), GCAllocator) r2;
684     const a2 = r2.allocate(102);
685     assert(a2.length == 102);
686 
687     // Reap with GC fallback.
688     InSituRegion!(128 * 1024, 8) tmp3;
689     FallbackAllocator!(BitmappedBlock!(64, 8), GCAllocator) r3;
690     r3.primary = BitmappedBlock!(64, 8)(cast(ubyte[]) (tmp3.allocateAll()));
691     const a3 = r3.allocate(103);
692     assert(a3.length == 103);
693 
694     // Reap/GC with a freelist for small objects up to 16 bytes.
695     InSituRegion!(128 * 1024, 64) tmp4;
696     FreeList!(FallbackAllocator!(BitmappedBlock!(64, 64), GCAllocator), 0, 16) r4;
697     r4.parent.primary = BitmappedBlock!(64, 64)(cast(ubyte[]) (tmp4.allocateAll()));
698     const a4 = r4.allocate(104);
699     assert(a4.length == 104);
700 }
701 
702 @system pure nothrow unittest
703 {
704     import std.typecons : Ternary;
705 
706     InSituRegion!(4096, 1) r1;
707     auto a = r1.allocate(2001);
708     assert(a.length == 2001);
709     import std.conv : text;
710     assert(r1.available == 2095, text(r1.available));
711     // Ensure deallocate inherits from parent
712     assert((() nothrow @nogc => r1.deallocate(a))());
713     assert((() nothrow @nogc => r1.deallocateAll())());
714 
715     InSituRegion!(65_536, 1024*4) r2;
716     assert(r2.available <= 65_536);
717     a = r2.allocate(2001);
718     assert(a.length == 2001);
719     const void[] buff = r2.allocate(42);
720     assert((() nothrow @safe @nogc => r2.owns(buff))() == Ternary.yes);
721     assert((() nothrow @nogc => r2.deallocateAll())());
722 }
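
// Added sketch: `InSituRegion` as a function-local scratch arena. The 1024-byte
// capacity is arbitrary; the storage lives inside the struct itself, so nothing
// is requested from the heap.
@system unittest
{
    import std.typecons : Ternary;

    InSituRegion!(1024) scratch;
    auto tmp = scratch.allocate(100);
    assert(tmp.length == 100);
    assert(scratch.owns(tmp) == Ternary.yes);

    // All blocks are released together; only the last one could be freed alone.
    assert(scratch.deallocateAll());
}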
723 
724 version (CRuntime_Musl)
725 {
726     // sbrk and brk are disabled in Musl:
727     // https://git.musl-libc.org/cgit/musl/commit/?id=7a995fe706e519a4f55399776ef0df9596101f93
728     // https://git.musl-libc.org/cgit/musl/commit/?id=863d628d93ea341b6a32661a1654320ce69f6a07
729 }
730 version (DragonFlyBSD)
731 {
732     // sbrk is deprecated in favor of mmap   (we could implement a mmap + MAP_NORESERVE + PROT_NONE version)
733     // brk has been removed
734     // https://www.dragonflydigest.com/2019/02/22/22586.html
735     // http://gitweb.dragonflybsd.org/dragonfly.git/commitdiff/dc676eaefa61b0f47bbea1c53eab86fd5ccd78c6
736     // http://gitweb.dragonflybsd.org/dragonfly.git/commitdiff/4b5665564ef37dc939a3a9ffbafaab9894c18885
737     // http://gitweb.dragonflybsd.org/dragonfly.git/commitdiff/8618d94a0e2ff8303ad93c123a3fa598c26a116e
738 }
739 else
740 {
741     private extern(C) void* sbrk(long) nothrow @nogc;
742     private extern(C) int brk(shared void*) nothrow @nogc;
743 }
744 
745 /**
746 
747 Allocator backed by $(D $(LINK2 https://en.wikipedia.org/wiki/Sbrk, sbrk))
748 for Posix systems. Because `sbrk` is not thread-safe
749 $(HTTP lifecs.likai.org/2010/02/sbrk-is-not-thread-safe.html, by design),
750 `SbrkRegion` uses a mutex internally. This implies
751 that uncontrolled calls to `brk` and `sbrk` may affect the workings of $(D
752 SbrkRegion) adversely.
753 
754 */
755 version (CRuntime_Musl) {} else
756 version (DragonFlyBSD) {} else
757 version (Posix) struct SbrkRegion(uint minAlign = platformAlignment)
758 {
759     import core.sys.posix.pthread : pthread_mutex_init, pthread_mutex_destroy,
760         pthread_mutex_t, pthread_mutex_lock, pthread_mutex_unlock,
761         PTHREAD_MUTEX_INITIALIZER;
762 
763     private static shared pthread_mutex_t sbrkMutex = PTHREAD_MUTEX_INITIALIZER;
764     import std.typecons : Ternary;
765 
766     static assert(minAlign.isGoodStaticAlignment);
767     static assert(size_t.sizeof == (void*).sizeof);
768     private shared void* _brkInitial, _brkCurrent;
769 
770     /**
771     Instance shared by all callers.
772     */
773     static shared SbrkRegion instance;
774 
775     /**
776     Standard allocator primitives.
777     */
778     enum uint alignment = minAlign;
779 
780     /**
781     Rounds the given size to a multiple of the `alignment`
782     */
783     size_t goodAllocSize(size_t n) shared const pure nothrow @safe @nogc
784     {
785         return n.roundUpToMultipleOf(alignment);
786     }
787 
788     /// Ditto
789     void[] allocate(size_t bytes) shared @trusted nothrow @nogc
790     {
791         // Take alignment rounding into account
792         const rounded = goodAllocSize(bytes);
793 
794         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
795         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
796             || assert(0);
797         // Assume sbrk returns the old break. Most online documentation confirms
798         // that, except for http://www.inf.udec.cl/~leo/Malloc_tutorial.pdf,
799         // which claims the returned value is not portable.
800         auto p = sbrk(rounded);
801         if (p == cast(void*) -1)
802         {
803             return null;
804         }
805         if (!_brkInitial)
806         {
807             _brkInitial = cast(shared) p;
808             assert(cast(size_t) _brkInitial % minAlign == 0,
809                 "Too large alignment chosen for " ~ typeof(this).stringof);
810         }
811         _brkCurrent = cast(shared) (p + rounded);
812         return p[0 .. bytes];
813     }
814 
815     /// Ditto
816     void[] alignedAllocate(size_t bytes, uint a) shared @trusted nothrow @nogc
817     {
818         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
819         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
820             || assert(0);
821         if (!_brkInitial)
822         {
823             // This is one extra call, but it'll happen only once.
824             _brkInitial = cast(shared) sbrk(0);
825             assert(cast(size_t) _brkInitial % minAlign == 0,
826                 "Too large alignment chosen for " ~ typeof(this).stringof);
827             (_brkInitial != cast(void*) -1) || assert(0);
828             _brkCurrent = _brkInitial;
829         }
830         immutable size_t delta = cast(shared void*) roundUpToMultipleOf(
831             cast(size_t) _brkCurrent, a) - _brkCurrent;
832         // Still must make sure the total size is aligned to the allocator's
833         // alignment.
834         immutable rounded = (bytes + delta).roundUpToMultipleOf(alignment);
835 
836         auto p = sbrk(rounded);
837         if (p == cast(void*) -1)
838         {
839             return null;
840         }
841         _brkCurrent = cast(shared) (p + rounded);
842         return p[delta .. delta + bytes];
843     }
844 
845     /**
846 
847     The `expand` method may only succeed if the argument is the last block
848     allocated. In that case, `expand` attempts to push the break pointer to
849     the right.
850 
851     */
852     bool expand(ref void[] b, size_t delta) shared nothrow @trusted @nogc
853     {
854         if (b is null || delta == 0) return delta == 0;
855         assert(_brkInitial && _brkCurrent); // otherwise where did b come from?
856         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
857         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
858             || assert(0);
859 
860         // Take alignment rounding into account
861         const rounded = goodAllocSize(b.length);
862 
863         const slack = rounded - b.length;
864         if (delta <= slack)
865         {
866             b = b.ptr[0 .. b.length + delta];
867             return true;
868         }
869 
870         if (_brkCurrent != b.ptr + rounded) return false;
871         // Great, can expand the last block
872         delta -= slack;
873 
874         const roundedDelta = goodAllocSize(delta);
875         auto p = sbrk(roundedDelta);
876         if (p == cast(void*) -1)
877         {
878             return false;
879         }
880         _brkCurrent = cast(shared) (p + roundedDelta);
881         b = b.ptr[0 .. b.length + slack + delta];
882         return true;
883     }
884 
885     /// Ditto
886     Ternary owns(const void[] b) shared pure nothrow @trusted @nogc
887     {
888         // No need to lock here.
889         assert(!_brkCurrent || !b || &b[0] + b.length <= _brkCurrent);
890         return Ternary(_brkInitial && b && (&b[0] >= _brkInitial));
891     }
892 
893     /**
894 
895     The `deallocate` method only works (and returns `true`) on systems
896     that support reducing the break address (i.e. accept calls to `sbrk`
897     with negative offsets). OSX does not accept such calls. In addition, the
898     argument must be the last block allocated.
899 
900     */
901     bool deallocate(void[] b) shared nothrow @nogc
902     {
903         // Take alignment rounding into account
904         const rounded = goodAllocSize(b.length);
905         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
906         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
907             || assert(0);
908         if (_brkCurrent != b.ptr + rounded) return false;
909         assert(b.ptr >= _brkInitial);
910         if (sbrk(-rounded) == cast(void*) -1)
911             return false;
912         _brkCurrent = cast(shared) b.ptr;
913         return true;
914     }
915 
916     /**
917     The `deallocateAll` method only works (and returns `true`) on systems
918     that support reducing the break address (i.e. accept calls to `sbrk`
919     with negative offsets). OSX does not accept such calls.
920     */
921     nothrow @nogc
922     bool deallocateAll() shared
923     {
924         pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0 || assert(0);
925         scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex) == 0
926             || assert(0);
927         return !_brkInitial || brk(_brkInitial) == 0;
928     }
929 
930     /// Standard allocator API.
931     Ternary empty() shared pure nothrow @safe @nogc
932     {
933         // Also works when they're both null.
934         return Ternary(_brkCurrent == _brkInitial);
935     }
936 }
937 
938 version (CRuntime_Musl) {} else
939 version (DragonFlyBSD) {} else
940 version (Posix) @system nothrow @nogc unittest
941 {
942     // Let's test the assumption that sbrk(n) returns the old address
943     const p1 = sbrk(0);
944     const p2 = sbrk(4096);
945     assert(p1 == p2);
946     const p3 = sbrk(0);
947     assert(p3 == p2 + 4096);
948     // Try to reset brk, but don't make a fuss if it doesn't work
949     sbrk(-4096);
950 }
951 
952 version (CRuntime_Musl) {} else
953 version (DragonFlyBSD) {} else
954 version (Posix) @system nothrow @nogc unittest
955 {
956     import std.typecons : Ternary;
957     import std.algorithm.comparison : min;
958     alias alloc = SbrkRegion!(min(8, platformAlignment)).instance;
959     assert((() nothrow @safe @nogc => alloc.empty)() == Ternary.yes);
960     auto a = alloc.alignedAllocate(2001, 4096);
961     assert(a.length == 2001);
962     assert((() nothrow @safe @nogc => alloc.empty)() == Ternary.no);
963     auto oldBrkCurr = alloc._brkCurrent;
964     auto b = alloc.allocate(2001);
965     assert(b.length == 2001);
966     assert((() nothrow @safe @nogc => alloc.expand(b, 0))());
967     assert(b.length == 2001);
968     // Expand with a small size to fit the rounded slack due to alignment
969     assert((() nothrow @safe @nogc => alloc.expand(b, 1))());
970     assert(b.length == 2002);
971     // Exceed the rounded slack due to alignment
972     assert((() nothrow @safe @nogc => alloc.expand(b, 10))());
973     assert(b.length == 2012);
974     assert((() nothrow @safe @nogc => alloc.owns(a))() == Ternary.yes);
975     assert((() nothrow @safe @nogc => alloc.owns(b))() == Ternary.yes);
976     // reducing the brk does not work on OSX
977     version (Darwin) {} else
978     {
979         assert((() nothrow @nogc => alloc.deallocate(b))());
980         // Check that expand and deallocate work well
981         assert(oldBrkCurr == alloc._brkCurrent);
982         assert((() nothrow @nogc => alloc.deallocate(a))());
983         assert((() nothrow @nogc => alloc.deallocateAll())());
984     }
985     const void[] c = alloc.allocate(2001);
986     assert(c.length == 2001);
987     assert((() nothrow @safe @nogc => alloc.owns(c))() == Ternary.yes);
988     assert((() nothrow @safe @nogc => alloc.owns(null))() == Ternary.no);
989 }
990 
991 /**
992 The threadsafe version of the `Region` allocator.
993 Allocations and deallocations are lock-free and are implemented using $(REF cas, core,atomic).
994 */
995 shared struct SharedRegion(ParentAllocator = NullAllocator,
996     uint minAlign = platformAlignment,
997     Flag!"growDownwards" growDownwards = No.growDownwards)
998 {
999     static assert(minAlign.isGoodStaticAlignment);
1000     static assert(ParentAllocator.alignment >= minAlign);
1001 
1002     import std.traits : hasMember;
1003     import std.typecons : Ternary;
1004 
1005     // state
1006     /**
1007     The _parent allocator. Depending on whether `ParentAllocator` holds state
1008     or not, this is a member variable or an alias for
1009     `ParentAllocator.instance`.
1010     */
1011     static if (stateSize!ParentAllocator)
1012     {
1013         ParentAllocator parent;
1014     }
1015     else
1016     {
1017         alias parent = ParentAllocator.instance;
1018     }
1019     private shared void* _current, _begin, _end;
1020 
1021     private void* roundedBegin() const pure nothrow @trusted @nogc
1022     {
1023         return cast(void*) roundUpToAlignment(cast(size_t) _begin, alignment);
1024     }
1025 
1026     private void* roundedEnd() const pure nothrow @trusted @nogc
1027     {
1028         return cast(void*) roundDownToAlignment(cast(size_t) _end, alignment);
1029     }
1030 
1031 
1032     /**
1033     Constructs a region backed by a user-provided store.
1034     Assumes the memory was allocated with `ParentAllocator`
1035     (if different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator)).
1036 
1037     Params:
1038         store = User-provided store backing up the region. If `ParentAllocator`
1039         is different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator), memory is assumed to
1040         have been allocated with `ParentAllocator`.
1041         n = Bytes to allocate using `ParentAllocator`. This constructor is only
1042         defined if `ParentAllocator` is different from $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator). If
1043         `parent.allocate(n)` returns `null`, the region will be initialized
1044         as empty (correctly initialized but unable to allocate).
1045     */
1046     this(ubyte[] store) pure nothrow @nogc
1047     {
1048         _begin = cast(typeof(_begin)) store.ptr;
1049         _end = cast(typeof(_end)) (store.ptr + store.length);
1050         static if (growDownwards)
1051             _current = cast(typeof(_current)) roundedEnd();
1052         else
1053             _current = cast(typeof(_current)) roundedBegin();
1054     }
1055 
1056     /// Ditto
1057     static if (!is(ParentAllocator == NullAllocator))
1058     this(size_t n)
1059     {
1060         this(cast(ubyte[]) (parent.allocate(n.roundUpToAlignment(alignment))));
1061     }
1062 
1063     /**
1064     Rounds the given size to a multiple of the `alignment`
1065     */
1066     size_t goodAllocSize(size_t n) const pure nothrow @safe @nogc
1067     {
1068         return n.roundUpToAlignment(alignment);
1069     }
1070 
1071     /**
1072     Alignment offered.
1073     */
1074     alias alignment = minAlign;
1075 
1076     /**
1077     Allocates `n` bytes of memory. The allocation is served by atomically incrementing
1078     a pointer which keeps track of the current used space.
1079 
1080     Params:
1081         n = number of bytes to allocate
1082 
1083     Returns:
1084         A properly-aligned buffer of size `n`, or `null` if the request could not
1085         be satisfied.
1086     */
1087     void[] allocate(size_t n) pure nothrow @trusted @nogc
1088     {
1089         import core.atomic : cas, atomicLoad;
1090 
1091         if (n == 0) return null;
1092         const rounded = goodAllocSize(n);
1093 
1094         shared void* localCurrent, localNewCurrent;
1095         static if (growDownwards)
1096         {
1097             do
1098             {
1099                 localCurrent = atomicLoad(_current);
1100                 localNewCurrent = localCurrent - rounded;
1101                 if (localNewCurrent > localCurrent || localNewCurrent < _begin)
1102                     return null;
1103             } while (!cas(&_current, localCurrent, localNewCurrent));
1104 
1105             return cast(void[]) localNewCurrent[0 .. n];
1106         }
1107         else
1108         {
1109             do
1110             {
1111                 localCurrent = atomicLoad(_current);
1112                 localNewCurrent = localCurrent + rounded;
1113                 if (localNewCurrent < localCurrent || localNewCurrent > _end)
1114                     return null;
1115             } while (!cas(&_current, localCurrent, localNewCurrent));
1116 
1117             return cast(void[]) localCurrent[0 .. n];
1118         }
1119 
1120         assert(0, "Unexpected error in SharedRegion.allocate");
1121     }
1122 
1123     /**
1124     Deallocates `b`. This works only if `b` was obtained as the last call
1125     to `allocate`; otherwise (i.e. another allocation has occurred since) it
1126     does nothing.
1127 
1128     Params:
1129         b = Block previously obtained by a call to `allocate` against this
1130         allocator (`null` is allowed).
1131     */
1132     bool deallocate(void[] b) pure nothrow @nogc
1133     {
1134         import core.atomic : cas, atomicLoad;
1135 
1136         const rounded = goodAllocSize(b.length);
1137         shared void* localCurrent, localNewCurrent;
1138 
1139         // The cas is done only once, because only the last allocation can be reverted
1140         localCurrent = atomicLoad(_current);
1141         static if (growDownwards)
1142         {
1143             localNewCurrent = localCurrent + rounded;
1144             if (b.ptr == localCurrent)
1145                 return cas(&_current, localCurrent, localNewCurrent);
1146         }
1147         else
1148         {
1149             localNewCurrent = localCurrent - rounded;
1150             if (b.ptr == localNewCurrent)
1151                 return cas(&_current, localCurrent, localNewCurrent);
1152         }
1153 
1154         return false;
1155     }
1156 
1157     /**
1158     Deallocates all memory allocated by this region, which can be subsequently
1159     reused for new allocations.
1160     */
1161     bool deallocateAll() pure nothrow @nogc
1162     {
1163         import core.atomic : atomicStore;
1164         static if (growDownwards)
1165         {
1166             atomicStore(_current, cast(shared(void*)) roundedEnd());
1167         }
1168         else
1169         {
1170             atomicStore(_current, cast(shared(void*)) roundedBegin());
1171         }
1172         return true;
1173     }
1174 
1175     /**
1176     Allocates `n` bytes of memory aligned at alignment `a`.
1177     Params:
1178         n = number of bytes to allocate
1179         a = alignment for the allocated block
1180 
1181     Returns:
1182         Either a suitable block of `n` bytes aligned at `a`, or `null`.
1183     */
1184     void[] alignedAllocate(size_t n, uint a) pure nothrow @trusted @nogc
1185     {
1186         import core.atomic : cas, atomicLoad;
1187         import std.math.traits : isPowerOf2;
1188 
1189         assert(a.isPowerOf2);
1190         if (n == 0) return null;
1191 
1192         const rounded = goodAllocSize(n);
1193         shared void* localCurrent, localNewCurrent;
1194 
1195         static if (growDownwards)
1196         {
1197             do
1198             {
1199                 localCurrent = atomicLoad(_current);
1200                 auto alignedCurrent = cast(void*)(localCurrent - rounded);
1201                 localNewCurrent = cast(shared(void*)) alignedCurrent.alignDownTo(a);
1202                 if (alignedCurrent > localCurrent || localNewCurrent > alignedCurrent ||
1203                     localNewCurrent < _begin)
1204                     return null;
1205             } while (!cas(&_current, localCurrent, localNewCurrent));
1206 
1207             return cast(void[]) localNewCurrent[0 .. n];
1208         }
1209         else
1210         {
1211             do
1212             {
1213                 localCurrent = atomicLoad(_current);
1214                 auto alignedCurrent = alignUpTo(cast(void*) localCurrent, a);
1215                 localNewCurrent = cast(shared(void*)) (alignedCurrent + rounded);
1216                 if (alignedCurrent < localCurrent || localNewCurrent < alignedCurrent ||
1217                     localNewCurrent > _end)
1218                     return null;
1219             } while (!cas(&_current, localCurrent, localNewCurrent));
1220 
1221             return cast(void[]) (localNewCurrent - rounded)[0 .. n];
1222         }
1223 
1224         assert(0, "Unexpected error in SharedRegion.alignedAllocate");
1225     }
1226 
1227     /**
1228     Queries whether `b` has been allocated with this region.
1229 
1230     Params:
1231         b = Arbitrary block of memory (`null` is allowed; `owns(null)` returns
1232         `false`).
1233 
1234     Returns:
1235         `true` if `b` has been allocated with this region, `false` otherwise.
1236     */
1237     Ternary owns(const void[] b) const pure nothrow @trusted @nogc
1238     {
1239         return Ternary(b && (&b[0] >= _begin) && (&b[0] + b.length <= _end));
1240     }
1241 
1242     /**
1243     Returns `Ternary.yes` if no memory has been allocated in this region,
1244     `Ternary.no` otherwise. (Never returns `Ternary.unknown`.)
1245     */
1246     Ternary empty() const pure nothrow @safe @nogc
1247     {
1248         import core.atomic : atomicLoad;
1249 
1250         auto localCurrent = atomicLoad(_current);
1251         static if (growDownwards)
1252             return Ternary(localCurrent == roundedEnd());
1253         else
1254             return Ternary(localCurrent == roundedBegin());
1255     }
1256 
1257     /**
1258     If `ParentAllocator` is not $(REF_ALTTEXT `NullAllocator`, NullAllocator, std,experimental,allocator,building_blocks,null_allocator) and defines `deallocate`,
1259     the region defines a destructor that uses `ParentAllocator.deallocate` to free the
1260     memory chunk.
1261     */
1262     static if (!is(ParentAllocator == NullAllocator)
1263         && hasMember!(ParentAllocator, "deallocate"))
1264     ~this()
1265     {
1266         parent.deallocate(cast(void[]) _begin[0 .. _end - _begin]);
1267     }
1268 }
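
// Added single-threaded sketch of `SharedRegion`: the same bump-the-pointer
// scheme as `Region`, but pointer updates go through `cas`, so multiple threads
// may allocate concurrently (see the threaded tests below). Sizes are arbitrary.
@system unittest
{
    import std.experimental.allocator.mallocator : Mallocator;
    import std.typecons : Ternary;

    auto reg = SharedRegion!(Mallocator)(4096);
    auto b = reg.allocate(100);
    assert(b.length == 100);
    assert(reg.owns(b) == Ternary.yes);
    assert(reg.deallocate(b)); // only the most recent allocation can be undone
    assert(reg.deallocateAll());
}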
1269 
1270 @system unittest
1271 {
1272     import std.experimental.allocator.mallocator : Mallocator;
1273 
1274     static void testAlloc(Allocator)(ref Allocator a, bool growDownwards)
1275     {
1276         import core.thread : ThreadGroup;
1277         import std.algorithm.sorting : sort;
1278         import core.internal.spinlock : SpinLock;
1279 
1280         SpinLock lock = SpinLock(SpinLock.Contention.brief);
1281         enum numThreads = 100;
1282         void[][numThreads] buf;
1283         size_t count = 0;
1284 
1285         void fun()
1286         {
1287             void[] b = a.allocate(63);
1288             assert(b.length == 63);
1289 
1290             lock.lock();
1291             buf[count] = b;
1292             count++;
1293             lock.unlock();
1294         }
1295 
1296         auto tg = new ThreadGroup;
1297         foreach (i; 0 .. numThreads)
1298         {
1299             tg.create(&fun);
1300         }
1301         tg.joinAll();
1302 
1303         sort!((a, b) => a.ptr < b.ptr)(buf[0 .. numThreads]);
1304         foreach (i; 0 .. numThreads - 1)
1305         {
1306             assert(buf[i].ptr + a.goodAllocSize(buf[i].length) == buf[i + 1].ptr);
1307         }
1308 
1309         assert(!a.deallocate(buf[1]));
1310 
1311         foreach (i; 0 .. numThreads)
1312         {
1313             if (!growDownwards)
1314                 assert(a.deallocate(buf[numThreads - 1 - i]));
1315             else
1316                 assert(a.deallocate(buf[i]));
1317         }
1318 
1319         assert(a.deallocateAll());
1320         void[] b = a.allocate(63);
1321         assert(b.length == 63);
1322         assert(a.deallocate(b));
1323     }
1324 
1325     auto a1 = SharedRegion!(Mallocator, Mallocator.alignment,
1326         Yes.growDownwards)(1024 * 64);
1327 
1328     auto a2 = SharedRegion!(Mallocator, Mallocator.alignment,
1329         No.growDownwards)(1024 * 64);
1330 
1331     testAlloc(a1, true);
1332     testAlloc(a2, false);
1333 }
1334 
1335 @system unittest
1336 {
1337     import std.experimental.allocator.mallocator : Mallocator;
1338 
1339     static void testAlloc(Allocator)(ref Allocator a, bool growDownwards)
1340     {
1341         import core.thread : ThreadGroup;
1342         import std.algorithm.sorting : sort;
1343         import core.internal.spinlock : SpinLock;
1344 
1345         SpinLock lock = SpinLock(SpinLock.Contention.brief);
1346         enum numThreads = 100;
1347         void[][2 * numThreads] buf;
1348         size_t count = 0;
1349 
1350         void fun()
1351         {
1352             void[] b = a.allocate(63);
1353             assert(b.length == 63);
1354 
1355             lock.lock();
1356             buf[count] = b;
1357             count++;
1358             lock.unlock();
1359 
1360             b = a.alignedAllocate(63, 32);
1361             assert(b.length == 63);
1362             assert(cast(size_t) b.ptr % 32 == 0);
1363 
1364             lock.lock();
1365             buf[count] = b;
1366             count++;
1367             lock.unlock();
1368         }
1369 
1370         auto tg = new ThreadGroup;
1371         foreach (i; 0 .. numThreads)
1372         {
1373             tg.create(&fun);
1374         }
1375         tg.joinAll();
1376 
1377         sort!((a, b) => a.ptr < b.ptr)(buf[0 .. 2 * numThreads]);
1378         foreach (i; 0 .. 2 * numThreads - 1)
1379         {
1380             assert(buf[i].ptr + buf[i].length <= buf[i + 1].ptr);
1381         }
1382 
1383         assert(!a.deallocate(buf[1]));
1384         assert(a.deallocateAll());
1385 
1386         void[] b = a.allocate(13);
1387         assert(b.length == 13);
1388         assert(a.deallocate(b));
1389     }
1390 
1391     auto a1 = SharedRegion!(Mallocator, Mallocator.alignment,
1392         Yes.growDownwards)(1024 * 64);
1393 
1394     auto a2 = SharedRegion!(Mallocator, Mallocator.alignment,
1395         No.growDownwards)(1024 * 64);
1396 
1397     testAlloc(a1, true);
1398     testAlloc(a2, false);
1399 }
1400