// Written in the D programming language.
/**
Source: $(PHOBOSSRC std/experimental/allocator/building_blocks/ascending_page_allocator.d)
*/
module std.experimental.allocator.building_blocks.ascending_page_allocator;

import core.memory : pageSize;

import std.experimental.allocator.common;

// Common implementations for shared and thread local AscendingPageAllocator
private mixin template AscendingPageAllocatorImpl(bool isShared)
{
    bool deallocate(void[] buf) nothrow @nogc
    {
        size_t goodSize = goodAllocSize(buf.length);
        version (Posix)
        {
            import core.sys.posix.sys.mman : mmap, MAP_FAILED, MAP_PRIVATE,
                MAP_ANON, MAP_FIXED, PROT_NONE, munmap;

            auto ptr = mmap(buf.ptr, goodSize, PROT_NONE, MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
            if (ptr == MAP_FAILED)
                return false;
        }
        else version (Windows)
        {
            import core.sys.windows.winbase : VirtualFree;
            import core.sys.windows.winnt : MEM_DECOMMIT;

            auto ret = VirtualFree(buf.ptr, goodSize, MEM_DECOMMIT);
            if (ret == 0)
                return false;
        }
        else
        {
            static assert(0, "Unsupported OS");
        }

        static if (!isShared)
        {
            pagesUsed -= goodSize / pageSize;
        }

        return true;
    }

    Ternary owns(void[] buf) nothrow @nogc
    {
        if (!data)
            return Ternary.no;
        return Ternary(buf.ptr >= data && buf.ptr < data + numPages * pageSize);
    }

    bool deallocateAll() nothrow @nogc
    {
        version (Posix)
        {
            import core.sys.posix.sys.mman : munmap;
            auto ret = munmap(cast(void*) data, numPages * pageSize);
            if (ret != 0)
                assert(0, "Failed to unmap memory, munmap failure");
        }
        else version (Windows)
        {
            import core.sys.windows.winbase : VirtualFree;
            import core.sys.windows.winnt : MEM_RELEASE;
            auto ret = VirtualFree(cast(void*) data, 0, MEM_RELEASE);
            if (ret == 0)
                assert(0, "Failed to unmap memory, VirtualFree failure");
        }
        else
        {
            static assert(0, "Unsupported OS version");
        }
        data = null;
        offset = null;
        return true;
    }

    size_t goodAllocSize(size_t n) nothrow @nogc
    {
        return n.roundUpToMultipleOf(cast(uint) pageSize);
    }

    this(size_t n) nothrow @nogc
    {
        static if (isShared)
        {
            lock = SpinLock(SpinLock.Contention.brief);
        }

        pageSize = .pageSize;
        numPages = n.roundUpToMultipleOf(cast(uint) pageSize) / pageSize;

        version (Posix)
        {
            import core.sys.posix.sys.mman : mmap, MAP_ANON, PROT_NONE,
                MAP_PRIVATE, MAP_FAILED;

            data = cast(typeof(data)) mmap(null, pageSize * numPages,
                PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
            if (data == MAP_FAILED)
                assert(0, "Failed to mmap memory");
        }
        else version (Windows)
        {
            import core.sys.windows.winbase : VirtualAlloc;
            import core.sys.windows.winnt : MEM_RESERVE, PAGE_NOACCESS;

            data = cast(typeof(data)) VirtualAlloc(null, pageSize * numPages,
                MEM_RESERVE, PAGE_NOACCESS);
            if (!data)
                assert(0, "Failed to VirtualAlloc memory");
        }
        else
        {
            static assert(0, "Unsupported OS version");
        }

        offset = data;
        readWriteLimit = data;
    }

    size_t getAvailableSize() nothrow @nogc
    {
        static if (isShared)
        {
            lock.lock();
        }

        auto size = numPages * pageSize + data - offset;
        static if (isShared)
        {
            lock.unlock();
        }
        return size;
    }

    // Sets the protection of a memory range to read/write
    private bool extendMemoryProtection(void* start, size_t size) nothrow @nogc
    {
        version (Posix)
        {
            import core.sys.posix.sys.mman : mprotect, PROT_WRITE, PROT_READ;

            auto ret = mprotect(start, size, PROT_WRITE | PROT_READ);
            return ret == 0;
        }
        else version (Windows)
        {
            import core.sys.windows.winbase : VirtualAlloc;
            import core.sys.windows.winnt : MEM_COMMIT, PAGE_READWRITE;

            auto ret = VirtualAlloc(start, size, MEM_COMMIT, PAGE_READWRITE);
            return ret != null;
        }
        else
        {
            static assert(0, "Unsupported OS");
        }
    }
}

/**
`AscendingPageAllocator` is a fast and safe allocator that rounds all allocations
to multiples of the system's page size. It reserves a range of virtual addresses
(using `mmap` on POSIX and `VirtualAlloc` on Windows) and allocates memory at consecutive virtual
addresses.

When a chunk of memory is requested, the allocator finds a range of
virtual pages that satisfies the requested size and changes their protection to
read/write using OS primitives (`mprotect` on POSIX and `VirtualAlloc` with `MEM_COMMIT` on Windows).
The physical memory is allocated on demand, when the pages are accessed.

Deallocation removes any read/write permissions from the target pages
and notifies the OS to reclaim the physical memory, while keeping the virtual
address range reserved.

Because the allocator does not reuse memory, any dangling reference to
deallocated memory will deterministically crash the process.

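A minimal usage sketch (sizes are rounded up to the page size internally, and
physical memory is only committed once the pages are actually touched):
---
import core.memory : pageSize;

auto a = AscendingPageAllocator(4 * pageSize);
void[] b = a.allocate(100);
assert(b.length == 100);                  // the requested length is preserved
assert(a.goodAllocSize(100) == pageSize); // but a whole page of the range is used
assert(a.deallocate(b));                  // decommits the page, keeps the addresses
---
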
See_Also:
$(HTTPS microsoft.com/en-us/research/wp-content/uploads/2017/03/kedia2017mem.pdf, Simple Fast and Safe Manual Memory Management) for the general approach.
*/
struct AscendingPageAllocator
{
    import std.typecons : Ternary;

    // Docs for mixin functions
    version (StdDdoc)
    {
        /**
        Rounds the mapping size to the next multiple of the page size and calls
        the OS primitive responsible for creating memory mappings: `mmap` on POSIX and
        `VirtualAlloc` on Windows.

        Params:
        n = mapping size in bytes
        */
        this(size_t n) nothrow @nogc;

        /**
        Rounds the requested size to the next multiple of the page size.
        */
        size_t goodAllocSize(size_t n) nothrow @nogc;

        /**
        Decommits all physical memory associated with the buffer passed as a parameter,
        but keeps the range of virtual addresses reserved.

        On POSIX systems `deallocate` calls `mmap` with `MAP_FIXED` a second time to decommit the memory.
        On Windows, it uses `VirtualFree` with `MEM_DECOMMIT`.
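
        A sketch of the observable behaviour: the pages are decommitted, but the
        virtual range is still owned by the allocator.
        ---
        import core.memory : pageSize;
        import std.typecons : Ternary;

        auto a = AscendingPageAllocator(2 * pageSize);
        void[] b = a.allocate(pageSize);
        assert(a.deallocate(b));
        assert(a.owns(b) == Ternary.yes); // addresses stay reserved, just inaccessible
        ---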
        */
        bool deallocate(void[] b) nothrow @nogc;

        /**
        Returns `Ternary.yes` if the passed buffer is inside the range of virtual addresses.
        Does not guarantee that the passed buffer is still valid.
        */
        Ternary owns(void[] buf) nothrow @nogc;

        /**
        Removes the memory mapping, causing all physical memory to be decommitted and
        the virtual address space to be reclaimed.
        */
        bool deallocateAll() nothrow @nogc;

        /**
        Returns the available size for further allocations in bytes.
        */
        size_t getAvailableSize() nothrow @nogc;
    }

private:
    size_t pageSize;
    size_t numPages;

    // The start of the virtual address range
    void* data;

    // Keeps track of where the next allocation should start
    void* offset;

    // Number of pages which contain alive objects
    size_t pagesUsed;

    // On allocation requests, we make an extra 'extraAllocPages' pages readable/writable.
    // The address up to which we have read/write permissions is stored in 'readWriteLimit'.
    void* readWriteLimit;
    enum extraAllocPages = 1000;

public:
    enum uint alignment = 4096;

    // Inject common function implementations
    mixin AscendingPageAllocatorImpl!false;

    /**
    Rounds the allocation size to the next multiple of the page size.
    The allocation only reserves a range of virtual pages but the actual
    physical memory is allocated on demand, when accessing the memory.

    Params:
    n = Bytes to allocate

    Returns:
    `null` on failure or if the requested size exceeds the remaining capacity.
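
    A sketch of the rounding behaviour (each allocation consumes at least one
    page of the reserved range, even when fewer bytes are requested):
    ---
    import core.memory : pageSize;

    auto a = AscendingPageAllocator(2 * pageSize);
    void[] b = a.allocate(10);
    assert(b.length == 10);
    assert(a.getAvailableSize() == pageSize); // one full page was consumed
    ---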
    */
    void[] allocate(size_t n) nothrow @nogc
    {
        import std.algorithm.comparison : min;

        immutable pagedBytes = numPages * pageSize;
        size_t goodSize = goodAllocSize(n);

        // Requested size exceeds the virtual memory range
        if (goodSize > pagedBytes || offset - data > pagedBytes - goodSize)
            return null;

        // Current allocation exceeds readable/writable memory area
        if (offset + goodSize > readWriteLimit)
        {
            // Extend r/w memory range to new limit
            void* newReadWriteLimit = min(data + pagedBytes,
                offset + goodSize + extraAllocPages * pageSize);
            if (newReadWriteLimit != readWriteLimit)
            {
                assert(newReadWriteLimit > readWriteLimit);
                if (!extendMemoryProtection(readWriteLimit, newReadWriteLimit - readWriteLimit))
                    return null;

                readWriteLimit = newReadWriteLimit;
            }
        }

        void* result = offset;
        offset += goodSize;
        pagesUsed += goodSize / pageSize;

        return cast(void[]) result[0 .. n];
    }

    /**
    Rounds the allocation size to the next multiple of the page size.
    The allocation only reserves a range of virtual pages but the actual
    physical memory is allocated on demand, when accessing the memory.

    The allocated memory is aligned to the specified alignment `a`.

    Params:
    n = Bytes to allocate
    a = Alignment

    Returns:
    `null` on failure or if the requested size exceeds the remaining capacity.
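
    A sketch with an alignment larger than the page size (the returned buffer
    starts at an address that is a multiple of the requested alignment):
    ---
    import core.memory : pageSize;

    auto a = AscendingPageAllocator(8 * pageSize);
    uint alignment = cast(uint) (4 * pageSize);
    void[] b = a.alignedAllocate(pageSize, alignment);
    assert(b.length == pageSize);
    assert((cast(size_t) b.ptr) % alignment == 0);
    ---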
    */
    void[] alignedAllocate(size_t n, uint a) nothrow @nogc
    {
        void* alignedStart = cast(void*) roundUpToMultipleOf(cast(size_t) offset, a);
        assert(alignedStart.alignedAt(a));
        immutable pagedBytes = numPages * pageSize;
        size_t goodSize = goodAllocSize(n);
        if (goodSize > pagedBytes ||
            alignedStart - data > pagedBytes - goodSize)
            return null;

        // Same logic as allocate, only that the buffer must be properly aligned
        auto oldOffset = offset;
        offset = alignedStart;
        auto result = allocate(n);
        if (!result)
            offset = oldOffset;
        return result;
    }

    /**
    If the passed buffer is not the last allocation, then `delta` can be
    at most the number of bytes left on the last page.
    Otherwise, we can expand the last allocation until the end of the virtual
    address range.
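
    A sketch of both cases (an allocator backed by four pages; sizes are rounded
    to whole pages internally):
    ---
    import core.memory : pageSize;

    auto a = AscendingPageAllocator(4 * pageSize);
    void[] b1 = a.allocate(pageSize / 2);
    void[] b2 = a.allocate(pageSize / 2);

    // `b1` is not the last allocation: it can only grow to the end of its page
    assert(a.expand(b1, pageSize / 2));
    assert(!a.expand(b1, 1));

    // `b2` is the last allocation: it can grow across page boundaries
    assert(a.expand(b2, pageSize));
    assert(b2.length == pageSize / 2 + pageSize);
    ---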
    */
    bool expand(ref void[] b, size_t delta) nothrow @nogc
    {
        import std.algorithm.comparison : min;

        if (!delta) return true;
        if (b is null) return false;

        size_t goodSize = goodAllocSize(b.length);
        size_t bytesLeftOnPage = goodSize - b.length;

        // If this is not the last allocation, we can only expand until
        // completely filling the last page covered by this buffer
        if (b.ptr + goodSize != offset && delta > bytesLeftOnPage)
            return false;

        size_t extraPages = 0;

        // If the extra `delta` bytes requested do not fit the last page,
        // compute how many extra pages are needed
        if (delta > bytesLeftOnPage)
        {
            extraPages = goodAllocSize(delta - bytesLeftOnPage) / pageSize;
        }
        else
        {
            b = cast(void[]) b.ptr[0 .. b.length + delta];
            return true;
        }

        if (extraPages > numPages || offset - data > pageSize * (numPages - extraPages))
            return false;

        void* newPtrEnd = b.ptr + goodSize + extraPages * pageSize;
        if (newPtrEnd > readWriteLimit)
        {
            void* newReadWriteLimit = min(data + numPages * pageSize,
                newPtrEnd + extraAllocPages * pageSize);
            if (newReadWriteLimit > readWriteLimit)
            {
                if (!extendMemoryProtection(readWriteLimit, newReadWriteLimit - readWriteLimit))
                    return false;

                readWriteLimit = newReadWriteLimit;
            }
        }

        pagesUsed += extraPages;
        offset += extraPages * pageSize;
        b = cast(void[]) b.ptr[0 .. b.length + delta];
        return true;
    }

    /**
    Returns `Ternary.yes` if the allocator does not contain any alive objects
    and `Ternary.no` otherwise.
    */
    Ternary empty() nothrow @nogc
    {
        return Ternary(pagesUsed == 0);
    }

    /**
    Unmaps the whole virtual address range on destruction.
    */
    ~this() nothrow @nogc
    {
        if (data)
            deallocateAll();
    }
}

///
@system @nogc nothrow unittest
{
    import core.memory : pageSize;

    size_t numPages = 100;
    void[] buf;
    void[] prevBuf = null;
    AscendingPageAllocator a = AscendingPageAllocator(numPages * pageSize);

    foreach (i; 0 .. numPages)
    {
        // Allocation is rounded up to page size
        buf = a.allocate(pageSize - 100);
        assert(buf.length == pageSize - 100);

        // Allocations are served at increasing addresses
        if (prevBuf)
            assert(prevBuf.ptr + pageSize == buf.ptr);

        assert(a.deallocate(buf));
        prevBuf = buf;
    }
}

/**
`SharedAscendingPageAllocator` is the thread-safe version of `AscendingPageAllocator`.
*/
shared struct SharedAscendingPageAllocator
{
    import std.typecons : Ternary;
    import core.internal.spinlock : SpinLock;

    // Docs for mixin functions
    version (StdDdoc)
    {
        /**
        Rounds the mapping size to the next multiple of the page size and calls
        the OS primitive responsible for creating memory mappings: `mmap` on POSIX and
        `VirtualAlloc` on Windows.

        Params:
        n = mapping size in bytes
        */
        this(size_t n) nothrow @nogc;

        /**
        Rounds the requested size to the next multiple of the page size.
        */
        size_t goodAllocSize(size_t n) nothrow @nogc;

        /**
        Decommits all physical memory associated with the buffer passed as a parameter,
        but keeps the range of virtual addresses reserved.

        On POSIX systems `deallocate` calls `mmap` with `MAP_FIXED` a second time to decommit the memory.
        On Windows, it uses `VirtualFree` with `MEM_DECOMMIT`.
        */
        bool deallocate(void[] b) nothrow @nogc;

        /**
        Returns `Ternary.yes` if the passed buffer is inside the range of virtual addresses.
        Does not guarantee that the passed buffer is still valid.
        */
        Ternary owns(void[] buf) nothrow @nogc;

        /**
        Removes the memory mapping, causing all physical memory to be decommitted and
        the virtual address space to be reclaimed.
        */
        bool deallocateAll() nothrow @nogc;

        /**
        Returns the available size for further allocations in bytes.
        */
        size_t getAvailableSize() nothrow @nogc;
    }

private:
    size_t pageSize;
    size_t numPages;

    // The start of the virtual address range
    shared void* data;

    // Keeps track of where the next allocation should start
    shared void* offset;

    // On allocation requests, we make an extra 'extraAllocPages' pages readable/writable.
    // The address up to which we have read/write permissions is stored in 'readWriteLimit'.
    shared void* readWriteLimit;
    enum extraAllocPages = 1000;
    SpinLock lock;

public:
    enum uint alignment = 4096;

    // Inject common function implementations
    mixin AscendingPageAllocatorImpl!true;

    /**
    Rounds the allocation size to the next multiple of the page size.
    The allocation only reserves a range of virtual pages but the actual
    physical memory is allocated on demand, when accessing the memory.

    Params:
    n = Bytes to allocate

    Returns:
    `null` on failure or if the requested size exceeds the remaining capacity.
    */
    void[] allocate(size_t n) nothrow @nogc
    {
        return allocateImpl(n, 1);
    }

    /**
    Rounds the allocation size to the next multiple of the page size.
    The allocation only reserves a range of virtual pages but the actual
    physical memory is allocated on demand, when accessing the memory.

    The allocated memory is aligned to the specified alignment `a`.

    Params:
    n = Bytes to allocate
    a = Alignment

    Returns:
    `null` on failure or if the requested size exceeds the remaining capacity.
    */
    void[] alignedAllocate(size_t n, uint a) nothrow @nogc
    {
        // For regular `allocate` calls, `a` will be set to 1
        return allocateImpl(n, a);
    }

    private void[] allocateImpl(size_t n, uint a) nothrow @nogc
    {
        import std.algorithm.comparison : min;

        size_t localExtraAlloc;
        void* localOffset;
        immutable pagedBytes = numPages * pageSize;
        size_t goodSize = goodAllocSize(n);

        if (goodSize > pagedBytes)
            return null;

        lock.lock();
        scope(exit) lock.unlock();

        localOffset = cast(void*) offset;
        void* alignedStart = cast(void*) roundUpToMultipleOf(cast(size_t) localOffset, a);
        assert(alignedStart.alignedAt(a));
        if (alignedStart - data > pagedBytes - goodSize)
            return null;

        localOffset = alignedStart + goodSize;
        if (localOffset > readWriteLimit)
        {
            void* newReadWriteLimit = min(cast(void*) data + pagedBytes,
                cast(void*) localOffset + extraAllocPages * pageSize);
            assert(newReadWriteLimit > readWriteLimit);
            localExtraAlloc = newReadWriteLimit - readWriteLimit;
            if (!extendMemoryProtection(cast(void*) readWriteLimit, localExtraAlloc))
                return null;
            readWriteLimit = cast(shared(void*)) newReadWriteLimit;
        }

        offset = cast(typeof(offset)) localOffset;
        return cast(void[]) alignedStart[0 .. n];
    }

    /**
    If the passed buffer is not the last allocation, then `delta` can be
    at most the number of bytes left on the last page.
    Otherwise, we can expand the last allocation until the end of the virtual
    address range.
    */
    bool expand(ref void[] b, size_t delta) nothrow @nogc
    {
        import std.algorithm.comparison : min;

        if (!delta) return true;
        if (b is null) return false;

        void* localOffset;
        size_t localExtraAlloc;
        size_t goodSize = goodAllocSize(b.length);
        size_t bytesLeftOnPage = goodSize - b.length;

        if (bytesLeftOnPage >= delta)
        {
            b = cast(void[]) b.ptr[0 .. b.length + delta];
            return true;
        }

        lock.lock();
        scope(exit) lock.unlock();

        localOffset = cast(void*) offset;
        if (b.ptr + goodSize != localOffset)
            return false;

        size_t extraPages = goodAllocSize(delta - bytesLeftOnPage) / pageSize;
        if (extraPages > numPages || localOffset - data > pageSize * (numPages - extraPages))
            return false;

        localOffset = b.ptr + goodSize + extraPages * pageSize;
        if (localOffset > readWriteLimit)
        {
            void* newReadWriteLimit = min(cast(void*) data + numPages * pageSize,
                localOffset + extraAllocPages * pageSize);
            assert(newReadWriteLimit > readWriteLimit);
            localExtraAlloc = newReadWriteLimit - readWriteLimit;
            if (!extendMemoryProtection(cast(void*) readWriteLimit, localExtraAlloc))
                return false;
            readWriteLimit = cast(shared(void*)) newReadWriteLimit;
        }

        offset = cast(typeof(offset)) localOffset;
        b = cast(void[]) b.ptr[0 .. b.length + delta];
        return true;
    }
}

///
@system unittest
{
    import core.memory : pageSize;
    import core.thread : ThreadGroup;

    enum numThreads = 100;
    shared SharedAscendingPageAllocator a = SharedAscendingPageAllocator(pageSize * numThreads);

    void fun()
    {
        void[] b = a.allocate(pageSize);
        assert(b.length == pageSize);

        assert(a.deallocate(b));
    }

    auto tg = new ThreadGroup;
    foreach (i; 0 .. numThreads)
    {
        tg.create(&fun);
    }
    tg.joinAll();
}

version (StdUnittest)
{
    private static void testrw(void[] b) @nogc nothrow
    {
        ubyte* buf = cast(ubyte*) b.ptr;
        buf[0] = 100;
        assert(buf[0] == 100);
        buf[b.length - 1] = 101;
        assert(buf[b.length - 1] == 101);
    }
}

@system @nogc nothrow unittest
{
    static void testAlloc(Allocator)(ref Allocator a) @nogc nothrow
    {
        void[] b1 = a.allocate(1);
        assert(a.getAvailableSize() == 3 * pageSize);
        testrw(b1);
        void[] b2 = a.allocate(2);
        assert(a.getAvailableSize() == 2 * pageSize);
        testrw(b2);
        void[] b3 = a.allocate(pageSize + 1);
        assert(a.getAvailableSize() == 0);

        testrw(b3);
        assert(b1.length == 1);
        assert(b2.length == 2);
        assert(b3.length == pageSize + 1);

        assert(a.offset - a.data == 4 * pageSize);
        void[] b4 = a.allocate(4);
        assert(!b4);

        a.deallocate(b1);
        assert(a.data);
        a.deallocate(b2);
        assert(a.data);
        a.deallocate(b3);
    }

    AscendingPageAllocator a = AscendingPageAllocator(4 * pageSize);
    shared SharedAscendingPageAllocator aa = SharedAscendingPageAllocator(4 * pageSize);

    testAlloc(a);
    testAlloc(aa);
}

@system @nogc nothrow unittest
{
    size_t numPages = 26214;
    AscendingPageAllocator a = AscendingPageAllocator(numPages * pageSize);
    foreach (i; 0 .. numPages)
    {
        void[] buf = a.allocate(pageSize);
        assert(buf.length == pageSize);
        testrw(buf);
        a.deallocate(buf);
    }

    assert(!a.allocate(1));
    assert(a.getAvailableSize() == 0);
}

@system @nogc nothrow unittest
{
    size_t numPages = 26214;
    uint alignment = cast(uint) pageSize;
    AscendingPageAllocator a = AscendingPageAllocator(numPages * pageSize);

    foreach (i; 0 .. numPages)
    {
        void[] buf = a.alignedAllocate(pageSize, alignment);
        assert(buf.length == pageSize);
        testrw(buf);
        a.deallocate(buf);
    }

    assert(!a.allocate(1));
    assert(a.getAvailableSize() == 0);
}

@system @nogc nothrow unittest
{
    static void testAlloc(Allocator)(ref Allocator a) @nogc nothrow
    {
        import std.traits : hasMember;

        size_t numPages = 5;
        uint alignment = cast(uint) pageSize;

        void[] b1 = a.allocate(pageSize / 2);
        assert(b1.length == pageSize / 2);

        void[] b2 = a.alignedAllocate(pageSize / 2, alignment);
        assert(a.expand(b1, pageSize / 2));
        assert(a.expand(b1, 0));
        assert(!a.expand(b1, 1));
        testrw(b1);

        assert(a.expand(b2, pageSize / 2));
        testrw(b2);
        assert(b2.length == pageSize);

        assert(a.getAvailableSize() == pageSize * 3);

        void[] b3 = a.allocate(pageSize / 2);
        assert(a.reallocate(b1, b1.length));
        assert(a.reallocate(b2, b2.length));
        assert(a.reallocate(b3, b3.length));

        assert(b3.length == pageSize / 2);
        testrw(b3);
        assert(a.expand(b3, pageSize / 4));
        testrw(b3);
        assert(a.expand(b3, 0));
        assert(b3.length == pageSize / 2 + pageSize / 4);
        assert(a.expand(b3, pageSize / 4 - 1));
        testrw(b3);
        assert(a.expand(b3, 0));
        assert(b3.length == pageSize - 1);
        assert(a.expand(b3, 2));
        assert(a.expand(b3, 0));
        assert(a.getAvailableSize() == pageSize);
        assert(b3.length == pageSize + 1);
        testrw(b3);

        assert(a.reallocate(b1, b1.length));
        assert(a.reallocate(b2, b2.length));
        assert(a.reallocate(b3, b3.length));

        assert(a.reallocate(b3, 2 * pageSize));
        testrw(b3);
        assert(a.reallocate(b1, pageSize - 1));
        testrw(b1);
        assert(a.expand(b1, 1));
        testrw(b1);
        assert(!a.expand(b1, 1));

        a.deallocate(b1);
        a.deallocate(b2);
        a.deallocate(b3);
    }

    size_t numPages = 5;
    uint alignment = cast(uint) pageSize;
    AscendingPageAllocator a = AscendingPageAllocator(numPages * pageSize);
    shared SharedAscendingPageAllocator aa = SharedAscendingPageAllocator(numPages * pageSize);

    testAlloc(a);
    testAlloc(aa);
}

@system @nogc nothrow unittest
{
    size_t numPages = 21000;
    enum testNum = 100;
    enum allocPages = 10;
    void[][testNum] buf;
    AscendingPageAllocator a = AscendingPageAllocator(numPages * pageSize);

    for (int i = 0; i < numPages; i += testNum * allocPages)
    {
        foreach (j; 0 .. testNum)
        {
            buf[j] = a.allocate(pageSize * allocPages);
            testrw(buf[j]);
        }

        foreach (j; 0 .. testNum)
        {
            a.deallocate(buf[j]);
        }
    }
}

@system @nogc nothrow unittest
{
    size_t numPages = 21000;
    enum testNum = 100;
    enum allocPages = 10;
    void[][testNum] buf;
    shared SharedAscendingPageAllocator a = SharedAscendingPageAllocator(numPages * pageSize);

    for (int i = 0; i < numPages; i += testNum * allocPages)
    {
        foreach (j; 0 .. testNum)
        {
            buf[j] = a.allocate(pageSize * allocPages);
            testrw(buf[j]);
        }

        foreach (j; 0 .. testNum)
        {
            a.deallocate(buf[j]);
        }
    }
}

@system @nogc nothrow unittest
{
    enum numPages = 2;
    AscendingPageAllocator a = AscendingPageAllocator(numPages * pageSize);
    void[] b = a.allocate((numPages + 1) * pageSize);
    assert(b is null);
    b = a.allocate(1);
    assert(b.length == 1);
    assert(a.getAvailableSize() == pageSize);
    a.deallocateAll();
    assert(!a.data && !a.offset);
}

@system @nogc nothrow unittest
{
    enum numPages = 26;
    AscendingPageAllocator a = AscendingPageAllocator(numPages * pageSize);
    uint alignment = cast(uint) ((numPages / 2) * pageSize);
    void[] b = a.alignedAllocate(pageSize, alignment);
    assert(b.length == pageSize);
    testrw(b);
    assert(b.ptr.alignedAt(alignment));
    a.deallocateAll();
    assert(!a.data && !a.offset);
}

@system @nogc nothrow unittest
{
    enum numPages = 10;
    AscendingPageAllocator a = AscendingPageAllocator(numPages * pageSize);
    uint alignment = cast(uint) (2 * pageSize);

    void[] b1 = a.alignedAllocate(pageSize, alignment);
    assert(b1.length == pageSize);
    testrw(b1);
    assert(b1.ptr.alignedAt(alignment));

    void[] b2 = a.alignedAllocate(pageSize, alignment);
    assert(b2.length == pageSize);
    testrw(b2);
    assert(b2.ptr.alignedAt(alignment));

    void[] b3 = a.alignedAllocate(pageSize, alignment);
    assert(b3.length == pageSize);
    testrw(b3);
    assert(b3.ptr.alignedAt(alignment));

    void[] b4 = a.allocate(pageSize);
    assert(b4.length == pageSize);
    testrw(b4);

    assert(a.deallocate(b1));
    assert(a.deallocate(b2));
    assert(a.deallocate(b3));
    assert(a.deallocate(b4));

    a.deallocateAll();
    assert(!a.data && !a.offset);
}

@system unittest
{
    import core.thread : ThreadGroup;
    import std.algorithm.sorting : sort;
    import core.internal.spinlock : SpinLock;

    enum numThreads = 100;
    SpinLock lock = SpinLock(SpinLock.Contention.brief);
    ulong[numThreads] ptrVals;
    size_t count = 0;
    shared SharedAscendingPageAllocator a = SharedAscendingPageAllocator(pageSize * numThreads);

    void fun()
    {
        void[] b = a.allocate(4000);
        assert(b.length == 4000);

        assert(a.expand(b, 96));
        assert(b.length == 4096);

        lock.lock();
        ptrVals[count] = cast(ulong) b.ptr;
        count++;
        lock.unlock();
    }

    auto tg = new ThreadGroup;
    foreach (i; 0 .. numThreads)
    {
        tg.create(&fun);
    }
    tg.joinAll();

    ptrVals[].sort();
    foreach (i; 0 .. numThreads - 1)
    {
        assert(ptrVals[i] + pageSize == ptrVals[i + 1]);
    }
}

@system unittest
{
    import core.thread : ThreadGroup;
    import std.algorithm.sorting : sort;
    import core.internal.spinlock : SpinLock;

    SpinLock lock = SpinLock(SpinLock.Contention.brief);
    enum numThreads = 100;
    void[][numThreads] buf;
    size_t count = 0;
    shared SharedAscendingPageAllocator a = SharedAscendingPageAllocator(2 * pageSize * numThreads);

    void fun()
    {
        enum expand = 96;
        void[] b = a.allocate(pageSize - expand);
        assert(b.length == pageSize - expand);

        assert(a.expand(b, expand));
        assert(b.length == pageSize);

        a.expand(b, pageSize);
        assert(b.length == pageSize || b.length == pageSize * 2);

        lock.lock();
        buf[count] = b;
        count++;
        lock.unlock();
    }

    auto tg = new ThreadGroup;
    foreach (i; 0 .. numThreads)
    {
        tg.create(&fun);
    }
    tg.joinAll();

    sort!((a, b) => a.ptr < b.ptr)(buf[0 .. numThreads]);
    foreach (i; 0 .. numThreads - 1)
    {
        assert(buf[i].ptr + buf[i].length == buf[i + 1].ptr);
    }
}