// Written in the D programming language.
/**
Source: $(PHOBOSSRC std/experimental/allocator/building_blocks/kernighan_ritchie.d)
*/
module std.experimental.allocator.building_blocks.kernighan_ritchie;
import std.experimental.allocator.building_blocks.null_allocator :
    NullAllocator;

//debug = KRRegion;
debug(KRRegion) import std.stdio;

// KRRegion
/**
`KRRegion` draws inspiration from the $(MREF_ALTTEXT region allocation
strategy, std,experimental,allocator,building_blocks,region) and also the
$(HTTP stackoverflow.com/questions/13159564/explain-this-implementation-of-malloc-from-the-kr-book,
famed allocator) described by Brian Kernighan and Dennis Ritchie in section 8.7
of the book $(HTTP amazon.com/exec/obidos/ASIN/0131103628/classicempire, "The C
Programming Language"), Second Edition, Prentice Hall, 1988.

$(H4 `KRRegion` = `Region` + Kernighan-Ritchie Allocator)

`KRRegion` starts in "region" mode: allocations are served from the memory
chunk in a region fashion. Thus, as long as there is enough memory left,
`KRRegion.allocate` has the performance profile of a region allocator.
Deallocation inserts (in $(BIGOH 1) time) the deallocated blocks in an
unstructured freelist, which is not read in region mode.

Once the region cannot serve an `allocate` request, `KRRegion` switches
to "free list" mode. It sorts the list of previously deallocated blocks by
address and serves allocation requests off that free list. Allocation and
deallocation then follow the pattern described by Kernighan and Ritchie.
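
Below is a minimal sketch of this lifecycle (the buffer and request sizes are
arbitrary):

---
// Starts in region mode with a user-provided buffer
align(KRRegion!().alignment) ubyte[4096] buf;
auto alloc = KRRegion!()(buf);
auto a = alloc.allocate(512);   // served off the region tip
auto b = alloc.allocate(512);
alloc.deallocate(a);            // O(1) insertion into the freelist
auto c = alloc.allocate(8192);  // cannot be served: switches to free list mode
assert(c is null);              // no free block that large exists
---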

The recommended use of `KRRegion` is as a $(I region with deallocation). If the
`KRRegion` is dimensioned appropriately, it may never enter free list mode
during its lifetime. Thus it is as fast as a simple region, whilst offering
deallocation at a small cost. When the region memory is exhausted, the
previously deallocated memory is still usable, at a performance cost. If the
region is not excessively large and fragmented, the linear allocation and
deallocation cost may still be compensated for by the good locality
characteristics.

If the chunk of memory managed is large, it may be desirable to switch
management to free list from the beginning. That way, memory may be used more
compactly than in region mode. To force free list mode, call $(D
switchToFreeList) shortly after construction, or whenever deemed appropriate,
as sketched below.
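
A minimal sketch (the buffer and request sizes are illustrative):

---
align(KRRegion!().alignment) ubyte[1024 * 64] buf;
auto alloc = KRRegion!()(buf);
alloc.switchToFreeList();   // use first-fit free list mode from the start
auto p = alloc.allocate(100);
assert(p.length == 100);
---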

The smallest size that can be allocated is two words (16 bytes on 64-bit
systems, 8 bytes on 32-bit systems). This is because the free list management
needs two words (one for the length, the other for the next pointer in the
singly-linked list).
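
For example, `goodAllocSize` reflects this minimum (the values shown assume a
64-bit system, where a word is 8 bytes):

---
assert(KRRegion!().goodAllocSize(1) == 16);   // rounded up to two words
assert(KRRegion!().goodAllocSize(17) == 24);  // then to a word multiple
---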

The `ParentAllocator` type parameter is the type of the allocator used to
allocate the memory chunk underlying the `KRRegion` object. Choosing the
default (`NullAllocator`) means the user is responsible for passing a buffer
at construction (and for deallocating it if necessary). Otherwise, `KRRegion`
automatically deallocates the buffer during destruction. For that reason, if
`ParentAllocator` is not `NullAllocator`, then `KRRegion` is not
copyable.
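
A sketch of both configurations (the choice of `Mallocator` is illustrative):

---
import std.experimental.allocator.mallocator : Mallocator;

// User-provided buffer; the caller owns the memory's lifetime.
align(KRRegion!().alignment) ubyte[1024] buf;
auto a = KRRegion!()(buf);

// Parent-backed; the destructor returns the chunk to Mallocator.
auto b = KRRegion!Mallocator(1024 * 1024);
---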

$(H4 Implementation Details)

In free list mode, `KRRegion` embeds a free blocks list onto the chunk of
memory. The free list is circular, coalesced, and sorted by address at all
times. Allocations and deallocations take time proportional to the number of
previously deallocated blocks. (In practice the cost may be lower, e.g. if
memory is deallocated in reverse order of allocation, all operations take
constant time.) Memory utilization is good (small control structure and no
per-allocation overhead). The disadvantages of freelist mode include proneness
to fragmentation, a minimum allocation size of two words, and linear worst-case
allocation and deallocation times.

Similarities of `KRRegion` (in free list mode) with the
Kernighan-Ritchie allocator:

$(UL
$(LI Free blocks have variable size and are linked in a singly-linked list.)
$(LI The freelist is maintained in increasing address order, which makes
coalescing easy.)
$(LI The strategy for finding the next available block is first fit.)
$(LI The free list is circular, with the last node pointing back to the first.)
$(LI Coalescing is carried out during deallocation.)
)

Differences from the Kernighan-Ritchie allocator:

$(UL
$(LI Once the chunk is exhausted, the Kernighan-Ritchie allocator allocates
another chunk using operating system primitives. For better composability, $(D
KRRegion) just gets full (returns `null` on new allocation requests). The
decision to allocate more blocks is deferred to a higher-level entity. For an
example, see `AllocatorList` used in conjunction with $(D KRRegion) below.)
$(LI Allocated blocks do not hold a size prefix. This is because in D the size
information is available in client code at deallocation time.)
)

*/
struct KRRegion(ParentAllocator = NullAllocator)
{
    import std.experimental.allocator.common : stateSize, alignedAt;
    import std.traits : hasMember;
    import std.typecons : Ternary;

    private static struct Node
    {
        import std.typecons : tuple, Tuple;

        Node* next;
        size_t size;

        this(this) @disable;

        nothrow @nogc @trusted
        void[] payload() inout
        {
            return (cast(ubyte*) &this)[0 .. size];
        }

        nothrow @nogc @trusted
        bool adjacent(in Node* right) const
        {
            assert(right);
            auto p = payload;
            return p.ptr < right && right < p.ptr + p.length + Node.sizeof;
        }

        nothrow @nogc @trusted
        bool coalesce(void* memoryEnd = null)
        {
            // Coalesce the last node before the memory end with any possible gap
            if (memoryEnd
                && memoryEnd < payload.ptr + payload.length + Node.sizeof)
            {
                size += memoryEnd - (payload.ptr + payload.length);
                return true;
            }

            if (!adjacent(next)) return false;
            size = (cast(ubyte*) next + next.size) - cast(ubyte*) &this;
            next = next.next;
            return true;
        }

        nothrow @nogc @safe
        Tuple!(void[], Node*) allocateHere(size_t bytes)
        {
            assert(bytes >= Node.sizeof);
            assert(bytes % Node.alignof == 0);
            assert(next);
            assert(!adjacent(next));
            if (size < bytes) return typeof(return)();
            assert(size >= bytes);
            immutable leftover = size - bytes;

            if (leftover >= Node.sizeof)
            {
                // There's room for another node
                auto newNode = (() @trusted => cast(Node*) ((cast(ubyte*) &this) + bytes))();
                newNode.size = leftover;
                newNode.next = next == &this ? newNode : next;
                assert(next);
                return tuple(payload, newNode);
            }

            // No slack space, just return next node
            return tuple(payload, next == &this ? null : next);
        }
    }

    // state
    /**
    If `ParentAllocator` holds state, `parent` is a public member of type
    `ParentAllocator`. Otherwise, `parent` is an `alias` for
    `ParentAllocator.instance`.
    */
    static if (stateSize!ParentAllocator) ParentAllocator parent;
    else alias parent = ParentAllocator.instance;
    private void[] payload;
    private Node* root;
    nothrow @nogc @safe private bool regionMode() const { return bytesUsedRegionMode != size_t.max; }
    nothrow @nogc @safe private void cancelRegionMode() { bytesUsedRegionMode = size_t.max; }
    private size_t bytesUsedRegionMode = 0;

    auto byNodePtr()
    {
        static struct Range
        {
            Node* start, current;
            @property bool empty() { return !current; }
            @property Node* front() { return current; }
            void popFront()
            {
                assert(current && current.next);
                current = current.next;
                if (current == start) current = null;
            }
            @property Range save() { return this; }
        }
        import std.range : isForwardRange;
        static assert(isForwardRange!Range);
        return Range(root, root);
    }

    string toString()
    {
        import std.format : format;
        string s = "KRRegion@";
        s ~= format("%s-%s(0x%s[%s] %s", &this, &this + 1,
            payload.ptr, payload.length,
            regionMode ? "(region)" : "(freelist)");

        Node* lastNode = null;
        if (!regionMode)
        {
            foreach (node; byNodePtr)
            {
                s ~= format(", %sfree(0x%s[%s])",
                    lastNode && lastNode.adjacent(node) ? "+" : "",
                    cast(void*) node, node.size);
                lastNode = node;
            }
        }
        else
        {
            for (auto node = root; node; node = node.next)
            {
                s ~= format(", %sfree(0x%s[%s])",
                    lastNode && lastNode.adjacent(node) ? "+" : "",
                    cast(void*) node, node.size);
                lastNode = node;
            }
        }

        s ~= ')';
        return s;
    }

    private void assertValid(string s)
    {
        assert(!regionMode);
        if (!payload.ptr)
        {
            assert(!root, s);
            return;
        }
        if (!root)
        {
            return;
        }
        assert(root >= payload.ptr, s);
        assert(root < payload.ptr + payload.length, s);

        // Check that the list terminates
        size_t n;
        foreach (node; byNodePtr)
        {
            assert(node.next);
            assert(!node.adjacent(node.next));
            assert(n++ < payload.length / Node.sizeof, s);
        }
    }

    nothrow @nogc @safe
    private Node* sortFreelist(Node* root)
    {
        // Find a monotonic run
        auto last = root;
        for (;;)
        {
            if (!last.next) return root;
            if (last > last.next) break;
            assert(last < last.next);
            last = last.next;
        }
        auto tail = last.next;
        last.next = null;
        tail = sortFreelist(tail);
        return merge(root, tail);
    }

    nothrow @nogc @safe
    private Node* merge(Node* left, Node* right)
    {
        assert(left != right);
        if (!left) return right;
        if (!right) return left;
        if (left < right)
        {
            auto result = left;
            result.next = merge(left.next, right);
            return result;
        }
        auto result = right;
        result.next = merge(left, right.next);
        return result;
    }

    nothrow @nogc @safe
    private void coalesceAndMakeCircular()
    {
        for (auto n = root;;)
        {
            assert(!n.next || n < n.next);
            if (!n.next)
            {
                // Convert to circular
                n.next = root;
                break;
            }
            if (n.coalesce) continue; // possibly another coalesce
            n = n.next;
        }
    }

    /**
    Create a `KRRegion`. If `ParentAllocator` is not `NullAllocator`,
    `KRRegion`'s destructor will call `parent.deallocate`.

    Params:
    b = Block of memory to serve as support for the allocator. The memory must
    be word-aligned and at least four words long; a block shorter than two
    words initializes an empty allocator.
    n = Capacity desired. This constructor is defined only if $(D
    ParentAllocator) is not `NullAllocator`.
    */
    this(ubyte[] b)
    {
        if (b.length < Node.sizeof)
        {
            // Init as empty
            assert(root is null);
            assert(payload is null);
            return;
        }
        assert(b.length >= Node.sizeof);
        assert(b.ptr.alignedAt(Node.alignof));
        assert(b.length >= 2 * Node.sizeof);
        payload = b;
        root = cast(Node*) b.ptr;
        // Initialize the free list with a single node covering the entire block
        assert(regionMode);
        root.next = null;
        root.size = b.length;
        debug(KRRegion) writefln("KRRegion@%s: init with %s[%s]", &this,
            b.ptr, b.length);
    }

    /// Ditto
    static if (!is(ParentAllocator == NullAllocator) && !stateSize!ParentAllocator)
    this(size_t n)
    {
        assert(n > Node.sizeof);
        this(cast(ubyte[])(parent.allocate(n)));
    }

    /// Ditto
    static if (!is(ParentAllocator == NullAllocator) && stateSize!ParentAllocator)
    this(ParentAllocator parent, size_t n)
    {
        assert(n > Node.sizeof);
        this.parent = parent;
        this(cast(ubyte[])(parent.allocate(n)));
    }

    /// Ditto
    static if (!is(ParentAllocator == NullAllocator)
        && hasMember!(ParentAllocator, "deallocate"))
    ~this()
    {
        parent.deallocate(payload);
    }

    /**
    Forces free list mode. If already in free list mode, does nothing.
    Otherwise, sorts the free list accumulated so far and switches strategy for
    future allocations to KR style.
    */
    nothrow @nogc @safe
    void switchToFreeList()
    {
        if (!regionMode) return;
        cancelRegionMode;
        if (!root) return;
        root = sortFreelist(root);
        coalesceAndMakeCircular;
    }

    /*
    Noncopyable
    */
    @disable this(this);

    /**
    Word-level alignment.
    */
    enum alignment = Node.alignof;

    /**
    Allocates `n` bytes. Allocation searches the list of available blocks
    until a free block with `n` or more bytes is found (first fit strategy).
    The block is split (if larger) and returned.

    Params: n = number of bytes to _allocate

    Returns: A word-aligned buffer of `n` bytes, or `null`.
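
    A minimal sketch (buffer size illustrative):
    ---
    align(KRRegion!().alignment) ubyte[1024] storage;
    auto alloc = KRRegion!()(storage);
    auto buf = alloc.allocate(42);
    assert(buf.length == 42);
    ---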
    */
    nothrow @nogc @safe
    void[] allocate(size_t n)
    {
        if (!n || !root) return null;
        const actualBytes = goodAllocSize(n);

        // Try the region first
        if (regionMode)
        {
            // Only look at the head of the freelist
            if (root.size >= actualBytes)
            {
                // Enough room for allocation
                bytesUsedRegionMode += actualBytes;
                void* result = root;
                immutable balance = root.size - actualBytes;
                if (balance >= Node.sizeof)
                {
                    auto newRoot = (() @trusted => cast(Node*) ((cast(ubyte*) result) + actualBytes))();
                    newRoot.next = root.next;
                    newRoot.size = balance;
                    root = newRoot;
                }
                else
                {
                    root = null;
                    switchToFreeList;
                }
                return (() @trusted => result[0 .. n])();
            }

            // Not enough memory, switch to freelist mode and fall through
            switchToFreeList;
        }

        // Try to allocate from next after the iterating node
        for (auto pnode = root;;)
        {
            assert(!pnode.adjacent(pnode.next));
            auto k = pnode.next.allocateHere(actualBytes);
            if (k[0] !is null)
            {
                // Allocation succeeded; carve the block out of the free list
                assert(k[0].length >= n);
                if (root == pnode.next) root = k[1];
                pnode.next = k[1];
                return k[0][0 .. n];
            }

            pnode = pnode.next;
            if (pnode == root) break;
        }
        return null;
    }

    /**
    Deallocates `b`, which is assumed to have been previously allocated with
    this allocator. Deallocation performs a linear search in the free list to
    preserve its sort order. Consequently, when the allocator has many free
    blocks, blocks at higher addresses take longer to deallocate.

    Params: b = block to be deallocated
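
    For illustration (sizes are arbitrary):
    ---
    align(KRRegion!().alignment) ubyte[1024] storage;
    auto alloc = KRRegion!()(storage);
    auto buf = alloc.allocate(100);
    assert(alloc.deallocate(buf));   // true: the block was released
    ---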
    */
    nothrow @nogc
    bool deallocate(void[] b)
    {
        debug(KRRegion) writefln("KRRegion@%s: deallocate(%s[%s])", &this,
            b.ptr, b.length);
        if (!b.ptr) return true;
        assert(owns(b) == Ternary.yes);
        assert(b.ptr.alignedAt(Node.alignof));

        // Insert back in the freelist, keeping it sorted by address. Do not
        // coalesce at this time. Instead, do it lazily during allocation.
        auto n = cast(Node*) b.ptr;
        n.size = goodAllocSize(b.length);
        auto memoryEnd = payload.ptr + payload.length;

        if (regionMode)
        {
            assert(root);
            // Insert right after root
            bytesUsedRegionMode -= n.size;
            n.next = root.next;
            root.next = n;
            return true;
        }

        if (!root)
        {
            // Freelist was empty: n becomes the sole node, linked to itself
            root = n;
            root.next = root;

            // If the first block freed is the last one allocated,
            // maybe there's a gap after it.
            root.coalesce(memoryEnd);
            return true;
        }

        version (assert) foreach (test; byNodePtr)
        {
            assert(test != n);
        }
        // Linear search
        auto pnode = root;
        do
        {
            assert(pnode && pnode.next);
            assert(pnode != n);
            assert(pnode.next != n);

            if (pnode < pnode.next)
            {
                if (pnode > n || n > pnode.next) continue;
                // Insert in between pnode and pnode.next
                n.next = pnode.next;
                pnode.next = n;
                n.coalesce;
                pnode.coalesce;
                root = pnode;
                return true;
            }
            else if (pnode < n)
            {
                // Insert at the end of the list
                // Add any possible gap at the end of n to the length of n
                n.next = pnode.next;
                pnode.next = n;
                n.coalesce(memoryEnd);
                pnode.coalesce;
                root = pnode;
                return true;
            }
            else if (n < pnode.next)
            {
                // Insert at the front of the list
                n.next = pnode.next;
                pnode.next = n;
                n.coalesce;
                root = n;
                return true;
            }
        }
        while ((pnode = pnode.next) != root);
        assert(0, "Wrong parameter passed to deallocate");
    }

    /**
    Allocates all memory available to this allocator. If the allocator is empty,
    returns the entire available block of memory. Otherwise, it still performs
    a best-effort allocation: if there is no fragmentation (e.g. `allocate`
    has been used but not `deallocate`), allocates and returns the only
    available block of memory.

    The operation takes time proportional to the number of adjacent free blocks
    at the front of the free list. These blocks get coalesced, whether
    `allocateAll` succeeds or fails due to fragmentation.
    */
    nothrow @nogc @safe
    void[] allocateAll()
    {
        if (regionMode) switchToFreeList;
        if (root && root.next == root)
            return allocate(root.size);
        return null;
    }

    ///
    @system unittest
    {
        import std.experimental.allocator.gc_allocator : GCAllocator;
        auto alloc = KRRegion!GCAllocator(1024 * 64);
        const b1 = alloc.allocate(2048);
        assert(b1.length == 2048);
        const b2 = alloc.allocateAll;
        assert(b2.length == 1024 * 62);
    }

    /**
    Deallocates all memory currently allocated, making the allocator ready for
    other allocations. This is a $(BIGOH 1) operation.
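
    For example:
    ---
    import std.typecons : Ternary;

    align(KRRegion!().alignment) ubyte[1024] storage;
    auto alloc = KRRegion!()(storage);
    auto buf = alloc.allocate(100);
    assert(alloc.deallocateAll());       // resets the allocator
    assert(alloc.empty == Ternary.yes);  // all memory is available again
    ---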
    */
    pure nothrow @nogc
    bool deallocateAll()
    {
        debug(KRRegion) assertValid("deallocateAll");
        debug(KRRegion) scope(exit) assertValid("deallocateAll");
        root = cast(Node*) payload.ptr;

        // Reset to regionMode
        bytesUsedRegionMode = 0;
        if (root)
        {
            root.next = null;
            root.size = payload.length;
        }
        return true;
    }

    /**
    Checks whether the allocator is responsible for the allocation of `b`.
    It does a simple $(BIGOH 1) range check. `b` should be a buffer either
    allocated with `this` or obtained through other means.
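
    For example:
    ---
    import std.typecons : Ternary;

    align(KRRegion!().alignment) ubyte[1024] storage;
    auto alloc = KRRegion!()(storage);
    auto buf = alloc.allocate(10);
    assert(alloc.owns(buf) == Ternary.yes);
    assert(alloc.owns(new void[](10)) == Ternary.no);  // GC memory, not ours
    ---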
    */
    pure nothrow @trusted @nogc
    Ternary owns(void[] b)
    {
        debug(KRRegion) assertValid("owns");
        debug(KRRegion) scope(exit) assertValid("owns");
        return Ternary(b && payload && (&b[0] >= &payload[0])
                       && (&b[0] < &payload[0] + payload.length));
    }

    /**
    Adjusts `n` to a size suitable for allocation (two words or larger,
    word-aligned).
    */
    pure nothrow @safe @nogc
    static size_t goodAllocSize(size_t n)
    {
        import std.experimental.allocator.common : roundUpToMultipleOf;
        return n <= Node.sizeof
            ? Node.sizeof : n.roundUpToMultipleOf(alignment);
    }

    /**
    Returns: `Ternary.yes` if the allocator is empty, `Ternary.no` otherwise.
    Never returns `Ternary.unknown`.
    */
    pure nothrow @safe @nogc
    Ternary empty()
    {
        if (regionMode)
            return Ternary(bytesUsedRegionMode == 0);

        return Ternary(root && root.size == payload.length);
    }
}

/**
`KRRegion` is preferable to `Region` as a front for a general-purpose
allocator if `deallocate` is needed, yet the actual deallocation traffic is
relatively low. The example below shows a `KRRegion` using stack storage
fronting the GC allocator.
*/
@system unittest
{
    import std.experimental.allocator.building_blocks.fallback_allocator
        : fallbackAllocator;
    import std.experimental.allocator.gc_allocator : GCAllocator;
    import std.typecons : Ternary;
    // KRRegion fronting a general-purpose allocator
    align(KRRegion!().alignment) ubyte[1024 * 128] buf;
    auto alloc = fallbackAllocator(KRRegion!()(buf), GCAllocator.instance);
    auto b = alloc.allocate(100);
    assert(b.length == 100);
    assert((() pure nothrow @safe @nogc => alloc.primary.owns(b))() == Ternary.yes);
}

/**
The code below defines a scalable allocator consisting of 1 MB (or larger)
blocks mapped from the operating system via `MmapAllocator`. Each block is
organized as a KR-style heap. More blocks are allocated and freed as needed.

This is the closest example to the allocator introduced in the K$(AMP)R book.
It should perform slightly better because instead of searching through one
large free list, it searches through several shorter lists in LRU order. Also,
it actually returns memory to the operating system when possible.
*/
@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.allocator_list
        : AllocatorList;
    import std.experimental.allocator.mmap_allocator : MmapAllocator;
    AllocatorList!(n => KRRegion!MmapAllocator(max(n * 16, 1024 * 1024))) alloc;
}

@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.allocator_list
        : AllocatorList;
    import std.experimental.allocator.mallocator : Mallocator;
    import std.typecons : Ternary;
    /*
    Create a scalable allocator consisting of 1 MB (or larger) blocks fetched
    from the C heap via `Mallocator`. Each block is organized as a KR-style
    heap. More blocks are allocated and freed as needed.
    */
    AllocatorList!(n => KRRegion!Mallocator(max(n * 16, 1024 * 1024)),
        NullAllocator) alloc;
    void[][50] array;
    foreach (i; 0 .. array.length)
    {
        auto length = i * 10_000 + 1;
        array[i] = alloc.allocate(length);
        assert(array[i].ptr);
        assert(array[i].length == length);
    }
    import std.random : randomShuffle;
    randomShuffle(array[]);
    foreach (i; 0 .. array.length)
    {
        assert(array[i].ptr);
        assert((() pure nothrow @safe @nogc => alloc.owns(array[i]))() == Ternary.yes);
        () nothrow @nogc { alloc.deallocate(array[i]); }();
    }
}

@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.allocator_list
        : AllocatorList;
    import std.experimental.allocator.mmap_allocator : MmapAllocator;
    import std.typecons : Ternary;
    /*
    Create a scalable allocator consisting of 1 MB (or larger) blocks mapped
    from the operating system via `MmapAllocator`. Each block is organized as
    a KR-style heap. More blocks are allocated and freed as needed.
    */
    AllocatorList!((n) {
        auto result = KRRegion!MmapAllocator(max(n * 2, 1024 * 1024));
        return result;
    }) alloc;
    void[][99] array;
    foreach (i; 0 .. array.length)
    {
        auto length = i * 10_000 + 1;
        array[i] = alloc.allocate(length);
        assert(array[i].ptr);
        foreach (j; 0 .. i)
        {
            assert(array[i].ptr != array[j].ptr);
        }
        assert(array[i].length == length);
    }
    import std.random : randomShuffle;
    randomShuffle(array[]);
    foreach (i; 0 .. array.length)
    {
        assert((() pure nothrow @safe @nogc => alloc.owns(array[i]))() == Ternary.yes);
        () nothrow @nogc { alloc.deallocate(array[i]); }();
    }
}

version (StdUnittest)
@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.allocator_list
        : AllocatorList;
    import std.experimental.allocator.common : testAllocator;
    import std.experimental.allocator.gc_allocator : GCAllocator;
    testAllocator!(() => AllocatorList!(
        n => KRRegion!GCAllocator(max(n * 16, 1024 * 1024)))());
}

@system unittest
{
    import std.experimental.allocator.gc_allocator : GCAllocator;

    auto alloc = KRRegion!GCAllocator(1024 * 1024);

    void[][] array;
    foreach (i; 1 .. 4)
    {
        array ~= alloc.allocate(i);
        assert(array[$ - 1].length == i);
    }
    () nothrow @nogc { alloc.deallocate(array[1]); }();
    () nothrow @nogc { alloc.deallocate(array[0]); }();
    () nothrow @nogc { alloc.deallocate(array[2]); }();
    assert(alloc.allocateAll().length == 1024 * 1024);
}

@system unittest
{
    import std.experimental.allocator.gc_allocator : GCAllocator;
    import std.typecons : Ternary;
    auto alloc = KRRegion!()(
                    cast(ubyte[])(GCAllocator.instance.allocate(1024 * 1024)));
    const store = alloc.allocate(KRRegion!().sizeof);
    auto p = cast(KRRegion!()* ) store.ptr;
    import core.lifetime : emplace;
    import core.stdc.string : memcpy;
    import std.conv : text;

    memcpy(p, &alloc, alloc.sizeof);
    emplace(&alloc);

    void[][100] array;
    foreach (i; 0 .. array.length)
    {
        auto length = 100 * i + 1;
        array[i] = p.allocate(length);
        assert(array[i].length == length, text(array[i].length));
        assert((() pure nothrow @safe @nogc => p.owns(array[i]))() == Ternary.yes);
    }
    import std.random : randomShuffle;
    randomShuffle(array[]);
    foreach (i; 0 .. array.length)
    {
        assert((() pure nothrow @safe @nogc => p.owns(array[i]))() == Ternary.yes);
        () nothrow @nogc { p.deallocate(array[i]); }();
    }
    auto b = p.allocateAll();
    assert(b.length == 1024 * 1024 - KRRegion!().sizeof, text(b.length));
}

@system unittest
{
    import std.typecons : Ternary;
    import std.experimental.allocator.gc_allocator : GCAllocator;
    auto alloc = KRRegion!()(
                    cast(ubyte[])(GCAllocator.instance.allocate(1024 * 1024)));
    auto p = alloc.allocateAll();
    assert(p.length == 1024 * 1024);
    assert((() nothrow @nogc => alloc.deallocateAll())());
    assert(alloc.empty() == Ternary.yes);
    p = alloc.allocateAll();
    assert(p.length == 1024 * 1024);
}

@system unittest
{
    import std.random : randomCover;
    import std.typecons : Ternary;

    // Both sequences must work on either system

    // A sequence of allocations which triggers the error described in
    // https://issues.dlang.org/show_bug.cgi?id=16564, that is, a gap at the
    // end of buf from the perspective of the allocator

    // for 64-bit systems (leftover balance = 8 bytes < 16)
    int[] sizes64 = [18904, 2008, 74904, 224, 111904, 1904, 52288, 8];

    // for 32-bit systems (leftover balance < 8)
    int[] sizes32 = [81412, 107068, 49892, 23768];

    void test(int[] sizes)
    {
        align(size_t.sizeof) ubyte[256 * 1024] buf;
        auto a = KRRegion!()(buf);

        void[][] bufs;

        foreach (size; sizes)
        {
            bufs ~= a.allocate(size);
        }

        foreach (b; bufs.randomCover)
        {
            () nothrow @nogc { a.deallocate(b); }();
        }

        assert((() pure nothrow @safe @nogc => a.empty)() == Ternary.yes);
    }

    test(sizes64);
    test(sizes32);
}

@system unittest
{
    import std.typecons : Ternary;

    // For 64 bits, we allocate in multiples of 8, but the minimum alloc size is 16.
    // This can create gaps.
    // This test is an example of such a case. The gap is formed between the block
    // allocated for the second value in sizes and the third. There is also a gap
    // at the very end. (total lost 2 * word)

    int[] sizes64 = [2008, 18904, 74904, 224, 111904, 1904, 52288, 8];
    int[] sizes32 = [81412, 107068, 49892, 23768];

    int word64 = 8;
    int word32 = 4;

    void test(int[] sizes, int word)
    {
        align(size_t.sizeof) ubyte[256 * 1024] buf;
        auto a = KRRegion!()(buf);

        void[][] bufs;

        foreach (size; sizes)
        {
            bufs ~= a.allocate(size);
        }

        () nothrow @nogc { a.deallocate(bufs[1]); }();
        bufs ~= a.allocate(sizes[1] - word);

        () nothrow @nogc { a.deallocate(bufs[0]); }();
        foreach (i; 2 .. bufs.length)
        {
            () nothrow @nogc { a.deallocate(bufs[i]); }();
        }

        assert((() pure nothrow @safe @nogc => a.empty)() == Ternary.yes);
    }

    test(sizes64, word64);
    test(sizes32, word32);
}

@system unittest
{
    import std.experimental.allocator.gc_allocator : GCAllocator;

    auto a = KRRegion!GCAllocator(1024 * 1024);
    assert((() pure nothrow @safe @nogc => a.goodAllocSize(1))() == typeof(*a.root).sizeof);
}

@system unittest
{
    import std.typecons : Ternary;

    align(KRRegion!().alignment) ubyte[1024] b;
    auto alloc = KRRegion!()(b);

    auto k = alloc.allocate(128);
    assert(k.length == 128);
    assert(alloc.empty == Ternary.no);
    assert(alloc.deallocate(k));
    assert(alloc.empty == Ternary.yes);

    k = alloc.allocate(512);
    assert(k.length == 512);
    assert(alloc.empty == Ternary.no);
    assert(alloc.deallocate(k));
    assert(alloc.empty == Ternary.yes);

    k = alloc.allocate(1024);
    assert(k.length == 1024);
    assert(alloc.empty == Ternary.no);
    assert(alloc.deallocate(k));
    assert(alloc.empty == Ternary.yes);
}