// Written in the D programming language.
/**
Source: $(PHOBOSSRC std/experimental/allocator/building_blocks/allocator_list.d)
*/
module std.experimental.allocator.building_blocks.allocator_list;

import core.memory : pageSize;

import std.experimental.allocator.building_blocks.null_allocator;
import std.experimental.allocator.common;
import std.experimental.allocator.gc_allocator;

// Turn this on for debugging
// debug = allocator_list;

/**

Given an $(LINK2 https://en.wikipedia.org/wiki/Factory_(object-oriented_programming),
object factory) of type `Factory` or a factory function
`factoryFunction`, and optionally also `BookkeepingAllocator` as a supplemental
allocator for bookkeeping, `AllocatorList` creates an allocator that lazily
creates as many allocators as are needed for satisfying client allocation requests.

An embedded list builds a most-recently-used strategy: the most recent
allocators used in calls to either `allocate`, `owns` (successful calls
only), or `deallocate` are tried for new allocations in order of their most
recent use. Thus, although core operations take in theory $(BIGOH k) time for
`k` allocators in current use, in many workloads the factor is sublinear.
Details of the actual strategy may change in future releases.

`AllocatorList` is primarily intended for coarse-grained handling of
allocators, i.e. the number of allocators in the list is expected to be
relatively small compared to the number of allocations handled by each
allocator. However, the per-allocator overhead is small so using
`AllocatorList` with a large number of allocators should be satisfactory as long
as the most-recently-used strategy is fast enough for the application.

`AllocatorList` makes an effort to return allocated memory back when no
longer used. It does so by destroying empty allocators. However, in order to
avoid thrashing (excessive creation/destruction of allocators under certain use
patterns), it keeps unused allocators for a while.

Params:
factoryFunction = A function or template function (including function literals).
New allocators are created by calling `factoryFunction(n)` with strictly
positive numbers `n`. Delegates that capture their environment are not created
amid concerns regarding garbage creation for the environment. When the factory
needs state, a `Factory` object should be used.

BookkeepingAllocator = Allocator used for storing bookkeeping data. The size of
bookkeeping data is proportional to the number of allocators. If $(D
BookkeepingAllocator) is `NullAllocator`, then `AllocatorList` is
"ouroboros-style", i.e. it keeps the bookkeeping data in memory obtained from
the allocators themselves. Note that for ouroboros-style management, the size
`n` passed to `make` will be occasionally different from the size
requested by client code.

Factory = Type of a factory object that returns new allocators on a need
basis. For an object `sweatshop` of type `Factory`, `sweatshop(n)` should
return an allocator able to allocate at least `n` bytes (i.e. `Factory` must
define `opCall(size_t)` to return an allocator object). Usually the capacity of
allocators created should be much larger than `n` such that an allocator can
be used for many subsequent allocations. `n` is passed only to ensure the
minimum necessary for the next allocation. The factory object is allowed to hold
state, which will be stored inside `AllocatorList` as a direct `public` member
called `factory`.

*/
struct AllocatorList(Factory, BookkeepingAllocator = GCAllocator)
{
    import core.lifetime : emplace;
    import std.experimental.allocator.building_blocks.stats_collector
        : StatsCollector, Options;
    import std.traits : hasMember;
    import std.typecons : Ternary;

    // Ouroboros mode: no separate bookkeeping allocator; the node array lives
    // in memory obtained from the managed allocators themselves.
    private enum ouroboros = is(BookkeepingAllocator == NullAllocator);

    /**
    Alias for `typeof(Factory()(1))`, i.e. the type of the individual
    allocators.
    */
    alias Allocator = typeof(Factory.init(1));
    // Allocator used internally; wraps each allocator with a bytesUsed
    // counter so emptiness can be detected cheaply.
    private alias SAllocator = StatsCollector!(Allocator, Options.bytesUsed);

    private static struct Node
    {
        // Allocator in this node
        SAllocator a;
        Node* next;

        @disable this(this);

        // Is this node unused? (a self-referencing `next` marks a free slot)
        void setUnused() { next = &this; }
        bool unused() const { return next is &this; }

        // Just forward everything to the allocator
        alias a this;
    }

    /**
    If `BookkeepingAllocator` is not `NullAllocator`, `bkalloc` is
    defined and accessible.
    */

    // State is stored in an array, but it has a list threaded through it by
    // means of "nextIdx".

    // state
    static if (!ouroboros)
    {
        static if (stateSize!BookkeepingAllocator) BookkeepingAllocator bkalloc;
        else alias bkalloc = BookkeepingAllocator.instance;
    }
    static if (stateSize!Factory)
    {
        Factory factory;
    }
    private Node[] allocators;
    private Node* root;

    static if (stateSize!Factory)
    {
        private auto make(size_t n) { return factory(n); }
    }
    else
    {
        private auto make(size_t n) { Factory f; return f(n); }
    }

    /**
    Constructs an `AllocatorList` given a factory object. This constructor is
    defined only if `Factory` has state.
    */
    static if (stateSize!Factory)
    this(ref Factory plant)
    {
        factory = plant;
    }
    /// Ditto
    static if (stateSize!Factory)
    this(Factory plant)
    {
        factory = plant;
    }

    static if (hasMember!(Allocator, "deallocateAll")
        && hasMember!(Allocator, "owns"))
    ~this()
    {
        deallocateAll;
    }

    /**
    The alignment offered.
    */
    enum uint alignment = Allocator.alignment;

    /**
    Allocate a block of size `s`. First tries to allocate from the existing
    list of already-created allocators. If neither can satisfy the request,
    creates a new allocator by calling `make(s)` and delegates the request
    to it. However, if the allocation fresh off a newly created allocator
    fails, subsequent calls to `allocate` will not cause more calls to $(D
    make).
    */
    void[] allocate(size_t s)
    {
        for (auto p = &root, n = *p; n; p = &n.next, n = *p)
        {
            auto result = n.allocate(s);
            if (result.length != s) continue;
            // Bring to front if not already
            if (root != n)
            {
                *p = n.next;
                n.next = root;
                root = n;
            }
            return result;
        }

        // Add a new allocator
        if (auto a = addAllocator(s))
        {
            auto result = a.allocate(s);
            assert(owns(result) == Ternary.yes || !result.ptr);
            return result;
        }
        return null;
    }

    static if (hasMember!(Allocator, "allocateZeroed"))
    package(std) void[] allocateZeroed()(size_t s)
    {
        for (auto p = &root, n = *p; n; p = &n.next, n = *p)
        {
            auto result = n.allocateZeroed(s);
            if (result.length != s) continue;
            // Bring to front if not already
            if (root != n)
            {
                *p = n.next;
                n.next = root;
                root = n;
            }
            return result;
        }

        // Add a new allocator
        if (auto a = addAllocator(s))
        {
            auto result = a.allocateZeroed(s);
            assert(owns(result) == Ternary.yes || !result.ptr);
            return result;
        }
        return null;
    }

    /**
    Allocate a block of size `s` with alignment `a`. First tries to allocate
    from the existing list of already-created allocators. If neither can
    satisfy the request, creates a new allocator by calling `make(s + a - 1)`
    and delegates the request to it. However, if the allocation fresh off a
    newly created allocator fails, subsequent calls to `alignedAllocate`
    will not cause more calls to `make`.
    */
    static if (hasMember!(Allocator, "alignedAllocate"))
    void[] alignedAllocate(size_t s, uint theAlignment)
    {
        import std.algorithm.comparison : max;
        import core.checkedint : addu;

        if (theAlignment == 0 || s == 0)
            return null;

        for (auto p = &root, n = *p; n; p = &n.next, n = *p)
        {
            auto result = n.alignedAllocate(s, theAlignment);
            if (result.length != s) continue;
            // Bring to front if not already
            if (root != n)
            {
                *p = n.next;
                n.next = root;
                root = n;
            }
            return result;
        }

        // Overdimension the new allocator by (alignment - 1) so an aligned
        // block of length s is guaranteed to fit; checked for overflow.
        bool overflow = false;
        size_t maxSize = addu(s - 1, cast(size_t) theAlignment, overflow);
        assert(!overflow, "Requested size is too large");
        if (overflow)
            return null;

        // Add a new allocator
        if (auto a = addAllocator(maxSize))
        {
            auto result = a.alignedAllocate(s, theAlignment);
            assert(owns(result) == Ternary.yes || !result.ptr);
            return result;
        }
        return null;
    }

    // Relocate the node array into newPlace, preserving the threaded MRU
    // list by rebasing all intra-array `next` pointers, then free the old
    // buffer via the appropriate (ouroboros or bookkeeping) allocator.
    private void moveAllocators(void[] newPlace)
    {
        assert(newPlace.ptr.alignedAt(Node.alignof));
        assert(newPlace.length % Node.sizeof == 0);
        auto newAllocators = cast(Node[]) newPlace;
        assert(allocators.length <= newAllocators.length);

        // Move allocators
        foreach (i, ref e; allocators)
        {
            if (e.unused)
            {
                newAllocators[i].setUnused;
                continue;
            }
            import core.stdc.string : memcpy;
            // memcpy (not assignment) because Node has @disable this(this)
            memcpy(&newAllocators[i].a, &e.a, e.a.sizeof);
            if (e.next)
            {
                newAllocators[i].next = newAllocators.ptr
                    + (e.next - allocators.ptr);
            }
            else
            {
                newAllocators[i].next = null;
            }
        }

        // Mark the unused portion as unused
        foreach (i; allocators.length .. newAllocators.length)
        {
            newAllocators[i].setUnused;
        }
        auto toFree = allocators;

        // Change state
        root = newAllocators.ptr + (root - allocators.ptr);
        allocators = newAllocators;

        // Free the olden buffer
        static if (ouroboros)
        {
            static if (hasMember!(Allocator, "deallocate")
                    && hasMember!(Allocator, "owns"))
                deallocate(toFree);
        }
        else
        {
            bkalloc.deallocate(toFree);
        }
    }

    // Ouroboros variant: the grown node array is carved out of the newly
    // created allocator itself when in-place expansion fails.
    static if (ouroboros)
    private Node* addAllocator(size_t atLeastBytes)
    {
        void[] t = allocators;
        static if (hasMember!(Allocator, "expand")
            && hasMember!(Allocator, "owns"))
        {
            immutable bool expanded = t && this.expand(t, Node.sizeof);
        }
        else
        {
            enum expanded = false;
        }
        if (expanded)
        {
            import core.stdc.string : memcpy;
            assert(t.length % Node.sizeof == 0);
            assert(t.ptr.alignedAt(Node.alignof));
            allocators = cast(Node[]) t;
            allocators[$ - 1].setUnused;
            auto newAlloc = SAllocator(make(atLeastBytes));
            // memcpy + emplace: move the allocator into the node and reset
            // the local to a blank state so its destructor is a no-op
            memcpy(&allocators[$ - 1].a, &newAlloc, newAlloc.sizeof);
            emplace(&newAlloc);
        }
        else
        {
            // +128 slack so the new allocator can host both the node array
            // and the client request that triggered its creation
            immutable toAlloc = (allocators.length + 1) * Node.sizeof
                + atLeastBytes + 128;
            auto newAlloc = SAllocator(make(toAlloc));
            auto newPlace = newAlloc.allocate(
                (allocators.length + 1) * Node.sizeof);
            if (!newPlace) return null;
            moveAllocators(newPlace);
            import core.stdc.string : memcpy;
            memcpy(&allocators[$ - 1].a, &newAlloc, newAlloc.sizeof);
            emplace(&newAlloc);
            assert(allocators[$ - 1].owns(allocators) == Ternary.yes);
        }
        // Insert as new root
        if (root != &allocators[$ - 1])
        {
            allocators[$ - 1].next = root;
            root = &allocators[$ - 1];
        }
        else
        {
            // This is the first one
            root.next = null;
        }
        assert(!root.unused);
        return root;
    }

    // Bookkeeping-allocator variant: the node array lives in bkalloc memory.
    static if (!ouroboros)
    private Node* addAllocator(size_t atLeastBytes)
    {
        void[] t = allocators;
        static if (hasMember!(BookkeepingAllocator, "expand"))
            immutable bool expanded = bkalloc.expand(t, Node.sizeof);
        else
            immutable bool expanded = false;
        if (expanded)
        {
            assert(t.length % Node.sizeof == 0);
            assert(t.ptr.alignedAt(Node.alignof));
            allocators = cast(Node[]) t;
            allocators[$ - 1].setUnused;
        }
        else
        {
            // Could not expand, create a new block
            t = bkalloc.allocate((allocators.length + 1) * Node.sizeof);
            assert(t.length % Node.sizeof == 0);
            if (!t.ptr) return null;
            moveAllocators(t);
        }
        assert(allocators[$ - 1].unused);
        auto newAlloc = SAllocator(make(atLeastBytes));
        import core.stdc.string : memcpy;
        memcpy(&allocators[$ - 1].a, &newAlloc, newAlloc.sizeof);
        emplace(&newAlloc);
        // Creation succeeded, insert as root
        if (allocators.length == 1)
            allocators[$ - 1].next = null;
        else
            allocators[$ - 1].next = root;
        assert(allocators[$ - 1].a.bytesUsed == 0);
        root = &allocators[$ - 1];
        return root;
    }

    /**
    Defined only if `Allocator` defines `owns`. Tries each allocator in
    turn, in most-recently-used order. If the owner is found, it is moved to
    the front of the list as a side effect under the assumption it will be used
    soon.

    Returns: `Ternary.yes` if one allocator was found to return `Ternary.yes`,
    `Ternary.no` if all component allocators returned `Ternary.no`, and
    `Ternary.unknown` if no allocator returned `Ternary.yes` and at least one
    returned `Ternary.unknown`.
    */
    static if (hasMember!(Allocator, "owns"))
    Ternary owns(void[] b)
    {
        auto result = Ternary.no;
        for (auto p = &root, n = *p; n; p = &n.next, n = *p)
        {
            immutable t = n.owns(b);
            if (t != Ternary.yes)
            {
                if (t == Ternary.unknown) result = t;
                continue;
            }
            // Move the owner to front, speculating it'll be used
            if (n != root)
            {
                *p = n.next;
                n.next = root;
                root = n;
            }
            return Ternary.yes;
        }
        return result;
    }

    /**
    Defined only if `Allocator.expand` is defined. Finds the owner of `b`
    and calls `expand` for it. The owner is not brought to the head of the
    list.
    */
    static if (hasMember!(Allocator, "expand")
        && hasMember!(Allocator, "owns"))
    bool expand(ref void[] b, size_t delta)
    {
        if (!b) return delta == 0;
        for (auto p = &root, n = *p; n; p = &n.next, n = *p)
        {
            if (n.owns(b) == Ternary.yes) return n.expand(b, delta);
        }
        return false;
    }

    /**
    Defined only if `Allocator.reallocate` is defined. Finds the owner of
    `b` and calls `reallocate` for it. If that fails, calls the global
    `reallocate`, which allocates a new block and moves memory.
    */
    static if (hasMember!(Allocator, "reallocate"))
    bool reallocate(ref void[] b, size_t s)
    {
        // First attempt to reallocate within the existing node
        if (!b.ptr)
        {
            b = allocate(s);
            return b.length == s;
        }
        for (auto p = &root, n = *p; n; p = &n.next, n = *p)
        {
            if (n.owns(b) == Ternary.yes) return n.reallocate(b, s);
        }
        // Failed, but we may find new memory in a new node.
        return .reallocate(this, b, s);
    }

    /**
    Defined if `Allocator.deallocate` and `Allocator.owns` are defined.
    */
    static if (hasMember!(Allocator, "deallocate")
        && hasMember!(Allocator, "owns"))
    bool deallocate(void[] b)
    {
        if (!b.ptr) return true;
        assert(allocators.length);
        assert(owns(b) == Ternary.yes);
        bool result;
        for (auto p = &root, n = *p; ; p = &n.next, n = *p)
        {
            assert(n);
            if (n.owns(b) != Ternary.yes) continue;
            result = n.deallocate(b);
            // Bring to front
            if (n != root)
            {
                *p = n.next;
                n.next = root;
                root = n;
            }
            if (n.empty != Ternary.yes) return result;
            break;
        }
        // Hmmm... should we return this allocator back to the wild? Let's
        // decide if there are TWO empty allocators we can release ONE. This
        // is to avoid thrashing.
        // Note that loop starts from the second element.
        for (auto p = &root.next, n = *p; n; p = &n.next, n = *p)
        {
            if (n.unused || n.empty != Ternary.yes) continue;
            // Used and empty baby, nuke it!
            n.a.destroy;
            *p = n.next;
            n.setUnused;
            break;
        }
        return result;
    }

    /**
    Defined only if `Allocator.owns` and `Allocator.deallocateAll` are
    defined.
    */
    static if (ouroboros && hasMember!(Allocator, "deallocateAll")
        && hasMember!(Allocator, "owns"))
    bool deallocateAll()
    {
        // The "special" node hosts the node array itself, so it must be
        // destroyed last — after all other allocators have been torn down.
        Node* special;
        foreach (ref n; allocators)
        {
            if (n.unused) continue;
            if (n.owns(allocators) == Ternary.yes)
            {
                special = &n;
                continue;
            }
            n.a.deallocateAll;
            n.a.destroy;
        }
        assert(special || !allocators.ptr);
        if (special)
        {
            static if (stateSize!SAllocator)
            {
                import core.stdc.string : memcpy;
                // Copy the special allocator out of the node array before
                // releasing it, since deallocateAll frees the array's memory.
                SAllocator specialCopy;
                assert(special.a.sizeof == specialCopy.sizeof);
                memcpy(&specialCopy, &special.a, specialCopy.sizeof);
                emplace(&special.a);
                specialCopy.deallocateAll();
            }
            else
            {
                special.deallocateAll();
            }
        }
        allocators = null;
        root = null;
        return true;
    }

    static if (!ouroboros && hasMember!(Allocator, "deallocateAll")
        && hasMember!(Allocator, "owns"))
    bool deallocateAll()
    {
        foreach (ref n; allocators)
        {
            if (n.unused) continue;
            n.a.deallocateAll;
            n.a.destroy;
        }
        bkalloc.deallocate(allocators);
        allocators = null;
        root = null;
        return true;
    }

    /**
    Returns `Ternary.yes` if no allocators are currently active,
    `Ternary.no` otherwise. This method never returns `Ternary.unknown`.
    */
    pure nothrow @safe @nogc
    Ternary empty() const
    {
        return Ternary(!allocators.length);
    }
}

/// Ditto
template AllocatorList(alias factoryFunction,
    BookkeepingAllocator = GCAllocator)
{
    alias A = typeof(factoryFunction(1));
    static assert(
        // is a template function (including literals)
        is(typeof({A function(size_t) @system x = factoryFunction!size_t;}))
        ||
        // or a function (including literals)
        is(typeof({A function(size_t) @system x = factoryFunction;}))
        ,
        "Only function names and function literals that take size_t"
            ~ " and return an allocator are accepted, not "
            ~ typeof(factoryFunction).stringof
    );
    static struct Factory
    {
        A opCall(size_t n) { return factoryFunction(n); }
    }
    alias AllocatorList = .AllocatorList!(Factory, BookkeepingAllocator);
}

///
version (Posix) @system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.free_list : ContiguousFreeList;
    import std.experimental.allocator.building_blocks.null_allocator : NullAllocator;
    import std.experimental.allocator.building_blocks.region : Region;
    import std.experimental.allocator.building_blocks.segregator : Segregator;
    import std.experimental.allocator.gc_allocator : GCAllocator;
    import std.experimental.allocator.mmap_allocator : MmapAllocator;

    // Ouroboros allocator list based upon 4MB regions, fetched directly from
    // mmap. All memory is released upon destruction.
    alias A1 = AllocatorList!((n) => Region!MmapAllocator(max(n, 1024 * 4096)),
        NullAllocator);

    // Allocator list based upon 4MB regions, fetched from the garbage
    // collector. All memory is released upon destruction.
    alias A2 = AllocatorList!((n) => Region!GCAllocator(max(n, 1024 * 4096)));

    // Ouroboros allocator list based upon 4MB regions, fetched from the garbage
    // collector. Memory is left to the collector.
    alias A3 = AllocatorList!(
        (n) => Region!NullAllocator(new ubyte[max(n, 1024 * 4096)]),
        NullAllocator);

    // Allocator list that creates one freelist for all objects
    alias A4 =
        Segregator!(
            64, AllocatorList!(
                (n) => ContiguousFreeList!(NullAllocator, 0, 64)(
                    cast(ubyte[])(GCAllocator.instance.allocate(4096)))),
            GCAllocator);

    A4 a;
    auto small = a.allocate(64);
    assert(small);
    a.deallocate(small);
    auto b1 = a.allocate(1024 * 8192);
    assert(b1 !is null); // still works due to overdimensioning
    b1 = a.allocate(1024 * 10);
    assert(b1.length == 1024 * 10);
}

@system unittest
{
    // Create an allocator based upon 4MB regions, fetched from the GC heap.
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.region : Region;
    AllocatorList!((n) => Region!GCAllocator(new ubyte[max(n, 1024 * 4096)]),
        NullAllocator) a;
    const b1 = a.allocate(1024 * 8192);
    assert(b1 !is null); // still works due to overdimensioning
    const b2 = a.allocate(1024 * 10);
    assert(b2.length == 1024 * 10);
    a.deallocateAll();
}

@system unittest
{
    // Create an allocator based upon 4MB regions, fetched from the GC heap.
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.region : BorrowedRegion;
    AllocatorList!((n) => BorrowedRegion!()(new ubyte[max(n, 1024 * 4096)])) a;
    auto b1 = a.alignedAllocate(1024 * 8192, 1024);
    assert(b1 !is null); // still works due to overdimensioning
    assert(b1.length == 1024 * 8192);
    assert(b1.ptr.alignedAt(1024));
    assert(a.allocators.length == 1);

    b1 = a.alignedAllocate(0, 1024);
    assert(b1.length == 0);
    assert(a.allocators.length == 1);

    b1 = a.allocate(1024 * 10);
    assert(b1.length == 1024 * 10);

    assert(a.reallocate(b1, 1024));
    assert(b1.length == 1024);

    a.deallocateAll();
}

@system unittest
{
    import core.exception : AssertError;
    import std.exception : assertThrown;

    // Create an allocator based upon 4MB regions, fetched from the GC heap.
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.region : BorrowedRegion;
    AllocatorList!((n) => BorrowedRegion!()(new ubyte[max(n, 1024 * 4096)])) a;
    auto b1 = a.alignedAllocate(0, 1);
    assert(b1 is null);

    b1 = a.alignedAllocate(1, 0);
    assert(b1 is null);

    b1 = a.alignedAllocate(0, 0);
    assert(b1 is null);

    assertThrown!AssertError(a.alignedAllocate(size_t.max, 1024));
    a.deallocateAll();
}

@system unittest
{
    import std.typecons : Ternary;

    // Create an allocator based upon 4MB regions, fetched from the GC heap.
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.region : BorrowedRegion;
    AllocatorList!((n) => BorrowedRegion!()(new ubyte[max(n, 1024 * 4096)])) a;
    auto b0 = a.alignedAllocate(1, 1024);
    assert(b0.length == 1);
    assert(b0.ptr.alignedAt(1024));
    assert(a.allocators.length == 1);

    auto b1 = a.alignedAllocate(1024 * 4096, 1024);
    assert(b1.length == 1024 * 4096);
    assert(b1.ptr.alignedAt(1024));
    assert(a.allocators.length == 2);

    auto b2 = a.alignedAllocate(1024, 128);
    assert(b2.length == 1024);
    assert(b2.ptr.alignedAt(128));
    assert(a.allocators.length == 2);

    auto b3 = a.allocate(1024);
    assert(b3.length == 1024);
    assert(a.allocators.length == 2);

    auto b4 = a.allocate(1024 * 4096);
    assert(b4.length == 1024 * 4096);
    assert(a.allocators.length == 3);

    assert(a.root.empty == Ternary.no);
    assert(a.deallocate(b4));
    assert(a.root.empty == Ternary.yes);

    assert(a.deallocate(b1));
    a.deallocateAll();
}

@system unittest
{
    // Create an allocator based upon 4MB regions, fetched from the GC heap.
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.region : BorrowedRegion;
    AllocatorList!((n) => BorrowedRegion!()(new ubyte[max(n, 1024 * 4096)])) a;
    auto b1 = a.allocate(1024 * 8192);
    assert(b1 !is null); // still works due to overdimensioning
    b1 = a.allocate(1024 * 10);
    assert(b1.length == 1024 * 10);
    assert(a.reallocate(b1, 1024));
    assert(b1.length == 1024);
    a.deallocateAll();
}

@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.region : BorrowedRegion;
    import std.experimental.allocator.mallocator : Mallocator;
    import std.typecons : Ternary;
    AllocatorList!((n) => BorrowedRegion!()(new ubyte[max(n, 1024 * 4096)]), Mallocator) a;
    auto b1 = a.allocate(1024 * 8192);
    assert(b1 !is null);
    b1 = a.allocate(1024 * 10);
    assert(b1.length == 1024 * 10);
    assert((() pure nothrow @safe @nogc => a.expand(b1, 10))());
    assert(b1.length == 1025 * 10);
    a.allocate(1024 * 4095);
    assert((() pure nothrow @safe @nogc => a.empty)() == Ternary.no);
    // Ensure deallocateAll infers from parent
    assert((() nothrow @nogc => a.deallocateAll())());
    assert((() pure nothrow @safe @nogc => a.empty)() == Ternary.yes);
}

@system unittest
{
    import std.experimental.allocator.building_blocks.region : Region;
    enum bs = GCAllocator.alignment;
    AllocatorList!((n) => Region!GCAllocator(256 * bs)) a;
    auto b1 = a.allocate(192 * bs);
    assert(b1.length == 192 * bs);
    assert(a.allocators.length == 1);
    auto b2 = a.allocate(64 * bs);
    assert(b2.length == 64 * bs);
    assert(a.allocators.length == 1);
    auto b3 = a.allocate(192 * bs);
    assert(b3.length == 192 * bs);
    assert(a.allocators.length == 2);
    // Ensure deallocate inherits from parent allocators
    () nothrow @nogc { a.deallocate(b1); }();
    b1 = a.allocate(64 * bs);
    assert(b1.length == 64 * bs);
    assert(a.allocators.length == 2);
    a.deallocateAll();
}

@system unittest
{
    import std.experimental.allocator.building_blocks.ascending_page_allocator : AscendingPageAllocator;
    import std.experimental.allocator.mallocator : Mallocator;
    import std.algorithm.comparison : max;
    import std.typecons : Ternary;

    static void testrw(void[] b)
    {
        ubyte* buf = cast(ubyte*) b.ptr;
        for (int i = 0; i < b.length; i += pageSize)
        {
            buf[i] = cast(ubyte) (i % 256);
            assert(buf[i] == cast(ubyte) (i % 256));
        }
    }

    enum numPages = 2;
    AllocatorList!((n) => AscendingPageAllocator(max(n, numPages * pageSize)), Mallocator) a;

    void[] b1 = a.allocate(1);
    assert(b1.length == 1);
    b1 = a.allocate(2);
    assert(b1.length == 2);
    testrw(b1);
    assert(a.root.a.parent.getAvailableSize() == 0);

    void[] b2 = a.allocate((numPages + 1) * pageSize);
    assert(b2.length == (numPages + 1) * pageSize);
    testrw(b2);

    void[] b3 = a.allocate(3);
    assert(b3.length == 3);
    testrw(b3);

    void[] b4 = a.allocate(0);
    assert(b4.length == 0);

    assert(a.allocators.length == 3);
    assert(a.owns(b1) == Ternary.yes);
    assert(a.owns(b2) == Ternary.yes);
    assert(a.owns(b3) == Ternary.yes);

    assert(a.expand(b1, pageSize - b1.length));
    assert(b1.length == pageSize);
    assert(!a.expand(b1, 1));
    assert(!a.expand(b2, 1));

    testrw(b1);
    testrw(b2);
    testrw(b3);

    assert(a.deallocate(b1));
    assert(a.deallocate(b2));

    assert(a.deallocateAll());
}

@system unittest
{
    import std.experimental.allocator.building_blocks.ascending_page_allocator : AscendingPageAllocator;
    import std.experimental.allocator.mallocator : Mallocator;
    import std.algorithm.comparison : max;
    import std.typecons : Ternary;

    static void testrw(void[] b)
    {
        ubyte* buf = cast(ubyte*) b.ptr;
        for (int i = 0; i < b.length; i += pageSize)
        {
            buf[i] = cast(ubyte) (i % 256);
            assert(buf[i] == cast(ubyte) (i % 256));
        }
    }

    enum numPages = 2;
    AllocatorList!((n) => AscendingPageAllocator(max(n, numPages * pageSize)), NullAllocator) a;

    void[] b1 = a.allocate(1);
    assert(b1.length == 1);
    b1 = a.allocate(2);
    assert(b1.length == 2);
    testrw(b1);

    void[] b2 = a.allocate((numPages + 1) * pageSize);
    assert(b2.length == (numPages + 1) * pageSize);
    testrw(b2);

    void[] b3 = a.allocate(3);
    assert(b3.length == 3);
    testrw(b3);

    void[] b4 = a.allocate(0);
    assert(b4.length == 0);

    assert(a.allocators.length == 3);
    assert(a.owns(b1) == Ternary.yes);
    assert(a.owns(b2) == Ternary.yes);
    assert(a.owns(b3) == Ternary.yes);

    assert(a.expand(b1, pageSize - b1.length));
    assert(b1.length == pageSize);
    assert(!a.expand(b1, 1));
    assert(!a.expand(b2, 1));

    testrw(b1);
    testrw(b2);
    testrw(b3);

    assert(a.deallocate(b1));
    assert(a.deallocate(b2));

    const alignment = cast(uint) (70 * pageSize);
    b3 = a.alignedAllocate(70 * pageSize, alignment);
    assert(b3.length == 70 * pageSize);
    assert(b3.ptr.alignedAt(alignment));
    testrw(b3);
    assert(a.allocators.length == 4);
    assert(a.deallocate(b3));


    assert(a.deallocateAll());
}

@system unittest
{
    import std.experimental.allocator.building_blocks.ascending_page_allocator : AscendingPageAllocator;
    import std.experimental.allocator.mallocator : Mallocator;
    import std.algorithm.comparison : max;
    import std.typecons : Ternary;

    static void testrw(void[] b)
    {
        ubyte* buf = cast(ubyte*) b.ptr;
        for (int i = 0; i < b.length; i += pageSize)
        {
            buf[i] = cast(ubyte) (i % 256);
            assert(buf[i] == cast(ubyte) (i % 256));
        }
    }

    enum numPages = 5;
    AllocatorList!((n) => AscendingPageAllocator(max(n, numPages * pageSize)), NullAllocator) a;
    const alignment = cast(uint) (2 * pageSize);
    auto b = a.alignedAllocate(1, alignment);
    assert(b.length == 1);
    assert(a.expand(b, pageSize - 1));
    assert(b.ptr.alignedAt(alignment));
    assert(b.length == pageSize);

    b = a.allocate(pageSize);
    assert(b.length == pageSize);
    assert(a.allocators.length == 1);

    assert(a.allocate(pageSize * 5).length == pageSize * 5);
    assert(a.allocators.length == 2);

    assert(a.deallocateAll());
}

@system unittest
{
    import std.experimental.allocator.building_blocks.ascending_page_allocator : AscendingPageAllocator;
    import std.algorithm.comparison : max;

    enum maxIter = 100;
    enum numPages = 10;
    const chunkSize = pageSize / 8;

    AllocatorList!((n) => AscendingPageAllocator(max(n, numPages * pageSize)), NullAllocator) a;
    foreach (i; 0 .. maxIter)
    {
        auto b1 = a.allocate(chunkSize);
        assert(b1.length == chunkSize);

        assert(a.deallocate(b1));
    }

    assert(a.deallocateAll());
}