1 // Written in the D programming language. 2 /** 3 Source: $(PHOBOSSRC std/experimental/allocator/building_blocks/allocator_list.d) 4 */ 5 module std.experimental.allocator.building_blocks.allocator_list; 6 7 import core.memory : pageSize; 8 9 import std.experimental.allocator.building_blocks.null_allocator; 10 import std.experimental.allocator.common; 11 import std.experimental.allocator.gc_allocator; 12 13 // Turn this on for debugging 14 // debug = allocator_list; 15 16 /** 17 18 Given an $(LINK2 https://en.wikipedia.org/wiki/Factory_(object-oriented_programming), 19 object factory) of type `Factory` or a factory function 20 `factoryFunction`, and optionally also `BookkeepingAllocator` as a supplemental 21 allocator for bookkeeping, `AllocatorList` creates an allocator that lazily 22 creates as many allocators as needed for satisfying client allocation requests. 23 24 An embedded list builds a most-recently-used strategy: the most recent 25 allocators used in calls to either `allocate`, `owns` (successful calls 26 only), or `deallocate` are tried for new allocations in order of their most 27 recent use. Thus, although core operations take in theory $(BIGOH k) time for 28 `k` allocators in current use, in many workloads the factor is sublinear. 29 Details of the actual strategy may change in future releases. 30 31 `AllocatorList` is primarily intended for coarse-grained handling of 32 allocators, i.e. the number of allocators in the list is expected to be 33 relatively small compared to the number of allocations handled by each 34 allocator. However, the per-allocator overhead is small so using 35 `AllocatorList` with a large number of allocators should be satisfactory as long 36 as the most-recently-used strategy is fast enough for the application. 37 38 `AllocatorList` makes an effort to return allocated memory back when no 39 longer used. It does so by destroying empty allocators. 
However, in order to 40 avoid thrashing (excessive creation/destruction of allocators under certain use 41 patterns), it keeps unused allocators for a while. 42 43 The shared version of `AllocatorList` is `SharedAllocatorList`, which has 44 identical semantics to its single-threaded version. Both `BookkeepingAllocator` 45 and `Allocator` provided by `factoryFunction` must be shared, in order to 46 ensure correctness. 47 48 Params: 49 factoryFunction = A function or template function (including function literals). 50 New allocators are created by calling `factoryFunction(n)` with strictly 51 positive numbers `n`. Delegates that capture their environment are not created 52 amid concerns regarding garbage creation for the environment. When the factory 53 needs state, a `Factory` object should be used. 54 55 BookkeepingAllocator = Allocator used for storing bookkeeping data. The size of 56 bookkeeping data is proportional to the number of allocators. If $(D 57 BookkeepingAllocator) is `NullAllocator`, then `AllocatorList` is 58 "ouroboros-style", i.e. it keeps the bookkeeping data in memory obtained from 59 the allocators themselves. Note that for ouroboros-style management, the size 60 `n` passed to `make` will be occasionally different from the size 61 requested by client code. 62 63 Factory = Type of a factory object that returns new allocators on a need 64 basis. For an object `sweatshop` of type `Factory`, `sweatshop(n)` should 65 return an allocator able to allocate at least `n` bytes (i.e. `Factory` must 66 define `opCall(size_t)` to return an allocator object). Usually the capacity of 67 allocators created should be much larger than `n` such that an allocator can 68 be used for many subsequent allocations. `n` is passed only to ensure the 69 minimum necessary for the next allocation. The factory object is allowed to hold 70 state, which will be stored inside `AllocatorList` as a direct `public` member 71 called `factory`. 
72 73 */ 74 struct AllocatorList(Factory, BookkeepingAllocator = GCAllocator) 75 { 76 import core.lifetime : emplace; 77 import std.experimental.allocator.building_blocks.stats_collector 78 : StatsCollector, Options; 79 import std.traits : hasMember; 80 import std.typecons : Ternary; 81 82 private enum ouroboros = is(BookkeepingAllocator == NullAllocator); 83 84 /** 85 Alias for `typeof(Factory()(1))`, i.e. the type of the individual 86 allocators. 87 */ 88 alias Allocator = typeof(Factory.init(1)); 89 // Allocator used internally 90 private alias SAllocator = StatsCollector!(Allocator, Options.bytesUsed); 91 92 private static struct Node 93 { 94 // Allocator in this node 95 SAllocator a; 96 Node* next; 97 98 @disable this(this); 99 100 // Is this node unused? 101 void setUnused() { next = &this; } 102 bool unused() const { return next is &this; } 103 104 // Just forward everything to the allocator 105 alias a this; 106 } 107 108 /** 109 If `BookkeepingAllocator` is not `NullAllocator`, `bkalloc` is 110 defined and accessible. 111 */ 112 113 // State is stored in an array, but it has a list threaded through it by 114 // means of "nextIdx". 115 116 // state 117 static if (!ouroboros) 118 { 119 static if (stateSize!BookkeepingAllocator) BookkeepingAllocator bkalloc; 120 else alias bkalloc = BookkeepingAllocator.instance; 121 } 122 static if (stateSize!Factory) 123 { 124 Factory factory; 125 } 126 private Node[] allocators; 127 private Node* root; 128 129 static if (stateSize!Factory) 130 { 131 private auto make(size_t n) { return factory(n); } 132 } 133 else 134 { 135 private auto make(size_t n) { Factory f; return f(n); } 136 } 137 138 /** 139 Constructs an `AllocatorList` given a factory object. This constructor is 140 defined only if `Factory` has state. 
141 */ 142 static if (stateSize!Factory) 143 this(ref Factory plant) 144 { 145 factory = plant; 146 } 147 /// Ditto 148 static if (stateSize!Factory) 149 this(Factory plant) 150 { 151 factory = plant; 152 } 153 154 static if (hasMember!(Allocator, "deallocateAll") 155 && hasMember!(Allocator, "owns")) 156 ~this() 157 { 158 deallocateAll; 159 } 160 161 /** 162 The alignment offered. 163 */ 164 enum uint alignment = Allocator.alignment; 165 166 /** 167 Allocate a block of size `s`. First tries to allocate from the existing 168 list of already-created allocators. If neither can satisfy the request, 169 creates a new allocator by calling `make(s)` and delegates the request 170 to it. However, if the allocation fresh off a newly created allocator 171 fails, subsequent calls to `allocate` will not cause more calls to $(D 172 make). 173 */ 174 void[] allocate(size_t s) 175 { 176 for (auto p = &root, n = *p; n; p = &n.next, n = *p) 177 { 178 auto result = n.allocate(s); 179 if (result.length != s) continue; 180 // Bring to front if not already 181 if (root != n) 182 { 183 *p = n.next; 184 n.next = root; 185 root = n; 186 } 187 return result; 188 } 189 190 // Add a new allocator 191 if (auto a = addAllocator(s)) 192 { 193 auto result = a.allocate(s); 194 assert(owns(result) == Ternary.yes || !result.ptr); 195 return result; 196 } 197 return null; 198 } 199 200 static if (hasMember!(Allocator, "allocateZeroed")) 201 package(std) void[] allocateZeroed()(size_t s) 202 { 203 for (auto p = &root, n = *p; n; p = &n.next, n = *p) 204 { 205 auto result = n.allocateZeroed(s); 206 if (result.length != s) continue; 207 // Bring to front if not already 208 if (root != n) 209 { 210 *p = n.next; 211 n.next = root; 212 root = n; 213 } 214 return result; 215 } 216 217 // Add a new allocator 218 if (auto a = addAllocator(s)) 219 { 220 auto result = a.allocateZeroed(s); 221 assert(owns(result) == Ternary.yes || !result.ptr); 222 return result; 223 } 224 return null; 225 } 226 227 /** 228 
Allocate a block of size `s` with alignment `a`. First tries to allocate 229 from the existing list of already-created allocators. If neither can 230 satisfy the request, creates a new allocator by calling `make(s + a - 1)` 231 and delegates the request to it. However, if the allocation fresh off a 232 newly created allocator fails, subsequent calls to `alignedAllocate` 233 will not cause more calls to `make`. 234 */ 235 static if (hasMember!(Allocator, "alignedAllocate")) 236 void[] alignedAllocate(size_t s, uint theAlignment) 237 { 238 import std.algorithm.comparison : max; 239 import core.checkedint : addu; 240 241 if (theAlignment == 0 || s == 0) 242 return null; 243 244 for (auto p = &root, n = *p; n; p = &n.next, n = *p) 245 { 246 auto result = n.alignedAllocate(s, theAlignment); 247 if (result.length != s) continue; 248 // Bring to front if not already 249 if (root != n) 250 { 251 *p = n.next; 252 n.next = root; 253 root = n; 254 } 255 return result; 256 } 257 258 bool overflow = false; 259 size_t maxSize = addu(s - 1, cast(size_t) theAlignment, overflow); 260 assert(!overflow, "Requested size is too large"); 261 if (overflow) 262 return null; 263 264 // Add a new allocator 265 if (auto a = addAllocator(maxSize)) 266 { 267 auto result = a.alignedAllocate(s, theAlignment); 268 assert(owns(result) == Ternary.yes || !result.ptr); 269 return result; 270 } 271 return null; 272 } 273 274 private void moveAllocators(void[] newPlace) 275 { 276 assert(newPlace.ptr.alignedAt(Node.alignof)); 277 assert(newPlace.length % Node.sizeof == 0); 278 auto newAllocators = cast(Node[]) newPlace; 279 assert(allocators.length <= newAllocators.length); 280 281 // Move allocators 282 foreach (i, ref e; allocators) 283 { 284 if (e.unused) 285 { 286 newAllocators[i].setUnused; 287 continue; 288 } 289 import core.stdc.string : memcpy; 290 memcpy(&newAllocators[i].a, &e.a, e.a.sizeof); 291 if (e.next) 292 { 293 newAllocators[i].next = newAllocators.ptr 294 + (e.next - allocators.ptr); 
295 } 296 else 297 { 298 newAllocators[i].next = null; 299 } 300 } 301 302 // Mark the unused portion as unused 303 foreach (i; allocators.length .. newAllocators.length) 304 { 305 newAllocators[i].setUnused; 306 } 307 auto toFree = allocators; 308 309 // Change state 310 root = newAllocators.ptr + (root - allocators.ptr); 311 allocators = newAllocators; 312 313 // Free the olden buffer 314 static if (ouroboros) 315 { 316 static if (hasMember!(Allocator, "deallocate") 317 && hasMember!(Allocator, "owns")) 318 deallocate(toFree); 319 } 320 else 321 { 322 bkalloc.deallocate(toFree); 323 } 324 } 325 326 static if (ouroboros) 327 private Node* addAllocator(size_t atLeastBytes) 328 { 329 void[] t = allocators; 330 static if (hasMember!(Allocator, "expand") 331 && hasMember!(Allocator, "owns")) 332 { 333 immutable bool expanded = t && this.expand(t, Node.sizeof); 334 } 335 else 336 { 337 enum expanded = false; 338 } 339 if (expanded) 340 { 341 import core.stdc.string : memcpy; 342 assert(t.length % Node.sizeof == 0); 343 assert(t.ptr.alignedAt(Node.alignof)); 344 allocators = cast(Node[]) t; 345 allocators[$ - 1].setUnused; 346 auto newAlloc = SAllocator(make(atLeastBytes)); 347 memcpy(&allocators[$ - 1].a, &newAlloc, newAlloc.sizeof); 348 emplace(&newAlloc); 349 } 350 else 351 { 352 immutable toAlloc = (allocators.length + 1) * Node.sizeof 353 + atLeastBytes + 128; 354 auto newAlloc = SAllocator(make(toAlloc)); 355 auto newPlace = newAlloc.allocate( 356 (allocators.length + 1) * Node.sizeof); 357 if (!newPlace) return null; 358 moveAllocators(newPlace); 359 import core.stdc.string : memcpy; 360 memcpy(&allocators[$ - 1].a, &newAlloc, newAlloc.sizeof); 361 emplace(&newAlloc); 362 assert(allocators[$ - 1].owns(allocators) == Ternary.yes); 363 } 364 // Insert as new root 365 if (root != &allocators[$ - 1]) 366 { 367 allocators[$ - 1].next = root; 368 root = &allocators[$ - 1]; 369 } 370 else 371 { 372 // This is the first one 373 root.next = null; 374 } 375 
assert(!root.unused); 376 return root; 377 } 378 379 static if (!ouroboros) 380 private Node* addAllocator(size_t atLeastBytes) 381 { 382 void[] t = allocators; 383 static if (hasMember!(BookkeepingAllocator, "expand")) 384 immutable bool expanded = bkalloc.expand(t, Node.sizeof); 385 else 386 immutable bool expanded = false; 387 if (expanded) 388 { 389 assert(t.length % Node.sizeof == 0); 390 assert(t.ptr.alignedAt(Node.alignof)); 391 allocators = cast(Node[]) t; 392 allocators[$ - 1].setUnused; 393 } 394 else 395 { 396 // Could not expand, create a new block 397 t = bkalloc.allocate((allocators.length + 1) * Node.sizeof); 398 assert(t.length % Node.sizeof == 0); 399 if (!t.ptr) return null; 400 moveAllocators(t); 401 } 402 assert(allocators[$ - 1].unused); 403 auto newAlloc = SAllocator(make(atLeastBytes)); 404 import core.stdc.string : memcpy; 405 memcpy(&allocators[$ - 1].a, &newAlloc, newAlloc.sizeof); 406 emplace(&newAlloc); 407 // Creation succeeded, insert as root 408 if (allocators.length == 1) 409 allocators[$ - 1].next = null; 410 else 411 allocators[$ - 1].next = root; 412 assert(allocators[$ - 1].a.bytesUsed == 0); 413 root = &allocators[$ - 1]; 414 return root; 415 } 416 417 /** 418 Defined only if `Allocator` defines `owns`. Tries each allocator in 419 turn, in most-recently-used order. If the owner is found, it is moved to 420 the front of the list as a side effect under the assumption it will be used 421 soon. 422 423 Returns: `Ternary.yes` if one allocator was found to return `Ternary.yes`, 424 `Ternary.no` if all component allocators returned `Ternary.no`, and 425 `Ternary.unknown` if no allocator returned `Ternary.yes` and at least one 426 returned `Ternary.unknown`. 
427 */ 428 static if (hasMember!(Allocator, "owns")) 429 Ternary owns(void[] b) 430 { 431 auto result = Ternary.no; 432 for (auto p = &root, n = *p; n; p = &n.next, n = *p) 433 { 434 immutable t = n.owns(b); 435 if (t != Ternary.yes) 436 { 437 if (t == Ternary.unknown) result = t; 438 continue; 439 } 440 // Move the owner to front, speculating it'll be used 441 if (n != root) 442 { 443 *p = n.next; 444 n.next = root; 445 root = n; 446 } 447 return Ternary.yes; 448 } 449 return result; 450 } 451 452 /** 453 Defined only if `Allocator.expand` is defined. Finds the owner of `b` 454 and calls `expand` for it. The owner is not brought to the head of the 455 list. 456 */ 457 static if (hasMember!(Allocator, "expand") 458 && hasMember!(Allocator, "owns")) 459 bool expand(ref void[] b, size_t delta) 460 { 461 if (!b) return delta == 0; 462 for (auto p = &root, n = *p; n; p = &n.next, n = *p) 463 { 464 if (n.owns(b) == Ternary.yes) return n.expand(b, delta); 465 } 466 return false; 467 } 468 469 /** 470 Defined only if `Allocator.reallocate` is defined. Finds the owner of 471 `b` and calls `reallocate` for it. If that fails, calls the global 472 `reallocate`, which allocates a new block and moves memory. 473 */ 474 static if (hasMember!(Allocator, "reallocate")) 475 bool reallocate(ref void[] b, size_t s) 476 { 477 // First attempt to reallocate within the existing node 478 if (!b.ptr) 479 { 480 b = allocate(s); 481 return b.length == s; 482 } 483 for (auto p = &root, n = *p; n; p = &n.next, n = *p) 484 { 485 if (n.owns(b) == Ternary.yes) return n.reallocate(b, s); 486 } 487 // Failed, but we may find new memory in a new node. 488 return .reallocate(this, b, s); 489 } 490 491 /** 492 Defined if `Allocator.deallocate` and `Allocator.owns` are defined. 
493 */ 494 static if (hasMember!(Allocator, "deallocate") 495 && hasMember!(Allocator, "owns")) 496 bool deallocate(void[] b) 497 { 498 if (!b.ptr) return true; 499 assert(allocators.length); 500 assert(owns(b) == Ternary.yes); 501 bool result; 502 for (auto p = &root, n = *p; ; p = &n.next, n = *p) 503 { 504 assert(n); 505 if (n.owns(b) != Ternary.yes) continue; 506 result = n.deallocate(b); 507 // Bring to front 508 if (n != root) 509 { 510 *p = n.next; 511 n.next = root; 512 root = n; 513 } 514 if (n.empty != Ternary.yes) return result; 515 break; 516 } 517 // Hmmm... should we return this allocator back to the wild? Let's 518 // decide if there are TWO empty allocators we can release ONE. This 519 // is to avoid thrashing. 520 // Note that loop starts from the second element. 521 for (auto p = &root.next, n = *p; n; p = &n.next, n = *p) 522 { 523 if (n.unused || n.empty != Ternary.yes) continue; 524 // Used and empty baby, nuke it! 525 n.a.destroy; 526 *p = n.next; 527 n.setUnused; 528 break; 529 } 530 return result; 531 } 532 533 /** 534 Defined only if `Allocator.owns` and `Allocator.deallocateAll` are 535 defined. 
536 */ 537 static if (ouroboros && hasMember!(Allocator, "deallocateAll") 538 && hasMember!(Allocator, "owns")) 539 bool deallocateAll() 540 { 541 Node* special; 542 foreach (ref n; allocators) 543 { 544 if (n.unused) continue; 545 if (n.owns(allocators) == Ternary.yes) 546 { 547 special = &n; 548 continue; 549 } 550 n.a.deallocateAll; 551 n.a.destroy; 552 } 553 assert(special || !allocators.ptr); 554 if (special) 555 { 556 static if (stateSize!SAllocator) 557 { 558 import core.stdc.string : memcpy; 559 SAllocator specialCopy; 560 assert(special.a.sizeof == specialCopy.sizeof); 561 memcpy(&specialCopy, &special.a, specialCopy.sizeof); 562 emplace(&special.a); 563 specialCopy.deallocateAll(); 564 } 565 else 566 { 567 special.deallocateAll(); 568 } 569 } 570 allocators = null; 571 root = null; 572 return true; 573 } 574 575 static if (!ouroboros && hasMember!(Allocator, "deallocateAll") 576 && hasMember!(Allocator, "owns")) 577 bool deallocateAll() 578 { 579 foreach (ref n; allocators) 580 { 581 if (n.unused) continue; 582 n.a.deallocateAll; 583 n.a.destroy; 584 } 585 bkalloc.deallocate(allocators); 586 allocators = null; 587 root = null; 588 return true; 589 } 590 591 /** 592 Returns `Ternary.yes` if no allocators are currently active, 593 `Ternary.no` otherwise. This method never returns `Ternary.unknown`. 
594 */ 595 pure nothrow @safe @nogc 596 Ternary empty() const 597 { 598 return Ternary(!allocators.length); 599 } 600 } 601 602 /// Ditto 603 template AllocatorList(alias factoryFunction, 604 BookkeepingAllocator = GCAllocator) 605 { 606 alias A = typeof(factoryFunction(1)); 607 static assert( 608 // is a template function (including literals) 609 is(typeof({A function(size_t) @system x = factoryFunction!size_t;})) 610 || 611 // or a function (including literals) 612 is(typeof({A function(size_t) @system x = factoryFunction;})) 613 , 614 "Only function names and function literals that take size_t" 615 ~ " and return an allocator are accepted, not " 616 ~ typeof(factoryFunction).stringof 617 ); 618 static struct Factory 619 { 620 A opCall(size_t n) { return factoryFunction(n); } 621 } 622 alias AllocatorList = .AllocatorList!(Factory, BookkeepingAllocator); 623 } 624 625 /// 626 version (Posix) @system unittest 627 { 628 import std.algorithm.comparison : max; 629 import std.experimental.allocator.building_blocks.free_list : ContiguousFreeList; 630 import std.experimental.allocator.building_blocks.null_allocator : NullAllocator; 631 import std.experimental.allocator.building_blocks.region : Region; 632 import std.experimental.allocator.building_blocks.segregator : Segregator; 633 import std.experimental.allocator.gc_allocator : GCAllocator; 634 import std.experimental.allocator.mmap_allocator : MmapAllocator; 635 636 // Ouroboros allocator list based upon 4MB regions, fetched directly from 637 // mmap. All memory is released upon destruction. 638 alias A1 = AllocatorList!((n) => Region!MmapAllocator(max(n, 1024 * 4096)), 639 NullAllocator); 640 641 // Allocator list based upon 4MB regions, fetched from the garbage 642 // collector. All memory is released upon destruction. 643 alias A2 = AllocatorList!((n) => Region!GCAllocator(max(n, 1024 * 4096))); 644 645 // Ouroboros allocator list based upon 4MB regions, fetched from the garbage 646 // collector. 
Memory is left to the collector. 647 alias A3 = AllocatorList!( 648 (n) => Region!NullAllocator(new ubyte[max(n, 1024 * 4096)]), 649 NullAllocator); 650 651 // Allocator list that creates one freelist for all objects 652 alias A4 = 653 Segregator!( 654 64, AllocatorList!( 655 (n) => ContiguousFreeList!(NullAllocator, 0, 64)( 656 cast(ubyte[])(GCAllocator.instance.allocate(4096)))), 657 GCAllocator); 658 659 A4 a; 660 auto small = a.allocate(64); 661 assert(small); 662 a.deallocate(small); 663 auto b1 = a.allocate(1024 * 8192); 664 assert(b1 !is null); // still works due to overdimensioning 665 b1 = a.allocate(1024 * 10); 666 assert(b1.length == 1024 * 10); 667 } 668 669 /// Ditto 670 shared struct SharedAllocatorList(Factory, BookkeepingAllocator = GCAllocator) 671 { 672 import std.typecons : Ternary; 673 import std.traits : hasMember; 674 import core.internal.spinlock : SpinLock; 675 676 private: 677 // Forward all calls to 'impl' and protect them by the lock below 678 AllocatorList!(Factory, BookkeepingAllocator) impl; 679 SpinLock lock = SpinLock(SpinLock.Contention.brief); 680 681 // This could be potentially removed in the future, 682 // should a successor to <https://github.com/dlang/druntime/pull/2156> 683 // or a solution to <https://github.com/dlang/dmd/issues/17128> get merged. 684 static ref T assumeUnshared(T)(ref shared T val) @trusted @nogc pure nothrow 685 { 686 return *cast(T*) &val; 687 } 688 689 // Debug function used for testing 690 version (unittest) 691 auto allocators() 692 { 693 return impl.allocators; 694 } 695 696 // Everything is inherited from the 'AllocatorList' implementation 697 public: 698 699 /* 700 Note: This does not work well with rvalues because it copies them once more. 701 Probably not a problem here because all parameters are cheap. 702 <https://github.com/dlang/phobos/pull/6465/files#r189629862> 703 */ 704 705 /** 706 The alignment offered. 
707 */ 708 enum alignment = impl.alignment; 709 710 /** 711 Allocate a block of size `s`. First tries to allocate from the existing 712 list of already-created allocators. If neither can satisfy the request, 713 creates a new allocator by calling `make(s)` and delegates the request 714 to it. However, if the allocation fresh off a newly created allocator 715 fails, subsequent calls to `allocate` will not cause more calls to $(D 716 make). 717 */ 718 static if (hasMember!(typeof(impl), "allocate")) 719 void[] allocate(size_t s) 720 { 721 lock.lock(); 722 scope(exit) lock.unlock(); 723 724 return assumeUnshared(impl).allocate(s); 725 } 726 727 /** 728 Allocate a block of size `s` with alignment `a`. First tries to allocate 729 from the existing list of already-created allocators. If neither can 730 satisfy the request, creates a new allocator by calling `make(s + a - 1)` 731 and delegates the request to it. However, if the allocation fresh off a 732 newly created allocator fails, subsequent calls to `alignedAllocate` 733 will not cause more calls to `make`. 734 */ 735 static if (hasMember!(typeof(impl), "alignedAllocate")) 736 void[] alignedAllocate(size_t s, uint a) 737 { 738 lock.lock(); 739 scope(exit) lock.unlock(); 740 741 return assumeUnshared(impl).alignedAllocate(s, a); 742 } 743 744 /** 745 Defined if `Allocator.deallocate` and `Allocator.owns` are defined. 746 */ 747 static if (hasMember!(typeof(impl), "deallocate")) 748 bool deallocate(void[] b) 749 { 750 lock.lock(); 751 scope(exit) lock.unlock(); 752 753 return assumeUnshared(impl).deallocate(b); 754 } 755 756 /** 757 Defined only if `Allocator` defines `owns`. Tries each allocator in 758 turn, in most-recently-used order. If the owner is found, it is moved to 759 the front of the list as a side effect under the assumption it will be used 760 soon. 
761 762 Returns: `Ternary.yes` if one allocator was found to return `Ternary.yes`, 763 `Ternary.no` if all component allocators returned `Ternary.no`, and 764 `Ternary.unknown` if no allocator returned `Ternary.yes` and at least one 765 returned `Ternary.unknown`. 766 */ 767 static if (hasMember!(typeof(impl), "owns")) 768 Ternary owns(void[] b) 769 { 770 lock.lock(); 771 scope(exit) lock.unlock(); 772 773 return assumeUnshared(impl).owns(b); 774 } 775 776 /** 777 Defined only if `Allocator.expand` is defined. Finds the owner of `b` 778 and calls `expand` for it. The owner is not brought to the head of the 779 list. 780 */ 781 static if (hasMember!(typeof(impl), "expand")) 782 bool expand(ref void[] b, size_t delta) 783 { 784 lock.lock(); 785 scope(exit) lock.unlock(); 786 787 return assumeUnshared(impl).expand(b, delta); 788 } 789 790 /** 791 Defined only if `Allocator.reallocate` is defined. Finds the owner of 792 `b` and calls `reallocate` for it. If that fails, calls the global 793 `reallocate`, which allocates a new block and moves memory. 794 */ 795 static if (hasMember!(typeof(impl), "reallocate")) 796 bool reallocate(ref void[] b, size_t s) 797 { 798 lock.lock(); 799 scope(exit) lock.unlock(); 800 801 return assumeUnshared(impl).reallocate(b, s); 802 } 803 804 /** 805 Defined only if `Allocator.owns` and `Allocator.deallocateAll` are 806 defined. 807 */ 808 static if (hasMember!(typeof(impl), "deallocateAll")) 809 bool deallocateAll() 810 { 811 lock.lock(); 812 scope(exit) lock.unlock(); 813 814 return assumeUnshared(impl).deallocateAll(); 815 } 816 817 /** 818 Returns `Ternary.yes` if no allocators are currently active, 819 `Ternary.no` otherwise. This method never returns `Ternary.unknown`. 
820 */ 821 static if (hasMember!(typeof(impl), "empty")) 822 Ternary empty() 823 { 824 lock.lock(); 825 scope(exit) lock.unlock(); 826 827 return assumeUnshared(impl).empty(); 828 } 829 } 830 831 /// Ditto 832 template SharedAllocatorList(alias factoryFunction, 833 BookkeepingAllocator = GCAllocator) 834 { 835 alias A = typeof(factoryFunction(1)); 836 static assert( 837 // is a template function (including literals) 838 is(typeof({A function(size_t) @system x = factoryFunction!size_t;})) 839 || 840 // or a function (including literals) 841 is(typeof({A function(size_t) @system x = factoryFunction;})) 842 , 843 "Only function names and function literals that take size_t" 844 ~ " and return an allocator are accepted, not " 845 ~ typeof(factoryFunction).stringof 846 ); 847 static struct Factory 848 { 849 A opCall(size_t n) { return factoryFunction(n); } 850 } 851 alias SharedAllocatorList = .SharedAllocatorList!(Factory, BookkeepingAllocator); 852 } 853 854 @system unittest 855 { 856 import std.algorithm.comparison : max; 857 import std.experimental.allocator.building_blocks.region : Region, SharedRegion; 858 859 static void testAlloc(Allocator)(ref Allocator a) 860 { 861 const b1 = a.allocate(1024 * 8192); 862 assert(b1 !is null); // still works due to overdimensioning 863 const b2 = a.allocate(1024 * 10); 864 assert(b2.length == 1024 * 10); 865 a.deallocateAll(); 866 } 867 868 // Create an allocator based upon 4MB regions, fetched from the GC heap. 
869 AllocatorList!((n) => Region!GCAllocator(new ubyte[max(n, 1024 * 4096)]), 870 NullAllocator) reg1; 871 872 SharedAllocatorList!((n) => SharedRegion!GCAllocator(new ubyte[max(n, 1024 * 4096)]), 873 NullAllocator) reg2; 874 875 testAlloc(reg1); 876 testAlloc(reg2); 877 } 878 879 @system unittest 880 { 881 import std.algorithm.comparison : max; 882 import std.experimental.allocator.building_blocks.region : BorrowedRegion, SharedBorrowedRegion; 883 884 static void testAlloc(Allocator)(ref Allocator a) 885 { 886 auto b1 = a.alignedAllocate(1024 * 8192, 1024); 887 assert(b1 !is null); // still works due to overdimensioning 888 assert(b1.length == 1024 * 8192); 889 assert(b1.ptr.alignedAt(1024)); 890 assert(a.allocators.length == 1); 891 892 b1 = a.alignedAllocate(0, 1024); 893 assert(b1.length == 0); 894 assert(a.allocators.length == 1); 895 896 b1 = a.allocate(1024 * 10); 897 assert(b1.length == 1024 * 10); 898 899 assert(a.reallocate(b1, 1024)); 900 assert(b1.length == 1024); 901 902 a.deallocateAll(); 903 } 904 905 // Create an allocator based upon 4MB regions, fetched from the GC heap. 906 AllocatorList!((n) => BorrowedRegion!()(new ubyte[max(n, 1024 * 4096)])) a1; 907 SharedAllocatorList!((n) => SharedBorrowedRegion!()(new ubyte[max(n, 1024 * 4096)])) a2; 908 909 testAlloc(a1); 910 testAlloc(a2); 911 } 912 913 @system unittest 914 { 915 import core.exception : AssertError; 916 import std.exception : assertThrown; 917 import std.algorithm.comparison : max; 918 import std.experimental.allocator.building_blocks.region : BorrowedRegion, SharedBorrowedRegion; 919 920 static void testAlloc(Allocator)(ref Allocator a) 921 { 922 auto b1 = a.alignedAllocate(0, 1); 923 assert(b1 is null); 924 925 b1 = a.alignedAllocate(1, 0); 926 assert(b1 is null); 927 928 b1 = a.alignedAllocate(0, 0); 929 assert(b1 is null); 930 931 assertThrown!AssertError(a.alignedAllocate(size_t.max, 1024)); 932 933 // FIXME: This special-casing might not be necessary. 
// At the moment though, this call would take potentially forever
// for the `SharedAllocatorList` from below.
        static if (!is(Allocator == shared))
        {
            a.deallocateAll();
        }
    }

    // Create an allocator based upon 4MB regions, fetched from the GC heap.
    AllocatorList!((n) => BorrowedRegion!()(new ubyte[max(n, 1024 * 4096)])) a1;
    SharedAllocatorList!((n) => SharedBorrowedRegion!()(new ubyte[max(n, 1024 * 4096)])) a2;

    testAlloc(a1);
    testAlloc(a2);
}

// alignedAllocate across both list flavors: verifies that aligned requests are
// served, that new backing allocators are created exactly when the current ones
// cannot satisfy a request (allocators.length 1 -> 2 -> 3), and — for the
// non-shared list only — that `root` reports empty/non-empty as blocks are freed.
@system unittest
{
    import std.typecons : Ternary;

    // Create an allocator based upon 4MB regions, fetched from the GC heap.
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.region : BorrowedRegion, SharedBorrowedRegion;

    static void testAlloc(Allocator)(ref Allocator a)
    {
        auto b0 = a.alignedAllocate(1, 1024);
        assert(b0.length == 1);
        assert(b0.ptr.alignedAt(1024));
        assert(a.allocators.length == 1);

        // A full-region-sized aligned request cannot fit next to b0:
        // forces a second allocator.
        auto b1 = a.alignedAllocate(1024 * 4096, 1024);
        assert(b1.length == 1024 * 4096);
        assert(b1.ptr.alignedAt(1024));
        assert(a.allocators.length == 2);

        auto b2 = a.alignedAllocate(1024, 128);
        assert(b2.length == 1024);
        assert(b2.ptr.alignedAt(128));
        assert(a.allocators.length == 2);

        auto b3 = a.allocate(1024);
        assert(b3.length == 1024);
        assert(a.allocators.length == 2);

        auto b4 = a.allocate(1024 * 4096);
        assert(b4.length == 1024 * 4096);
        assert(a.allocators.length == 3);

        // `root` introspection is only available on the non-shared list here.
        static if (!is(Allocator == shared))
        {
            assert(a.root.empty == Ternary.no);
            assert(a.deallocate(b4));
            assert(a.root.empty == Ternary.yes);

            assert(a.deallocate(b1));
        }

        a.deallocateAll();
    }

    AllocatorList!((n) => BorrowedRegion!()(new ubyte[max(n, 1024 * 4096)])) a1;
    SharedAllocatorList!((n) => SharedBorrowedRegion!()(new ubyte[max(n, 1024 * 4096)])) a2;

    testAlloc(a1);
    testAlloc(a2);
}

// reallocate: an oversized first request still succeeds (the factory
// overdimensions the region to at least max(n, 4MB)), and shrinking via
// reallocate adjusts the block length.
@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.region : BorrowedRegion, SharedBorrowedRegion;

    static void testAlloc(Allocator)(ref Allocator a)
    {
        auto b1 = a.allocate(1024 * 8192);
        assert(b1 !is null); // still works due to overdimensioning
        b1 = a.allocate(1024 * 10);
        assert(b1.length == 1024 * 10);
        assert(a.reallocate(b1, 1024));
        assert(b1.length == 1024);
        a.deallocateAll();
    }

    // Create an allocator based upon 4MB regions, fetched from the GC heap.
    AllocatorList!((n) => BorrowedRegion!()(new ubyte[max(n, 1024 * 4096)])) a1;
    SharedAllocatorList!((n) => SharedBorrowedRegion!()(new ubyte[max(n, 1024 * 4096)])) a2;

    testAlloc(a1);
    testAlloc(a2);
}

// Attribute inference with a Mallocator bookkeeper: expand/empty must be
// callable as pure nothrow @safe @nogc, and deallocateAll as nothrow @nogc
// (inferred from the parent allocator), as pinned by the lambda wrappers.
@system unittest
{
    import std.algorithm.comparison : max;
    import std.experimental.allocator.building_blocks.region : BorrowedRegion;
    import std.experimental.allocator.mallocator : Mallocator;
    import std.typecons : Ternary;
    AllocatorList!((n) => BorrowedRegion!()(new ubyte[max(n, 1024 * 4096)]), Mallocator) a;
    auto b1 = a.allocate(1024 * 8192);
    assert(b1 !is null);
    b1 = a.allocate(1024 * 10);
    assert(b1.length == 1024 * 10);
    assert((() pure nothrow @safe @nogc => a.expand(b1, 10))());
    assert(b1.length == 1025 * 10);
    a.allocate(1024 * 4095);
    assert((() pure nothrow @safe @nogc => a.empty)() == Ternary.no);
    // Ensure deallocateAll infers from parent
    assert((() nothrow @nogc => a.deallocateAll())());
    assert((() pure nothrow @safe @nogc => a.empty)() == Ternary.yes);
}

// Small fixed-size regions (256 alignment-units): a new allocator appears only
// when the current region cannot hold the request, and space freed by
// deallocate is reusable without growing the list.
@system unittest
{
    import std.experimental.allocator.building_blocks.region : Region, SharedRegion;
    enum bs = GCAllocator.alignment;

    static void testAlloc(Allocator)(ref Allocator a)
    {
        auto b1 = a.allocate(192 * bs);
        assert(b1.length == 192 * bs);
        assert(a.allocators.length == 1);
        auto b2 = a.allocate(64 * bs);
        assert(b2.length == 64 * bs);
        assert(a.allocators.length == 1);
        auto b3 = a.allocate(192 * bs);
        assert(b3.length == 192 * bs);
        assert(a.allocators.length == 2);
        // Ensure deallocate inherits from parent allocators
        () nothrow @nogc { a.deallocate(b1); }();
        b1 = a.allocate(64 * bs);
        assert(b1.length == 64 * bs);
        assert(a.allocators.length == 2);
        a.deallocateAll();
    }

    AllocatorList!((n) => Region!GCAllocator(256 * bs)) a1;
    SharedAllocatorList!((n) => SharedRegion!GCAllocator(256 * bs)) a2;

    testAlloc(a1);
    testAlloc(a2);
}

// Page-backed list (Mallocator bookkeeping): read/write the allocated pages,
// check owns/expand semantics at page granularity (uses the module-level
// `pageSize` imported from core.memory). `testrw` touches one byte per page to
// force the pages in and verify they are writable.
@system unittest
{
    import std.experimental.allocator.building_blocks.ascending_page_allocator : AscendingPageAllocator;
    import std.experimental.allocator.mallocator : Mallocator;
    import std.algorithm.comparison : max;
    import std.typecons : Ternary;

    static void testrw(void[] b)
    {
        ubyte* buf = cast(ubyte*) b.ptr;
        for (int i = 0; i < b.length; i += pageSize)
        {
            buf[i] = cast(ubyte) (i % 256);
            assert(buf[i] == cast(ubyte) (i % 256));
        }
    }

    enum numPages = 2;
    AllocatorList!((n) => AscendingPageAllocator(max(n, numPages * pageSize)), Mallocator) a;

    void[] b1 = a.allocate(1);
    assert(b1.length == 1);
    b1 = a.allocate(2);
    assert(b1.length == 2);
    testrw(b1);
    // The most-recently-used allocator sits at `root`; its backing page
    // allocator must be exhausted after the second allocation.
    assert(a.root.a.parent.getAvailableSize() == 0);

    void[] b2 = a.allocate((numPages + 1) * pageSize);
    assert(b2.length == (numPages + 1) * pageSize);
    testrw(b2);

    void[] b3 = a.allocate(3);
    assert(b3.length == 3);
    testrw(b3);

    void[] b4 = a.allocate(0);
    assert(b4.length == 0);

    assert(a.allocators.length == 3);
    assert(a.owns(b1) == Ternary.yes);
    assert(a.owns(b2) == Ternary.yes);
    assert(a.owns(b3) == Ternary.yes);

    // b1 can grow to the end of its page, but not past it; b2 is page-exact.
    assert(a.expand(b1, pageSize - b1.length));
    assert(b1.length == pageSize);
    assert(!a.expand(b1, 1));
    assert(!a.expand(b2, 1));

    testrw(b1);
    testrw(b2);
    testrw(b3);

    assert(a.deallocate(b1));
    assert(a.deallocate(b2));

    assert(a.deallocateAll());
}

// Ouroboros-style (NullAllocator bookkeeping) page-backed list, shared and
// non-shared variants. NOTE(review): `testAlloc` is defined and `a1`/`a2` are
// declared, but `testAlloc` is never invoked — as written this unittest only
// checks that everything compiles; the calls `testAlloc(a1); testAlloc(a2);`
// appear to be missing. Confirm intent before relying on this test.
// NOTE(review): the alignedAllocate below requests 70*pageSize alignment but
// the `alignedAt` checks in sibling tests sometimes assert a weaker alignment;
// here the assert matches the request.
@system unittest
{
    import std.experimental.allocator.building_blocks.ascending_page_allocator :
        AscendingPageAllocator, SharedAscendingPageAllocator;
    import std.experimental.allocator.mallocator : Mallocator;
    import std.algorithm.comparison : max;
    import std.typecons : Ternary;

    // Local pageSize shadows the module-level core.memory import.
    enum pageSize = 4096;
    enum numPages = 2;

    static void testrw(void[] b)
    {
        ubyte* buf = cast(ubyte*) b.ptr;
        for (int i = 0; i < b.length; i += pageSize)
        {
            buf[i] = cast(ubyte) (i % 256);
            assert(buf[i] == cast(ubyte) (i % 256));
        }
    }

    static void testAlloc(Allocator)(ref Allocator a)
    {
        void[] b1 = a.allocate(1);
        assert(b1.length == 1);
        b1 = a.allocate(2);
        assert(b1.length == 2);
        testrw(b1);

        void[] b2 = a.allocate((numPages + 1) * pageSize);
        assert(b2.length == (numPages + 1) * pageSize);
        testrw(b2);

        void[] b3 = a.allocate(3);
        assert(b3.length == 3);
        testrw(b3);

        void[] b4 = a.allocate(0);
        assert(b4.length == 0);

        assert(a.allocators.length == 3);
        assert(a.owns(b1) == Ternary.yes);
        assert(a.owns(b2) == Ternary.yes);
        assert(a.owns(b3) == Ternary.yes);

        assert(a.expand(b1, pageSize - b1.length));
        assert(b1.length == pageSize);
        assert(!a.expand(b1, 1));
        assert(!a.expand(b2, 1));

        testrw(b1);
        testrw(b2);
        testrw(b3);

        assert(a.deallocate(b1));
        assert(a.deallocate(b2));

        const alignment = cast(uint) (70 * pageSize);
        b3 = a.alignedAllocate(70 * pageSize, alignment);
        assert(b3.length == 70 * pageSize);
        assert(b3.ptr.alignedAt(alignment));
        testrw(b3);
        assert(a.allocators.length == 4);
        assert(a.deallocate(b3));


        assert(a.deallocateAll());
    }

    AllocatorList!((n) => AscendingPageAllocator(max(n, numPages * pageSize)), NullAllocator) a1;
    SharedAllocatorList!((n) => SharedAscendingPageAllocator(max(n, numPages * pageSize)), NullAllocator) a2;
}

// Ouroboros-style aligned allocation: an aligned block can expand to the end
// of its page and stay aligned; subsequent allocations reuse the region until
// exhausted, then a second allocator is created.
@system unittest
{
    import std.experimental.allocator.building_blocks.ascending_page_allocator : AscendingPageAllocator;
    import std.experimental.allocator.mallocator : Mallocator;
    import std.algorithm.comparison : max;
    import std.typecons : Ternary;

    static void testrw(void[] b)
    {
        ubyte* buf = cast(ubyte*) b.ptr;
        for (int i = 0; i < b.length; i += pageSize)
        {
            buf[i] = cast(ubyte) (i % 256);
            assert(buf[i] == cast(ubyte) (i % 256));
        }
    }

    enum numPages = 5;
    AllocatorList!((n) => AscendingPageAllocator(max(n, numPages * pageSize)), NullAllocator) a;
    const alignment = cast(uint) (2 * pageSize);
    auto b = a.alignedAllocate(1, alignment);
    assert(b.length == 1);
    assert(a.expand(b, pageSize - 1));
    assert(b.ptr.alignedAt(alignment));
    assert(b.length == pageSize);

    b = a.allocate(pageSize);
    assert(b.length == pageSize);
    assert(a.allocators.length == 1);

    assert(a.allocate(pageSize * 5).length == pageSize * 5);
    assert(a.allocators.length == 2);

    assert(a.deallocateAll());
}

// Thrash test: repeated allocate/deallocate of a sub-page chunk must not leak
// or keep growing the allocator list (ouroboros bookkeeping).
@system unittest
{
    import std.experimental.allocator.building_blocks.ascending_page_allocator : AscendingPageAllocator;
    import std.algorithm.comparison : max;

    enum maxIter = 100;
    enum numPages = 10;
    const chunkSize = pageSize / 8;

    AllocatorList!((n) => AscendingPageAllocator(max(n, numPages * pageSize)), NullAllocator) a;
    foreach (i; 0 .. maxIter)
    {
        auto b1 = a.allocate(chunkSize);
        assert(b1.length == chunkSize);

        assert(a.deallocate(b1));
    }

    assert(a.deallocateAll());
}

// Same aligned-allocate/expand scenario as above but with a local
// pageSize = 4096 (shadowing the module-level one) and literal sizes.
@system unittest
{
    import std.experimental.allocator.building_blocks.ascending_page_allocator : AscendingPageAllocator;
    import std.experimental.allocator.mallocator : Mallocator;
    import std.algorithm.comparison : max;
    import std.typecons : Ternary;

    enum pageSize = 4096;

    static void testrw(void[] b)
    {
        ubyte* buf = cast(ubyte*) b.ptr;
        for (int i = 0; i < b.length; i += pageSize)
        {
            buf[i] = cast(ubyte) (i % 256);
            assert(buf[i] == cast(ubyte) (i % 256));
        }
    }

    enum numPages = 5;
    AllocatorList!((n) => AscendingPageAllocator(max(n, numPages * pageSize)), NullAllocator) a;
    auto b = a.alignedAllocate(1, pageSize * 2);
    assert(b.length == 1);
    assert(a.expand(b, 4095));
    assert(b.ptr.alignedAt(2 * 4096));
    assert(b.length == 4096);

    b = a.allocate(4096);
    assert(b.length == 4096);
    assert(a.allocators.length == 1);

    assert(a.allocate(4096 * 5).length == 4096 * 5);
    assert(a.allocators.length == 2);

    assert(a.deallocateAll());
}

// SharedAllocatorList under concurrency: 10 threads allocate, aligned-allocate
// and deallocate through the same shared list; all operations must succeed and
// deallocateAll must clean up afterwards.
@system unittest
{
    import std.experimental.allocator.building_blocks.region : SharedRegion;
    import core.thread : ThreadGroup;
    import std.algorithm.comparison : max;

    enum numThreads = 10;
    SharedAllocatorList!((n) => SharedRegion!(GCAllocator)(new ubyte[max(n, 1024)])) a;

    void fun()
    {
        void[] b1 = a.allocate(1024);
        assert(b1.length == 1024);

        void[] b2 = a.alignedAllocate(1024, 1024);
        assert(b2.length == 1024);
        assert(b2.ptr.alignedAt(1024));

        assert(a.deallocate(b1));
        assert(a.deallocate(b2));
    }

    auto tg = new ThreadGroup;
    foreach (i; 0 .. numThreads)
    {
        tg.create(&fun);
    }
    tg.joinAll();

    assert(a.deallocateAll());
}

// SharedAllocatorList over page allocators, 100 threads, with expand.
// NOTE(review): alignedAllocate requests 4096-byte alignment but the assert
// only checks alignedAt(1024) — a 4096-aligned pointer trivially passes, so
// the assert is weaker than the request; likely intended alignedAt(4096).
@system unittest
{
    import std.experimental.allocator.mallocator : Mallocator;
    import std.experimental.allocator.building_blocks.ascending_page_allocator : SharedAscendingPageAllocator;
    import core.thread : ThreadGroup;
    import std.algorithm.comparison : max;

    enum numThreads = 100;
    enum pageSize = 4096;
    enum numPages = 10;
    SharedAllocatorList!((n) => SharedAscendingPageAllocator(max(n, pageSize * numPages)), Mallocator) a;

    void fun()
    {
        void[] b1 = a.allocate(512);
        assert(b1.length == 512);
        assert(a.expand(b1, 512));
        assert(b1.length == 1024);

        void[] b2 = a.alignedAllocate(1024, 4096);
        assert(b2.length == 1024);
        assert(b2.ptr.alignedAt(1024));

        assert(a.deallocate(b1));
        assert(a.deallocate(b2));
    }

    auto tg = new ThreadGroup;
    foreach (i; 0 .. numThreads)
    {
        tg.create(&fun);
    }
    tg.joinAll();

    assert(a.deallocateAll());
}