2 * This module contains the garbage collector implementation.
4 * Copyright: Copyright (C) 2001-2007 Digital Mars, www.digitalmars.com.
7 * This software is provided 'as-is', without any express or implied
8 * warranty. In no event will the authors be held liable for any damages
9 * arising from the use of this software.
11 * Permission is granted to anyone to use this software for any purpose,
12 * including commercial applications, and to alter it and redistribute it
13 * freely, in both source and binary form, subject to the following
16 * o The origin of this software must not be misrepresented; you must not
17 * claim that you wrote the original software. If you use this software
18 * in a product, an acknowledgment in the product documentation would be
19 * appreciated but is not required.
20 * o Altered source versions must be plainly marked as such, and must not
21 * be misrepresented as being the original software.
22 * o This notice may not be removed or altered from any source
24 * Authors: Walter Bright, David Friedman, Sean Kelly
29 // D Programming Language Garbage Collector implementation
31 /************** Debugging ***************************/
33 //debug = COLLECT_PRINTF; // turn on printf's
34 //debug = PTRCHECK; // more pointer checking
35 //debug = PTRCHECK2; // thorough but slow pointer checking
37 /*************** Configuration *********************/
39 version = STACKGROWSDOWN; // growing the stack means subtracting from the stack pointer
40 // (use for Intel X86 CPUs)
41 // else growing the stack means adding to the stack pointer
43 /***************************************************/
45 import rt.gc.cdgc.bits: GCBits;
46 import rt.gc.cdgc.stats: GCStats, Stats;
47 import dynarray = rt.gc.cdgc.dynarray;
48 import os = rt.gc.cdgc.os;
49 import opts = rt.gc.cdgc.opts;
51 import cstdlib = tango.stdc.stdlib;
52 import cstring = tango.stdc.string;
53 import cstdio = tango.stdc.stdio;
56 * This is a small optimization that has proved its usefulness. For small chunks
57 * of memory, memset() seems to be slower (probably because of the call overhead)
58 * than a simple loop that sets the memory.
60 void memset(void* dst, int c, size_t n)
62 // This number (32) has been determined empirically
64 cstring.memset(dst, c, n);
67 auto p = cast(ubyte*)(dst);
74 // BUG: The following import will likely not work, since the gcc
75 // subdirectory is elsewhere. Instead, perhaps the functions
76 // could be declared directly or some other resolution could
78 static import gcc.builtins; // for __builtin_unwind_init
88 package enum BlkAttr : uint
90 FINALIZE = 0b0000_0001,
91 NO_SCAN = 0b0000_0010,
92 NO_MOVE = 0b0000_0100,
93 ALL_BITS = 0b1111_1111
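    // These are bit flags and may be combined; e.g. a block holding an object
    // with a destructor but no pointer members could be allocated with
    // FINALIZE | NO_SCAN (illustrative combination, not taken from this file).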
96 package bool has_pointermap(uint attrs)
98 return !opts.options.conservative && !(attrs & BlkAttr.NO_SCAN);
103 alias void delegate(Object) DEvent;
104 alias void delegate( void*, void* ) scanFn;
105 enum { OPFAIL = ~cast(size_t)0 }
109 version (DigitalMars) version(OSX)
110 void _d_osx_image_init();
112 void* rt_stackBottom();
114 void rt_finalize( void* p, bool det = true );
115 void rt_attachDisposeEvent(Object h, DEvent e);
116 bool rt_detachDisposeEvent(Object h, DEvent e);
117 void rt_scanStaticData( scanFn scan );
120 bool thread_needLock();
121 void thread_suspendAll();
122 void thread_resumeAll();
123 void thread_scanAll( scanFn fn, void* curStackTop = null );
125 void onOutOfMemoryError();
133 POOLSIZE = (4096*256),
147 B_PAGE, // start of large alloc
148 B_PAGEPLUS, // continuation of large alloc
168 int opCmp(in Range other)
170 if (pbot < other.pbot)
173 return cast(int)(pbot > other.pbot);
178 const uint binsize[B_MAX] = [ 16,32,64,128,256,512,1024,2048,4096 ];
179 const uint notbinsize[B_MAX] = [ ~(16u-1),~(32u-1),~(64u-1),~(128u-1),~(256u-1),
180 ~(512u-1),~(1024u-1),~(2048u-1),~(4096u-1) ];
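// Worked example (illustrative): a request for 20 bytes is served from the
// 32-byte bin (binsize entry 32), and the matching notbinsize entry is ~31u,
// so `offset & notbinsize[bin]` rounds an interior offset down to the start of
// its block, e.g. 0x1234 & ~31u == 0x1220.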
183 /* ============================ GC =============================== */
186 class GCLock {} // just a dummy so we can get a global lock
197 // !=0 means don't scan stack
202 /// Turn off collections if > 0
205 /// min(pool.baseAddr)
207 /// max(pool.topAddr)
210 /// Free list for each size
211 List*[B_MAX] free_list;
213 dynarray.DynArray!(void*) roots;
214 dynarray.DynArray!(Range) ranges;
215 dynarray.DynArray!(Pool*) pools;
220 // call locked if necessary
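// Illustrative usage, mirroring the gc_* wrappers near the end of this module
// (the delegate only runs under gc.lock when thread_needLock() is true):
//
//     return locked!(size_t, () {
//         return reserve(size);
//     })();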
221 private T locked(T, alias Code)()
223 if (thread_needLock())
224 synchronized (gc.lock) return Code();
233 assert (gc !is null);
235 for (size_t i = 0; i < gc.pools.length; i++) {
236 Pool* pool = gc.pools[i];
239 assert(gc.min_addr == pool.baseAddr);
240 if (i + 1 < gc.pools.length)
241 assert(*pool < *gc.pools[i + 1]);
242 else if (i + 1 == gc.pools.length)
243 assert(gc.max_addr == pool.topAddr);
246 gc.roots.Invariant();
247 gc.ranges.Invariant();
249 for (size_t i = 0; i < gc.ranges.length; i++) {
250 assert(gc.ranges[i].pbot);
251 assert(gc.ranges[i].ptop);
252 assert(gc.ranges[i].pbot <= gc.ranges[i].ptop);
255 for (size_t i = 0; i < B_PAGE; i++) {
256 for (List *list = gc.free_list[i]; list; list = list.next) {
257 auto pool = list.pool;
258 assert (pool !is null);
259 auto p = cast(byte*) list;
260 assert (p >= pool.baseAddr);
261 assert (p < pool.topAddr);
262 assert (pool.freebits.test((p - pool.baseAddr) / 16));
271 * Find Pool that pointer is in.
272 * Return null if not in a Pool.
273 * Assume pools is sorted.
275 Pool* findPool(void* p)
277 if (p < gc.min_addr || p >= gc.max_addr)
279 if (gc.pools.length == 0)
281 if (gc.pools.length == 1)
283 /// The pools array is sorted by address, so do a binary search
285 size_t high = gc.pools.length - 1;
286 while (low <= high) {
287 size_t mid = (low + high) / 2;
288 auto pool = gc.pools[mid];
289 if (p < pool.baseAddr)
291 else if (p >= pool.topAddr)
302 * Determine the base address of the block containing p. If p is not a gc
303 * allocated pointer, return null.
305 BlkInfo getInfo(void* p)
308 Pool* pool = findPool(p);
312 info.base = pool.findBase(p);
313 if (info.base is null)
315 info.size = pool.findSize(info.base);
316 size_t bit_i = (info.base - pool.baseAddr) / 16;
317 info.attr = getAttr(pool, bit_i);
318 if (has_pointermap(info.attr)) {
319 info.size -= size_t.sizeof; // PointerMap bitmask
320 // Points to the PointerMap bitmask pointer, not user data
321 if (p >= (info.base + info.size)) {
325 if (opts.options.sentinel) {
326 info.base = sentinel_add(info.base);
327 // points to sentinel data, not user data
328 if (p < info.base || p >= sentinel_post(info.base))
330 info.size -= SENTINEL_EXTRA;
337 * Compute bin for size.
339 Bins findBin(size_t size)
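// A rough sketch of the mapping this lookup computes (illustrative only, not the
// original implementation; the helper name is hypothetical and assumes the
// binsize[] table above): the smallest bin that can hold the request, with
// anything larger than the biggest bin going to B_PAGE.
version (none) Bins findBinSketch(size_t size)
{
    for (size_t bin = 0; bin < B_PAGE; bin++)
        if (size <= binsize[bin])
            return cast(Bins) bin;   // e.g. 20 bytes -> 32-byte bin
    return B_PAGE;                   // large requests are handled by bigAlloc()
}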
383 * Allocate a new pool of at least size bytes.
384 * Sort it into pools.
385 * Mark all memory in the pool as B_FREE.
386 * Return the actual number of bytes reserved or 0 on error.
388 size_t reserve(size_t size)
391 size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
392 Pool* pool = newPool(npages);
396 return pool.npages * PAGESIZE;
401 * Minimizes physical memory usage by returning free pools to the OS.
409 for (n = 0; n < gc.pools.length; n++)
412 for (pn = 0; pn < pool.npages; pn++)
414 if (cast(Bins)pool.pagetable[pn] != B_FREE)
417 if (pn < pool.npages)
421 gc.pools.remove_at(n);
424 gc.min_addr = gc.pools[0].baseAddr;
425 gc.max_addr = gc.pools[gc.pools.length - 1].topAddr;
430 * Allocate a chunk of memory that is larger than a page.
431 * Return null if out of memory.
433 void* bigAlloc(size_t size, out Pool* pool)
442 npages = (size + PAGESIZE - 1) / PAGESIZE;
446 // This code could use some refinement when repeatedly
447 // allocating very large arrays.
449 for (n = 0; n < gc.pools.length; n++)
452 pn = pool.allocPages(npages);
467 freedpages = fullcollectshell();
468 if (freedpages >= gc.pools.length * ((POOLSIZE / PAGESIZE) / 4))
473 // Release empty pools to prevent bloat
476 pool = newPool(npages);
482 pn = pool.allocPages(npages);
483 assert(pn != OPFAIL);
486 // Release empty pools to prevent bloat
489 pool = newPool(npages);
492 pn = pool.allocPages(npages);
493 assert(pn != OPFAIL);
503 size_t bit_i = pn * (PAGESIZE / 16);
504 pool.freebits.clear(bit_i);
505 pool.pagetable[pn] = B_PAGE;
507 memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
508 p = pool.baseAddr + pn * PAGESIZE;
509 memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
510 if (opts.options.mem_stomp)
511 memset(p, 0xF1, size);
515 return null; // let the caller (malloc) handle the error
520 * Allocate a new pool with at least npages in it.
521 * Sort it into pools.
522 * Return null if failed.
524 Pool *newPool(size_t npages)
526 // Minimum of POOLSIZE
527 if (npages < POOLSIZE/PAGESIZE)
528 npages = POOLSIZE/PAGESIZE;
529 else if (npages > POOLSIZE/PAGESIZE)
531 // Give us 150% of requested size, so there's room to extend
532 auto n = npages + (npages >> 1);
533 if (n < size_t.max/PAGESIZE)
537 // Allocate successively larger pools up to 8 megs
540 size_t n = gc.pools.length;
542 n = 8; // cap pool size at 8 megs
543 n *= (POOLSIZE / PAGESIZE);
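// Illustrative numbers, assuming PAGESIZE == 4096: POOLSIZE/PAGESIZE == 256
// pages (1 MiB), so each new pool is sized at least gc.pools.length MiB,
// capped at 8 MiB, i.e. pools get progressively larger as the heap grows.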
548 auto pool = cast(Pool*) cstdlib.calloc(1, Pool.sizeof);
551 pool.initialize(npages);
558 auto inserted_pool = *gc.pools.insert_sorted!("*a < *b")(pool);
559 if (inserted_pool is null) {
563 assert (inserted_pool is pool);
564 gc.min_addr = gc.pools[0].baseAddr;
565 gc.max_addr = gc.pools[gc.pools.length - 1].topAddr;
571 * Allocate a page of bins.
575 int allocPage(Bins bin)
580 for (size_t n = 0; n < gc.pools.length; n++)
583 pn = pool.allocPages(1);
590 pool.pagetable[pn] = cast(ubyte)bin;
592 // Convert page to free list
593 size_t size = binsize[bin];
594 auto list_head = &gc.free_list[bin];
596 byte* p = pool.baseAddr + pn * PAGESIZE;
597 byte* ptop = p + PAGESIZE;
598 size_t bit_i = pn * (PAGESIZE / 16);
599 size_t bit_stride = size / 16;
600 for (; p < ptop; p += size, bit_i += bit_stride)
602 List* l = cast(List *) p;
606 // TODO: maybe this can be optimized to be set in chunks
607 pool.freebits.set(bit_i);
614 * Search a range of memory values and mark any pointers into the GC pool using
615 * type information (bitmask of pointer locations).
617 void mark_range(void *pbot, void *ptop, size_t* pm_bitmask)
619 // TODO: make our own assert because assert uses the GC
620 assert (pbot <= ptop);
622 const BITS_PER_WORD = size_t.sizeof * 8;
624 void **p1 = cast(void **)pbot;
625 void **p2 = cast(void **)ptop;
627 bool changes = false;
629 size_t type_size = pm_bitmask[0];
630 size_t* pm_bits = pm_bitmask + 1;
631 bool has_type_info = type_size != 1 || pm_bits[0] != 1 || pm_bits[1] != 0;
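    // Assumed layout of pm_bitmask, as used by the loop below: word [0] holds
    // the type size in words, and the words starting at [1] form a bitmask in
    // which bit n is set if the n-th word of the type may contain a pointer
    // and therefore has to be considered during scanning.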
633 //printf("marking range: %p -> %p\n", pbot, ptop);
634 for (; p1 + type_size <= p2; p1 += type_size) {
635 for (size_t n = 0; n < type_size; n++) {
636 // scan bit set for this word
638 !(pm_bits[n / BITS_PER_WORD] & (1 << (n % BITS_PER_WORD))))
643 if (p < gc.min_addr || p >= gc.max_addr)
646 if ((cast(size_t)p & ~(PAGESIZE-1)) == pcache)
649 Pool* pool = findPool(p);
652 size_t offset = cast(size_t)(p - pool.baseAddr);
654 size_t pn = offset / PAGESIZE;
655 Bins bin = cast(Bins)pool.pagetable[pn];
657 // Cache B_PAGE, B_PAGEPLUS and B_FREE lookups
659 pcache = cast(size_t)p & ~(PAGESIZE-1);
661 // Adjust bit to be at start of allocated memory block
663 bit_i = (offset & notbinsize[bin]) / 16;
664 else if (bin == B_PAGEPLUS)
670 while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
671 bit_i = pn * (PAGESIZE / 16);
673 else // Don't mark bits in B_FREE pages
676 if (!pool.mark.test(bit_i))
678 pool.mark.set(bit_i);
679 if (!pool.noscan.test(bit_i))
681 pool.scan.set(bit_i);
689 gc.any_changes = true;
693 * Return number of full pages free'd.
695 size_t fullcollectshell()
697 gc.stats.collection_started();
699 gc.stats.collection_finished();
701 // The purpose of the 'shell' is to ensure all the registers
702 // get put on the stack so they'll be scanned
707 gcc.builtins.__builtin_unwind_init();
714 uint eax,ecx,edx,ebx,ebp,esi,edi;
727 else version (X86_64)
729 ulong rax,rbx,rcx,rdx,rbp,rsi,rdi,r8,r9,r10,r11,r12,r13,r14,r15;
752 static assert( false, "Architecture not supported." );
763 result = fullcollect(sp);
786 size_t fullcollect(void *stackTop)
788 debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
790 // we always need to stop the world to make threads save their CPU registers
791 // on the stack and prepare themselves for thread_scanAll()
793 gc.stats.world_stopped();
795 if (opts.options.fork) {
796 cstdio.fflush(null); // avoid duplicated FILE* output
797 os.pid_t child_pid = os.fork();
798 assert (child_pid != -1); // don't accept errors in non-release mode
800 case -1: // if fork() fails, fall back to stop-the-world
801 opts.options.fork = false;
803 case 0: // child process (i.e. the collector's mark phase)
806 break; // bogus, will never reach here
807 default: // parent process (i.e. the mutator)
808 // start the world again and wait for the mark phase to finish
810 gc.stats.world_started();
812 os.pid_t wait_pid = os.waitpid(child_pid, &status, 0);
813 assert (wait_pid == child_pid);
819 // if we reach here, we are using the standard stop-the-world collection
822 gc.stats.world_started();
831 void mark(void *stackTop)
833 debug(COLLECT_PRINTF) printf("\tmark()\n");
835 gc.any_changes = false;
837 for (size_t n = 0; n < gc.pools.length; n++)
839 Pool* pool = gc.pools[n];
840 pool.mark.copy(&pool.freebits);
844 /// Marks a range of memory in conservative mode.
845 void mark_conservative_range(void* pbot, void* ptop)
847 mark_range(pbot, ptop, PointerMap.init.bits.ptr);
850 rt_scanStaticData(&mark_conservative_range);
854 // Scan stacks and registers for each paused thread
855 thread_scanAll(&mark_conservative_range, stackTop);
859 debug(COLLECT_PRINTF) printf("scan roots[]\n");
860 mark_conservative_range(gc.roots.ptr, gc.roots.ptr + gc.roots.length);
863 debug(COLLECT_PRINTF) printf("scan ranges[]\n");
864 for (size_t n = 0; n < gc.ranges.length; n++)
866 debug(COLLECT_PRINTF) printf("\t%x .. %x\n", gc.ranges[n].pbot, gc.ranges[n].ptop);
867 mark_conservative_range(gc.ranges[n].pbot, gc.ranges[n].ptop);
870 debug(COLLECT_PRINTF) printf("\tscan heap\n");
871 while (gc.any_changes)
873 gc.any_changes = false;
874 for (size_t n = 0; n < gc.pools.length; n++)
880 Pool* pool = gc.pools[n];
882 bbase = pool.scan.base();
883 btop = bbase + pool.scan.nwords;
884 for (b = bbase; b < btop;)
900 o = pool.baseAddr + (b - bbase) * 32 * 16;
901 if (!(bitm & 0xFFFF))
906 for (; bitm; o += 16, bitm >>= 1)
911 pn = cast(size_t)(o - pool.baseAddr) / PAGESIZE;
912 bin = cast(Bins)pool.pagetable[pn];
914 if (opts.options.conservative)
915 mark_conservative_range(o, o + binsize[bin]);
917 auto end_of_blk = cast(size_t**)(o +
918 binsize[bin] - size_t.sizeof);
919 size_t* pm_bitmask = *end_of_blk;
920 mark_range(o, end_of_blk, pm_bitmask);
923 else if (bin == B_PAGE || bin == B_PAGEPLUS)
925 if (bin == B_PAGEPLUS)
927 while (pool.pagetable[pn - 1] != B_PAGE)
931 while (pn + u < pool.npages &&
932 pool.pagetable[pn + u] == B_PAGEPLUS)
935 size_t blk_size = u * PAGESIZE;
936 if (opts.options.conservative)
937 mark_conservative_range(o, o + blk_size);
939 auto end_of_blk = cast(size_t**)(o + blk_size -
941 size_t* pm_bitmask = *end_of_blk;
942 mark_range(o, end_of_blk, pm_bitmask);
957 // Free up everything not marked
958 debug(COLLECT_PRINTF) printf("\tsweep\n");
961 size_t freedpages = 0;
963 for (size_t n = 0; n < gc.pools.length; n++)
965 Pool* pool = gc.pools[n];
967 uint* bbase = pool.mark.base();
969 for (pn = 0; pn < pool.npages; pn++, bbase += PAGESIZE / (32 * 16))
971 Bins bin = cast(Bins)pool.pagetable[pn];
975 auto size = binsize[bin];
976 byte* p = pool.baseAddr + pn * PAGESIZE;
977 byte* ptop = p + PAGESIZE;
978 size_t bit_i = pn * (PAGESIZE/16);
979 size_t bit_stride = size / 16;
981 version(none) // BUG: doesn't work because freebits must also be cleared
983 // If free'd entire page
984 if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 &&
985 bbase[3] == 0 && bbase[4] == 0 && bbase[5] == 0 &&
986 bbase[6] == 0 && bbase[7] == 0)
988 for (; p < ptop; p += size, bit_i += bit_stride)
990 if (pool.finals.testClear(bit_i)) {
991 if (opts.options.sentinel)
992 rt_finalize(sentinel_add(p), false/*gc.no_stack > 0*/);
994 rt_finalize(p, false/*gc.no_stack > 0*/);
996 clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
998 if (opts.options.mem_stomp)
999 memset(p, 0xF3, size);
1001 pool.pagetable[pn] = B_FREE;
1006 for (; p < ptop; p += size, bit_i += bit_stride)
1008 if (!pool.mark.test(bit_i))
1010 if (opts.options.sentinel)
1011 sentinel_Invariant(sentinel_add(p));
1013 pool.freebits.set(bit_i);
1014 if (pool.finals.testClear(bit_i)) {
1015 if (opts.options.sentinel)
1016 rt_finalize(sentinel_add(p), false/*gc.no_stack > 0*/);
1018 rt_finalize(p, false/*gc.no_stack > 0*/);
1020 clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
1022 if (opts.options.mem_stomp)
1023 memset(p, 0xF3, size);
1029 else if (bin == B_PAGE)
1031 size_t bit_stride = PAGESIZE / 16;
1032 size_t bit_i = pn * bit_stride;
1033 if (!pool.mark.test(bit_i))
1035 byte *p = pool.baseAddr + pn * PAGESIZE;
1036 if (opts.options.sentinel)
1037 sentinel_Invariant(sentinel_add(p));
1038 if (pool.finals.testClear(bit_i)) {
1039 if (opts.options.sentinel)
1040 rt_finalize(sentinel_add(p), false/*gc.no_stack > 0*/);
1042 rt_finalize(p, false/*gc.no_stack > 0*/);
1044 clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
1046 debug(COLLECT_PRINTF) printf("\tcollecting big %x\n", p);
1047 pool.pagetable[pn] = B_FREE;
1048 pool.freebits.set(bit_i);
1050 if (opts.options.mem_stomp)
1051 memset(p, 0xF3, PAGESIZE);
1052 while (pn + 1 < pool.npages && pool.pagetable[pn + 1] == B_PAGEPLUS)
1055 pool.pagetable[pn] = B_FREE;
1056 bit_i += bit_stride;
1057 pool.freebits.set(bit_i);
1060 if (opts.options.mem_stomp)
1063 memset(p, 0xF3, PAGESIZE);
1072 gc.free_list[] = null;
1074 // Free complete pages, rebuild free list
1075 debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
1076 size_t recoveredpages = 0;
1077 for (size_t n = 0; n < gc.pools.length; n++)
1079 Pool* pool = gc.pools[n];
1080 for (size_t pn = 0; pn < pool.npages; pn++)
1082 Bins bin = cast(Bins)pool.pagetable[pn];
1088 size_t size = binsize[bin];
1089 size_t bit_stride = size / 16;
1090 size_t bit_base = pn * (PAGESIZE / 16);
1091 size_t bit_top = bit_base + (PAGESIZE / 16);
1095 for (; bit_i < bit_top; bit_i += bit_stride)
1097 if (!pool.freebits.test(bit_i))
1100 // we don't need to explicitly set the freebit here because all
1101 // freebits were already set, including the bit used for the
1102 // whole freed page (bit_base).
1103 pool.pagetable[pn] = B_FREE;
1108 p = pool.baseAddr + pn * PAGESIZE;
1109 for (u = 0; u < PAGESIZE; u += size)
1111 bit_i = bit_base + u / 16;
1112 if (pool.freebits.test(bit_i))
1114 assert ((p+u) >= pool.baseAddr);
1115 assert ((p+u) < pool.topAddr);
1116 List* list = cast(List*) (p + u);
1117 // avoid unnecessary writes (it really saves time)
1118 if (list.next != gc.free_list[bin])
1119 list.next = gc.free_list[bin];
1120 if (list.pool != pool)
1122 gc.free_list[bin] = list;
1129 debug(COLLECT_PRINTF) printf("recovered pages = %d\n", recoveredpages);
1130 debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, gc.pools.length);
1132 return freedpages + recoveredpages;
1139 uint getAttr(Pool* pool, size_t bit_i)
1147 if (pool.finals.test(bit_i))
1148 attrs |= BlkAttr.FINALIZE;
1149 if (pool.noscan.test(bit_i))
1150 attrs |= BlkAttr.NO_SCAN;
1151 // if (pool.nomove.test(bit_i))
1152 // attrs |= BlkAttr.NO_MOVE;
1160 void setAttr(Pool* pool, size_t bit_i, uint mask)
1167 if (mask & BlkAttr.FINALIZE)
1169 pool.finals.set(bit_i);
1171 if (mask & BlkAttr.NO_SCAN)
1173 pool.noscan.set(bit_i);
1175 // if (mask & BlkAttr.NO_MOVE)
1177 // if (!pool.nomove.nbits)
1178 // pool.nomove.alloc(pool.mark.nbits);
1179 // pool.nomove.set(bit_i);
1187 void clrAttr(Pool* pool, size_t bit_i, uint mask)
1194 if (mask & BlkAttr.FINALIZE)
1195 pool.finals.clear(bit_i);
1196 if (mask & BlkAttr.NO_SCAN)
1197 pool.noscan.clear(bit_i);
1198 // if (mask & BlkAttr.NO_MOVE && pool.nomove.nbits)
1199 // pool.nomove.clear(bit_i);
1207 gc.stack_bottom = cast(char*)&dummy;
1208 opts.parse(cstdlib.getenv("D_GC_OPTS"));
1209 // If we are going to fork, make sure we have the needed OS support
1210 if (opts.options.fork)
1211 opts.options.fork = os.HAVE_SHARED && os.HAVE_FORK;
1212 gc.lock = GCLock.classinfo;
1214 setStackBottom(rt_stackBottom());
1215 gc.stats = Stats(gc);
1222 private void *malloc(size_t size, uint attrs, size_t* pm_bitmask)
1226 gc.stats.malloc_started(size, attrs, pm_bitmask);
1228 gc.stats.malloc_finished(p);
1233 if (opts.options.sentinel)
1234 size += SENTINEL_EXTRA;
1236 bool has_pm = has_pointermap(attrs);
1238 size += size_t.sizeof;
1241 // Cache previous binsize lookup - Dave Fladebo.
1242 static size_t lastsize = -1;
1243 static Bins lastbin;
1244 if (size == lastsize)
1248 bin = findBin(size);
1254 size_t bit_i = void;
1255 size_t capacity = void; // to figure out where to store the bitmask
1258 p = gc.free_list[bin];
1261 if (!allocPage(bin) && !gc.disabled) // try to find a new page
1263 if (!thread_needLock())
1265 /* Then we haven't locked it yet. Be sure
1266 * to acquire gc.lock for the collection, since a finalizer
1267 * may start a new thread.
1269 synchronized (gc.lock)
1274 else if (!fullcollectshell()) // collect to find a new page
1279 if (!gc.free_list[bin] && !allocPage(bin))
1281 newPool(1); // allocate new pool to find a new page
1282 // TODO: hint allocPage() to use the pool we just created
1283 int result = allocPage(bin);
1285 onOutOfMemoryError();
1287 p = gc.free_list[bin];
1289 capacity = binsize[bin];
1291 // Return next item from free list
1292 List* list = cast(List*) p;
1293 assert ((cast(byte*)list) >= list.pool.baseAddr);
1294 assert ((cast(byte*)list) < list.pool.topAddr);
1295 gc.free_list[bin] = list.next;
1297 bit_i = (p - pool.baseAddr) / 16;
1298 assert (pool.freebits.test(bit_i));
1299 pool.freebits.clear(bit_i);
1300 if (!(attrs & BlkAttr.NO_SCAN))
1301 memset(p + size, 0, capacity - size);
1302 if (opts.options.mem_stomp)
1303 memset(p, 0xF0, size);
1307 p = bigAlloc(size, pool);
1309 onOutOfMemoryError();
1310 assert (pool !is null);
1311 // Round the size up to the number of pages needed to store it
1312 size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
1313 capacity = npages * PAGESIZE;
1314 bit_i = (p - pool.baseAddr) / 16;
1317 // Store the bit mask AFTER SENTINEL_POST
1318 // TODO: store it BEFORE, so the bitmask is protected too
1320 auto end_of_blk = cast(size_t**)(p + capacity - size_t.sizeof);
1321 *end_of_blk = pm_bitmask;
1322 size -= size_t.sizeof;
1325 if (opts.options.sentinel) {
1326 size -= SENTINEL_EXTRA;
1327 p = sentinel_add(p);
1328 sentinel_init(p, size);
1332 setAttr(pool, bit_i, attrs);
1333 assert (bin >= B_PAGE || !pool.freebits.test(bit_i));
1343 private void *calloc(size_t size, uint attrs, size_t* pm_bitmask)
1347 void *p = malloc(size, attrs, pm_bitmask);
1356 private void *realloc(void *p, size_t size, uint attrs,
1369 p = malloc(size, attrs, pm_bitmask);
1373 Pool* pool = findPool(p);
1377 // Set or retrieve attributes as appropriate
1378 auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
1380 clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
1381 setAttr(pool, bit_i, attrs);
1384 attrs = getAttr(pool, bit_i);
1386 void* blk_base_addr = pool.findBase(p);
1387 size_t blk_size = pool.findSize(p);
1388 bool has_pm = has_pointermap(attrs);
1389 size_t pm_bitmask_size = 0;
1391 pm_bitmask_size = size_t.sizeof;
1392 // Retrieve pointer map bit mask if appropriate
1393 if (pm_bitmask is null) {
1394 auto end_of_blk = cast(size_t**)(blk_base_addr +
1395 blk_size - size_t.sizeof);
1396 pm_bitmask = *end_of_blk;
1400 if (opts.options.sentinel)
1402 sentinel_Invariant(p);
1403 size_t sentinel_stored_size = *sentinel_size(p);
1404 if (sentinel_stored_size != size)
1406 void* p2 = malloc(size, attrs, pm_bitmask);
1407 if (sentinel_stored_size < size)
1408 size = sentinel_stored_size;
1409 cstring.memcpy(p2, p, size);
1415 size += pm_bitmask_size;
1416 if (blk_size >= PAGESIZE && size >= PAGESIZE)
1418 auto psz = blk_size / PAGESIZE;
1419 auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
1423 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
1428 if (opts.options.mem_stomp)
1429 memset(p + size - pm_bitmask_size, 0xF2,
1430 blk_size - size - pm_bitmask_size);
1431 pool.freePages(pagenum + newsz, psz - newsz);
1432 auto new_blk_size = (PAGESIZE * newsz);
1433 // update the size cache, assuming it is very likely that the
1434 // size of this block will be queried in the near future
1435 pool.update_cache(p, new_blk_size);
1437 auto end_of_blk = cast(size_t**)(blk_base_addr +
1438 new_blk_size - pm_bitmask_size);
1439 *end_of_blk = pm_bitmask;
1443 else if (pagenum + newsz <= pool.npages)
1445 // Attempt to expand in place
1446 for (size_t i = pagenum + psz; 1;)
1448 if (i == pagenum + newsz)
1450 if (opts.options.mem_stomp)
1451 memset(p + blk_size - pm_bitmask_size,
1452 0xF0, size - blk_size
1454 memset(pool.pagetable + pagenum +
1455 psz, B_PAGEPLUS, newsz - psz);
1456 auto new_blk_size = (PAGESIZE * newsz);
1457 // update the size cache, assuming it is very
1458 // likely that the size of this block will be queried in the near future
1460 pool.update_cache(p, new_blk_size);
1462 auto end_of_blk = cast(size_t**)(
1463 blk_base_addr + new_blk_size -
1465 *end_of_blk = pm_bitmask;
1469 if (i == pool.npages)
1473 if (pool.pagetable[i] != B_FREE)
1479 // reallocate if the new size is bigger than the block or less than half of it
1480 if (blk_size < size || blk_size > size * 2)
1482 size -= pm_bitmask_size;
1483 blk_size -= pm_bitmask_size;
1484 void* p2 = malloc(size, attrs, pm_bitmask);
1485 if (blk_size < size)
1487 cstring.memcpy(p2, p, size);
1497 * Attempt to in-place enlarge the memory block pointed to by p by at least
1498 * min_size beyond its current capacity, up to a maximum of max_size. This
1499 * does not attempt to move the memory block (like realloc() does).
1502 * 0 if could not extend p,
1503 * total size of entire memory block if successful.
1505 private size_t extend(void* p, size_t minsize, size_t maxsize)
1508 assert( minsize <= maxsize );
1512 if (opts.options.sentinel)
1515 Pool* pool = findPool(p);
1519 // Retrieve attributes
1520 auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
1521 uint attrs = getAttr(pool, bit_i);
1523 void* blk_base_addr = pool.findBase(p);
1524 size_t blk_size = pool.findSize(p);
1525 bool has_pm = has_pointermap(attrs);
1526 size_t* pm_bitmask = null;
1527 size_t pm_bitmask_size = 0;
1529 pm_bitmask_size = size_t.sizeof;
1530 // Retrieve pointer map bit mask
1531 auto end_of_blk = cast(size_t**)(blk_base_addr +
1532 blk_size - size_t.sizeof);
1533 pm_bitmask = *end_of_blk;
1535 minsize += size_t.sizeof;
1536 maxsize += size_t.sizeof;
1539 if (blk_size < PAGESIZE)
1540 return 0; // cannot extend buckets
1542 auto psz = blk_size / PAGESIZE;
1543 auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
1544 auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
1546 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
1549 for (sz = 0; sz < maxsz; sz++)
1551 auto i = pagenum + psz + sz;
1552 if (i == pool.npages)
1554 if (pool.pagetable[i] != B_FREE)
1564 size_t new_size = (psz + sz) * PAGESIZE;
1566 if (opts.options.mem_stomp)
1567 memset(p + blk_size - pm_bitmask_size, 0xF0,
1568 new_size - blk_size - pm_bitmask_size);
1569 memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
1572 // update the size cache, assuming it is very likely that the size of
1573 // this block will be queried in the near future
1574 pool.update_cache(p, new_size);
1577 new_size -= size_t.sizeof;
1578 auto end_of_blk = cast(size_t**)(blk_base_addr + new_size);
1579 *end_of_blk = pm_bitmask;
1588 private void free(void *p)
1597 // Find which page it is in
1599 if (!pool) // if not one of ours
1601 if (opts.options.sentinel) {
1602 sentinel_Invariant(p);
1603 p = sentinel_sub(p);
1605 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1606 bit_i = cast(size_t)(p - pool.baseAddr) / 16;
1607 clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
1609 bin = cast(Bins)pool.pagetable[pagenum];
1610 if (bin == B_PAGE) // if large alloc
1615 pool.freebits.set(bit_i);
1616 size_t bit_stride = PAGESIZE / 16;
1617 while (++n < pool.npages && pool.pagetable[n] == B_PAGEPLUS) {
1619 bit_i += bit_stride;
1620 pool.freebits.set(bit_i);
1622 if (opts.options.mem_stomp)
1623 memset(p, 0xF2, npages * PAGESIZE);
1624 pool.freePages(pagenum, npages);
1625 // just in case we were caching this pointer
1626 pool.clear_cache(p);
1631 List* list = cast(List*) p;
1633 if (opts.options.mem_stomp)
1634 memset(p, 0xF2, binsize[bin]);
1636 list.next = gc.free_list[bin];
1638 gc.free_list[bin] = list;
1639 pool.freebits.set(bit_i);
1645 * Determine the allocated size of pointer p. If p is an interior pointer
1646 * or not a gc allocated pointer, return 0.
1648 private size_t sizeOf(void *p)
1652 if (opts.options.sentinel)
1653 p = sentinel_sub(p);
1655 Pool* pool = findPool(p);
1659 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
1660 uint attrs = getAttr(pool, biti);
1662 size_t size = pool.findSize(p);
1663 size_t pm_bitmask_size = 0;
1664 if (has_pointermap(attrs))
1665 pm_bitmask_size = size_t.sizeof;
1667 if (opts.options.sentinel) {
1668 // Check for interior pointer
1670 // 1) size is a power of 2 for values less than PAGESIZE
1671 // 2) base of memory pool is aligned on PAGESIZE boundary
1672 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
1674 return size - SENTINEL_EXTRA - pm_bitmask_size;
1677 if (p == gc.p_cache)
1678 return gc.size_cache;
1680 // Check for interior pointer
1682 // 1) size is a power of 2 for values less than PAGESIZE
1683 // 2) base of memory pool is aligned on PAGESIZE boundary
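        // e.g. for a 64-byte bin the block base is 64-byte aligned, so any
        // pointer with nonzero low 6 bits cannot be the base and is treated
        // as an interior pointer (illustrative example).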
1684 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
1688 gc.size_cache = size - pm_bitmask_size;
1690 return gc.size_cache;
1696 * Verify that pointer p:
1697 * 1) belongs to this memory pool
1698 * 2) points to the start of an allocated piece of memory
1699 * 3) is not on a free list
1701 private void checkNoSync(void *p)
1705 if (opts.options.sentinel)
1706 sentinel_Invariant(p);
1714 if (opts.options.sentinel)
1715 p = sentinel_sub(p);
1718 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1719 bin = cast(Bins)pool.pagetable[pagenum];
1720 assert(bin <= B_PAGE);
1721 size = binsize[bin];
1722 assert((cast(size_t)p & (size - 1)) == 0);
1728 // Check that p is not on a free list
1729 for (List* list = gc.free_list[bin]; list; list = list.next)
1731 assert(cast(void*)list != p);
1742 private void setStackBottom(void *p)
1744 version (STACKGROWSDOWN)
1746 //p = (void *)((uint *)p + 4);
1747 if (p > gc.stack_bottom)
1749 gc.stack_bottom = p;
1754 //p = (void *)((uint *)p - 4);
1755 if (p < gc.stack_bottom)
1757 gc.stack_bottom = cast(char*)p;
1764 * Retrieve statistics about garbage collection.
1765 * Useful for debugging and tuning.
1767 private GCStats getStats()
1777 for (n = 0; n < gc.pools.length; n++)
1779 Pool* pool = gc.pools[n];
1780 psize += pool.npages * PAGESIZE;
1781 for (size_t j = 0; j < pool.npages; j++)
1783 Bins bin = cast(Bins)pool.pagetable[j];
1786 else if (bin == B_PAGE)
1788 else if (bin < B_PAGE)
1793 for (n = 0; n < B_PAGE; n++)
1795 for (List* list = gc.free_list[n]; list; list = list.next)
1796 flsize += binsize[n];
1799 usize = bsize - flsize;
1801 stats.poolsize = psize;
1802 stats.usedsize = bsize - flsize;
1803 stats.freelistsize = flsize;
1807 /******************* weak-reference support *********************/
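// Typical use of this API (illustrative sketch; the helper below is hypothetical
// and not part of the module):
version (none) void weakpointer_example(Object obj)
{
    void* wp = weakpointerCreate(obj);  // does not keep obj alive by itself
    Object o = weakpointerGet(wp);      // obj, or null once it has been collected
    weakpointerDestroy(wp);             // release the C-allocated WeakPointer
}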
1809 private struct WeakPointer
1813 void ondestroy(Object r)
1815 assert(r is reference);
1816 // lock for memory consistency (parallel readers)
1817 // also ensures that weakpointerDestroy can be called while another
1818 // thread is freeing the reference with "delete"
1819 return locked!(void, () {
1826 * Create a weak pointer to the given object.
1827 * Returns a pointer to an opaque struct allocated in C memory.
1829 void* weakpointerCreate( Object r )
1833 // must be allocated in C memory
1834 // 1. to hide the reference from the GC
1835 // 2. the GC doesn't scan delegates added by rt_attachDisposeEvent
1837 auto wp = cast(WeakPointer*)(cstdlib.malloc(WeakPointer.sizeof));
1839 onOutOfMemoryError();
1841 rt_attachDisposeEvent(r, &wp.ondestroy);
1848 * Destroy a weak pointer returned by weakpointerCreate().
1849 * If null is passed, nothing happens.
1851 void weakpointerDestroy( void* p )
1855 auto wp = cast(WeakPointer*)p;
1856 // must be extra careful about the GC or parallel threads
1857 // finalizing the reference at the same time
1858 return locked!(void, () {
1860 rt_detachDisposeEvent(wp.reference, &wp.ondestroy);
1867 * Query a weak pointer and return either the object passed to
1868 * weakpointerCreate, or null if it was free'd in the meantime.
1869 * If null is passed, null is returned.
1871 Object weakpointerGet( void* p )
1875 // NOTE: could avoid the lock by using Fawzi style GC counters but
1876 // that'd require core.sync.Atomic and lots of care about memory
1877 // consistency; it's an optional optimization, see
1878 // http://dsource.org/projects/tango/browser/trunk/user/tango/core/Lifetime.d?rev=5100#L158
1879 return locked!(Object, () {
1880 return (cast(WeakPointer*)p).reference;
1886 /* ============================ Pool =============================== */
1893 GCBits mark; // entries already scanned, or should not be scanned
1894 GCBits scan; // entries that need to be scanned
1895 GCBits freebits; // entries that are on the free list
1896 GCBits finals; // entries that need finalizer run on them
1897 GCBits noscan; // entries that should not be scanned
1902 /// Cache for findSize()
1906 void clear_cache(void* ptr = null)
1908 if (ptr is null || ptr is this.cached_ptr) {
1909 this.cached_ptr = null;
1910 this.cached_size = 0;
1914 void update_cache(void* ptr, size_t size)
1916 this.cached_ptr = ptr;
1917 this.cached_size = size;
1920 void initialize(size_t npages)
1922 size_t poolsize = npages * PAGESIZE;
1923 assert(poolsize >= POOLSIZE);
1924 baseAddr = cast(byte *) os.alloc(poolsize);
1926 // Some of the code depends on page alignment of memory pools
1927 assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);
1934 topAddr = baseAddr + poolsize;
1936 size_t nbits = cast(size_t)poolsize / 16;
1938 // if the GC will run in parallel in a fork()ed process, we need to
1939 // share the mark bits
1940 os.Vis vis = os.Vis.PRIV;
1941 if (opts.options.fork)
1942 vis = os.Vis.SHARED;
1943 mark.alloc(nbits, vis); // shared between mark and sweep
1944 freebits.alloc(nbits); // not used by the mark phase
1945 scan.alloc(nbits); // only used in the mark phase
1946 finals.alloc(nbits); // not used by the mark phase
1947 noscan.alloc(nbits); // mark phase *MUST* have a snapshot
1949 pagetable = cast(ubyte*) cstdlib.malloc(npages);
1951 onOutOfMemoryError();
1952 memset(pagetable, B_FREE, npages);
1954 this.npages = npages;
1966 result = os.dealloc(baseAddr, npages * PAGESIZE);
1974 // See Gcx.Dtor() for the rationale of the null check.
1976 cstdlib.free(pagetable);
1978 os.Vis vis = os.Vis.PRIV;
1979 if (opts.options.fork)
1980 vis = os.Vis.SHARED;
1999 //freebits.Invariant();
2000 //finals.Invariant();
2001 //noscan.Invariant();
2005 //if (baseAddr + npages * PAGESIZE != topAddr)
2006 //printf("baseAddr = %p, npages = %d, topAddr = %p\n", baseAddr, npages, topAddr);
2007 assert(baseAddr + npages * PAGESIZE == topAddr);
2010 for (size_t i = 0; i < npages; i++)
2012 Bins bin = cast(Bins)pagetable[i];
2013 assert(bin < B_MAX);
2019 * Allocate n pages from Pool.
2020 * Returns OPFAIL on failure.
2022 size_t allocPages(size_t n)
2028 for (i = 0; i < npages; i++)
2030 if (pagetable[i] == B_FREE)
2043 * Free npages pages starting with pagenum.
2045 void freePages(size_t pagenum, size_t npages)
2047 memset(&pagetable[pagenum], B_FREE, npages);
2052 * Find base address of block containing pointer p.
2053 * Returns null if the pointer doesn't belong to this pool
2055 void* findBase(void *p)
2057 size_t offset = cast(size_t)(p - this.baseAddr);
2058 size_t pagenum = offset / PAGESIZE;
2059 Bins bin = cast(Bins)this.pagetable[pagenum];
2060 // Adjust to the start of the allocated memory block
2062 return this.baseAddr + (offset & notbinsize[bin]);
2063 if (bin == B_PAGEPLUS) {
2065 --pagenum, offset -= PAGESIZE;
2066 } while (cast(Bins)this.pagetable[pagenum] == B_PAGEPLUS);
2067 return this.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
2069 // we are in a B_FREE page
2075 * Find size of pointer p.
2076 * Returns 0 if p doesn't belong to this pool or if its block size is less
2079 size_t findSize(void *p)
2081 size_t pagenum = cast(size_t)(p - this.baseAddr) / PAGESIZE;
2082 Bins bin = cast(Bins)this.pagetable[pagenum];
2084 return binsize[bin];
2085 if (this.cached_ptr == p)
2086 return this.cached_size;
2087 size_t i = pagenum + 1;
2088 for (; i < this.npages; i++)
2089 if (this.pagetable[i] != B_PAGEPLUS)
2091 this.cached_ptr = p;
2092 this.cached_size = (i - pagenum) * PAGESIZE;
2093 return this.cached_size;
2098 * Used for sorting pools
2100 int opCmp(in Pool other)
2102 if (baseAddr < other.baseAddr)
2105 return cast(int)(baseAddr > other.baseAddr);
2110 /* ============================ SENTINEL =============================== */
2113 const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
2114 const ubyte SENTINEL_POST = 0xF5; // 8 bits
2115 const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;
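// Resulting layout of a sentinel-wrapped allocation (illustrative diagram):
//
//   [ size_t size | size_t SENTINEL_PRE | user data (size bytes) | ubyte SENTINEL_POST ]
//                                         ^-- sentinel_add(p) points here
//
// hence SENTINEL_EXTRA == 2 * size_t.sizeof + 1, and sentinel_add()/sentinel_sub()
// shift a pointer by exactly two words.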
2118 size_t* sentinel_size(void *p) { return &(cast(size_t *)p)[-2]; }
2119 size_t* sentinel_pre(void *p) { return &(cast(size_t *)p)[-1]; }
2120 ubyte* sentinel_post(void *p) { return &(cast(ubyte *)p)[*sentinel_size(p)]; }
2123 void sentinel_init(void *p, size_t size)
2125 *sentinel_size(p) = size;
2126 *sentinel_pre(p) = SENTINEL_PRE;
2127 *sentinel_post(p) = SENTINEL_POST;
2131 void sentinel_Invariant(void *p)
2133 if (*sentinel_pre(p) != SENTINEL_PRE ||
2134 *sentinel_post(p) != SENTINEL_POST)
2139 void *sentinel_add(void *p)
2141 return p + 2 * size_t.sizeof;
2145 void *sentinel_sub(void *p)
2147 return p - 2 * size_t.sizeof;
2152 /* ============================ C Public Interface ======================== */
2155 private int _termCleanupLevel=1;
2159 /// sets the cleanup level done by gc
2162 /// 2: fullCollect ignoring stack roots (might crash daemonThreads)
2163 /// returns a value != 0 if the given level was invalid
2164 int gc_setTermCleanupLevel(int cLevel)
2166 if (cLevel<0 || cLevel>2) return cLevel;
2167 _termCleanupLevel=cLevel;
2171 /// returns the cleanup level done by gc
2172 int gc_getTermCleanupLevel()
2174 return _termCleanupLevel;
2179 scope (exit) assert (Invariant());
2180 gc = cast(GC*) cstdlib.calloc(1, GC.sizeof);
2183 version (DigitalMars) version(OSX) {
2184 _d_osx_image_init();
2186 // NOTE: The GC must initialize the thread library
2187 // before its first collection.
2193 assert (Invariant());
2194 if (_termCleanupLevel<1) {
2196 } else if (_termCleanupLevel==2){
2197 // a more complete cleanup
2198 // NOTE: There may be daemon threads still running when this routine is
2199 // called. If so, cleaning memory out from under them is a good
2200 // way to make them crash horribly.
2201 // Often this probably doesn't matter much since the app is
2202 // supposed to be shutting down anyway, but for example tests might
2203 // crash (and be considered failed even if the test was ok).
2204 // thus this is not the default and should be enabled by
2205 // I'm disabling cleanup for now until I can think about it some
2208 // not really a 'collect all' -- still scans static data area, roots,
2210 return locked!(void, () {
2216 // default (safe) cleanup
2217 return locked!(void, () {
2225 return locked!(void, () {
2226 assert (Invariant()); scope (exit) assert (Invariant());
2227 assert (gc.disabled > 0);
2234 return locked!(void, () {
2235 assert (Invariant()); scope (exit) assert (Invariant());
2242 return locked!(void, () {
2243 assert (Invariant()); scope (exit) assert (Invariant());
2251 return locked!(void, () {
2252 assert (Invariant()); scope (exit) assert (Invariant());
2257 uint gc_getAttr(void* p)
2261 return locked!(uint, () {
2262 assert (Invariant()); scope (exit) assert (Invariant());
2263 Pool* pool = findPool(p);
2266 auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
2267 return getAttr(pool, bit_i);
2271 uint gc_setAttr(void* p, uint attrs)
2275 return locked!(uint, () {
2276 assert (Invariant()); scope (exit) assert (Invariant());
2277 Pool* pool = findPool(p);
2280 auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
2281 uint old_attrs = getAttr(pool, bit_i);
2282 setAttr(pool, bit_i, attrs);
2287 uint gc_clrAttr(void* p, uint attrs)
2291 return locked!(uint, () {
2292 assert (Invariant()); scope (exit) assert (Invariant());
2293 Pool* pool = findPool(p);
2296 auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
2297 uint old_attrs = getAttr(pool, bit_i);
2298 clrAttr(pool, bit_i, attrs);
2303 void* gc_malloc(size_t size, uint attrs = 0,
2304 PointerMap ptrmap = PointerMap.init)
2308 return locked!(void*, () {
2309 assert (Invariant()); scope (exit) assert (Invariant());
2310 return malloc(size, attrs, ptrmap.bits.ptr);
2314 void* gc_calloc(size_t size, uint attrs = 0,
2315 PointerMap ptrmap = PointerMap.init)
2319 return locked!(void*, () {
2320 assert (Invariant()); scope (exit) assert (Invariant());
2321 return calloc(size, attrs, ptrmap.bits.ptr);
2325 void* gc_realloc(void* p, size_t size, uint attrs = 0,
2326 PointerMap ptrmap = PointerMap.init)
2328 return locked!(void*, () {
2329 assert (Invariant()); scope (exit) assert (Invariant());
2330 return realloc(p, size, attrs, ptrmap.bits.ptr);
2334 size_t gc_extend(void* p, size_t min_size, size_t max_size)
2336 return locked!(size_t, () {
2337 assert (Invariant()); scope (exit) assert (Invariant());
2338 return extend(p, min_size, max_size);
2342 size_t gc_reserve(size_t size)
2346 return locked!(size_t, () {
2347 assert (Invariant()); scope (exit) assert (Invariant());
2348 return reserve(size);
2352 void gc_free(void* p)
2356 return locked!(void, () {
2357 assert (Invariant()); scope (exit) assert (Invariant());
2362 void* gc_addrOf(void* p)
2366 return locked!(void*, () {
2367 assert (Invariant()); scope (exit) assert (Invariant());
2368 Pool* pool = findPool(p);
2371 return pool.findBase(p);
2375 size_t gc_sizeOf(void* p)
2379 return locked!(size_t, () {
2380 assert (Invariant()); scope (exit) assert (Invariant());
2385 BlkInfo gc_query(void* p)
2388 return BlkInfo.init;
2389 return locked!(BlkInfo, () {
2390 assert (Invariant()); scope (exit) assert (Invariant());
2395 // NOTE: This routine is experimental. The stats or function name may change
2396 // before it is made officially available.
2399 return locked!(GCStats, () {
2400 assert (Invariant()); scope (exit) assert (Invariant());
2405 void gc_addRoot(void* p)
2409 return locked!(void, () {
2410 assert (Invariant()); scope (exit) assert (Invariant());
2411 if (gc.roots.append(p) is null)
2412 onOutOfMemoryError();
2416 void gc_addRange(void* p, size_t size)
2418 if (p is null || size == 0)
2420 return locked!(void, () {
2421 assert (Invariant()); scope (exit) assert (Invariant());
2422 if (gc.ranges.append(Range(p, p + size)) is null)
2423 onOutOfMemoryError();
2427 void gc_removeRoot(void* p)
2431 return locked!(void, () {
2432 assert (Invariant()); scope (exit) assert (Invariant());
2433 bool r = gc.roots.remove(p);
2438 void gc_removeRange(void* p)
2442 return locked!(void, () {
2443 assert (Invariant()); scope (exit) assert (Invariant());
2444 bool r = gc.ranges.remove(Range(p, null));
2449 void* gc_weakpointerCreate(Object r)
2451 // weakpointers do their own locking
2452 return weakpointerCreate(r);
2455 void gc_weakpointerDestroy(void* wp)
2457 // weakpointers do their own locking
2458 weakpointerDestroy(wp);
2461 Object gc_weakpointerGet(void* wp)
2463 // weakpointers do their own locking
2464 return weakpointerGet(wp);
2468 // vim: set et sw=4 sts=4 :