2 * This module contains the garbage collector implementation.
4 * Copyright: Copyright (C) 2001-2007 Digital Mars, www.digitalmars.com.
7 * This software is provided 'as-is', without any express or implied
8 * warranty. In no event will the authors be held liable for any damages
9 * arising from the use of this software.
11 * Permission is granted to anyone to use this software for any purpose,
12 * including commercial applications, and to alter it and redistribute it
13 * freely, in both source and binary form, subject to the following
16 * o The origin of this software must not be misrepresented; you must not
17 * claim that you wrote the original software. If you use this software
18 * in a product, an acknowledgment in the product documentation would be
19 * appreciated but is not required.
20 * o Altered source versions must be plainly marked as such, and must not
21 * be misrepresented as being the original software.
22 * o This notice may not be removed or altered from any source
24 * Authors: Walter Bright, David Friedman, Sean Kelly
29 // D Programming Language Garbage Collector implementation
31 /************** Debugging ***************************/
33 //debug = COLLECT_PRINTF; // turn on printf's
34 //debug = PTRCHECK; // more pointer checking
35 //debug = PTRCHECK2; // thorough but slow pointer checking
37 /*************** Configuration *********************/
39 version = STACKGROWSDOWN; // growing the stack means subtracting from the stack pointer
40 // (use for Intel X86 CPUs)
41 // else growing the stack means adding to the stack pointer
43 /***************************************************/
45 import rt.gc.cdgc.bits: GCBits;
46 import rt.gc.cdgc.stats: GCStats, Stats;
47 import dynarray = rt.gc.cdgc.dynarray;
48 import os = rt.gc.cdgc.os;
49 import opts = rt.gc.cdgc.opts;
51 import cstdlib = tango.stdc.stdlib;
52 import cstring = tango.stdc.string;
53 import cstdio = tango.stdc.stdio;
54 debug(COLLECT_PRINTF) alias cstdio.printf printf;
57 * This is a small optimization that proved its usefulness. For small chunks
58 * of memory memset() seems to be slower (probably because of the call) than
59 * simply doing a simple loop to set the memory.
61 void memset(void* dst, int c, size_t n)
63 // This number (32) has been determined empirically
65 cstring.memset(dst, c, n);
// Small-chunk path: set the bytes with a plain loop instead of paying the
// call overhead of the C library memset().
68 auto p = cast(ubyte*)(dst);
75 // BUG: The following import will likely not work, since the gcc
76 // subdirectory is elsewhere. Instead, perhaps the functions
77 // could be declared directly or some other resolution could
79 static import gcc.builtins; // for __builtin_unwind_int
// Per-block attribute bits, stored in the pool's bit sets (finals/noscan).
89 package enum BlkAttr : uint
91 FINALIZE = 0b0000_0001, // run the finalizer when the block is freed
92 NO_SCAN = 0b0000_0010, // block contains no pointers; skip it when marking
93 NO_MOVE = 0b0000_0100, // note: pool.nomove handling is commented out below
94 ALL_BITS = 0b1111_1111 // mask selecting every attribute bit
// A block carries a trailing PointerMap bitmask (for precise scanning) only
// when the GC is not in conservative mode and the block is scannable.
97 package bool has_pointermap(uint attrs)
99 return !opts.options.conservative && !(attrs & BlkAttr.NO_SCAN);
// Ceiling division: how many 'to'-sized units are needed to cover n bytes.
102 private size_t round_up(size_t n, size_t to)
104 return (n + to - 1) / to;
// Callback types used by the runtime interface declared below.
109 alias void delegate(Object) DEvent;
110 alias void delegate( void*, void* ) scanFn; // scans the [pbot, ptop) range
111 enum { OPFAIL = ~cast(size_t)0 } // 'not found' sentinel (all bits set)
// Hooks implemented elsewhere in the runtime (declarations only).
115 version (DigitalMars) version(OSX)
// NOTE(review): 'oid' below looks like a garbled 'void' -- confirm against
// the original sources.
116 oid _d_osx_image_init();
118 void* rt_stackBottom();
120 void rt_finalize( void* p, bool det = true );
121 void rt_attachDisposeEvent(Object h, DEvent e);
122 bool rt_detachDisposeEvent(Object h, DEvent e);
123 void rt_scanStaticData( scanFn scan );
126 bool thread_needLock();
127 void thread_suspendAll();
128 void thread_resumeAll();
129 void thread_scanAll( scanFn fn, void* curStackTop = null );
131 void onOutOfMemoryError();
139 POOLSIZE = (4096*256), // default pool size: 256 pages
153 B_PAGE, // start of large alloc
154 B_PAGEPLUS, // continuation of large alloc
// Orders Range values by their bottom address, so ranges can be kept sorted.
174 int opCmp(in Range other)
176 if (pbot < other.pbot)
179 return cast(int)(pbot > other.pbot);
// Allocation size of each small-object bin, and the complement masks used to
// round an interior pointer down to the start of its bin slot.
184 const uint binsize[B_MAX] = [ 16,32,64,128,256,512,1024,2048,4096 ];
185 const uint notbinsize[B_MAX] = [ ~(16u-1),~(32u-1),~(64u-1),~(128u-1),~(256u-1),
186 ~(512u-1),~(1024u-1),~(2048u-1),~(4096u-1) ];
189 /* ============================ GC =============================== */
192 class GCLock {} // just a dummy so we can get a global lock
203 // !=0 means don't scan stack
208 /// Turn off collections if > 0
211 // PID of the fork()ed process doing the mark() (0 if is not running)
214 /// min(pool.baseAddr)
216 /// max(pool.topAddr)
219 /// Total heap memory
224 /// Free list for each size
225 List*[B_MAX] free_list;
227 dynarray.DynArray!(void*) roots; // registered root pointers
228 dynarray.DynArray!(Range) ranges; // registered address ranges to scan
229 dynarray.DynArray!(Pool*) pools; // all pools, kept sorted by address
234 // call locked if necessary
235 private T locked(T, alias Code)()
237 if (thread_needLock())
238 synchronized (gc.lock) return Code();
// A collection is in progress while the fork()ed mark process is alive.
246 bool collect_in_progress()
248 return gc.mark_proc_pid != 0;
// Consistency checks over the global GC state: pool ordering and cached
// address bounds, per-pool page/memory accounting, root/range sanity, and
// free-list sanity against the pools' free bits.
254 assert (gc !is null);
256 size_t total_mem = 0;
258 for (size_t i = 0; i < gc.pools.length; i++) {
259 Pool* pool = gc.pools[i];
262 assert(gc.min_addr == pool.baseAddr);
263 if (i + 1 < gc.pools.length)
264 assert(*pool < *gc.pools[i + 1]);
265 else if (i + 1 == gc.pools.length)
266 assert(gc.max_addr == pool.topAddr);
267 total_mem += pool.npages * PAGESIZE;
268 for (size_t pn = 0; pn < pool.npages; ++pn)
269 if (pool.pagetable[pn] == B_FREE)
270 free_mem += PAGESIZE;
273 gc.roots.Invariant();
274 gc.ranges.Invariant();
276 for (size_t i = 0; i < gc.ranges.length; i++) {
277 assert(gc.ranges[i].pbot);
278 assert(gc.ranges[i].ptop);
279 assert(gc.ranges[i].pbot <= gc.ranges[i].ptop);
// Every free-list entry must lie inside its pool and have its free bit set.
282 for (size_t i = 0; i < B_PAGE; i++) {
283 for (List *list = gc.free_list[i]; list; list = list.next) {
284 auto pool = list.pool;
285 assert (pool !is null);
286 auto p = cast(byte*) list;
287 assert (p >= pool.baseAddr);
288 assert (p < pool.topAddr);
289 assert (pool.freebits.test((p - pool.baseAddr) / 16));
290 free_mem += binsize[i];
293 assert (gc.total_mem == total_mem);
294 assert (gc.free_mem == free_mem);
301 * Find Pool that pointer is in.
302 * Return null if not in a Pool.
303 * Assume pools is sorted.
305 Pool* findPool(void* p)
// Quick reject: pointer is outside the cached heap address bounds.
307 if (p < gc.min_addr || p >= gc.max_addr)
309 if (gc.pools.length == 0)
311 if (gc.pools.length == 1)
313 /// The pooltable[] is sorted by address, so do a binary search
315 size_t high = gc.pools.length - 1;
316 while (low <= high) {
317 size_t mid = (low + high) / 2;
318 auto pool = gc.pools[mid];
319 if (p < pool.baseAddr)
321 else if (p >= pool.topAddr)
332 * Determine the base address of the block containing p. If p is not a gc
333 * allocated pointer, return null.
335 BlkInfo getInfo(void* p)
338 Pool* pool = findPool(p);
342 info.base = pool.findBase(p);
343 if (info.base is null)
345 info.size = pool.findSize(info.base);
// Attribute bits are indexed by 16-byte units from the pool base.
346 size_t bit_i = (info.base - pool.baseAddr) / 16;
347 info.attr = getAttr(pool, bit_i);
348 if (has_pointermap(info.attr)) {
349 info.size -= size_t.sizeof; // PointerMap bitmask
350 // Points to the PointerMap bitmask pointer, not user data
351 if (p >= (info.base + info.size)) {
355 if (opts.options.sentinel) {
356 info.base = sentinel_add(info.base);
357 // points to sentinel data, not user data
358 if (p < info.base || p >= sentinel_post(info.base))
360 info.size -= SENTINEL_EXTRA;
367 * Compute bin for size.
369 Bins findBin(size_t size)
413 * Allocate a new pool of at least size bytes.
414 * Sort it into pools.
415 * Mark all memory in the pool as B_FREE.
416 * Return the actual number of bytes reserved or 0 on error.
418 size_t reserve(size_t size)
421 size_t npages = round_up(size, PAGESIZE);
422 Pool* pool = newPool(npages);
426 return pool.npages * PAGESIZE;
431 * Minimizes physical memory usage by returning free pools to the OS.
433 * If full is false, keep some pools alive if the resulting free memory would
436 void minimize(bool full = true)
438 // The shared mark bits of the freed pool might be used by the mark process
439 if (collect_in_progress())
442 if (gc.pools.length == 0)
445 for (size_t n = 0; n < gc.pools.length; n++)
447 Pool* pool = gc.pools[n];
// A pool can only be released when every one of its pages is B_FREE.
449 for (pn = 0; pn < pool.npages; pn++)
451 if (cast(Bins)pool.pagetable[pn] != B_FREE)
454 if (pn < pool.npages)
457 size_t pool_size = pool.npages * PAGESIZE;
// Keep the pool if removing it would drop below the configured free ratio.
459 double percent_free = (gc.free_mem - pool_size) * 100.0 /
460 (gc.total_mem - pool_size);
461 if (percent_free < opts.options.min_free)
462 continue; // not enough free, don't remove this pool
464 gc.total_mem -= pool_size;
465 gc.free_mem -= pool_size;
468 gc.pools.remove_at(n);
// Re-derive the cached heap address bounds after removing pools.
471 gc.min_addr = gc.pools[0].baseAddr;
472 gc.max_addr = gc.pools[gc.pools.length - 1].topAddr;
477 * Allocate a chunk of memory that is larger than a page.
478 * Return null if out of memory.
480 void* bigAlloc(size_t npages, out Pool* pool, size_t* pn, bool* collected)
483 // This code could use some refinement when repeatedly
484 // allocating very large arrays.
// Try to satisfy the request from an existing pool first.
488 for (size_t n = 0; n < gc.pools.length; n++)
491 *pn = pool.allocPages(npages);
493 return pool.baseAddr + *pn * PAGESIZE;
// Fall back to a brand new pool sized for the request.
501 pool = newPool(npages);
503 return null; // let malloc handle the error
504 *pn = pool.allocPages(npages);
505 assert(*pn != OPFAIL);
506 return pool.baseAddr + *pn * PAGESIZE;
509 if (void* p = find_block())
// Collect and retry only if enough whole pages were recovered.
516 size_t freedpages = fullcollectshell();
518 if (freedpages >= npages) {
519 if (void* p = find_block())
528 * Allocate a new pool with at least npages in it.
529 * Sort it into pools.
530 * Return null if failed.
532 Pool *newPool(size_t npages)
534 // Minimum of POOLSIZE
535 if (npages < POOLSIZE/PAGESIZE)
536 npages = POOLSIZE/PAGESIZE;
537 else if (npages > POOLSIZE/PAGESIZE)
539 // Give us 150% of requested size, so there's room to extend
540 auto n = npages + (npages >> 1);
541 if (n < size_t.max/PAGESIZE)
545 // Allocate successively larger pools up to 8 megs
548 size_t n = gc.pools.length;
550 n = 8; // cap pool size at 8 megs
551 n *= (POOLSIZE / PAGESIZE);
556 auto pool = cast(Pool*) cstdlib.calloc(1, Pool.sizeof);
559 pool.initialize(npages);
// Keep the pool array sorted by address so findPool() can binary-search.
566 auto inserted_pool = *gc.pools.insert_sorted!("*a < *b")(pool);
567 if (inserted_pool is null) {
571 assert (inserted_pool is pool);
// Refresh cached heap bounds and memory accounting for the new pool.
572 gc.min_addr = gc.pools[0].baseAddr;
573 gc.max_addr = gc.pools[gc.pools.length - 1].topAddr;
574 size_t pool_size = pool.topAddr - pool.baseAddr;
575 gc.total_mem += pool_size;
576 gc.free_mem += pool_size;
582 * Allocate a page of bin's.
586 int allocPage(Bins bin)
591 for (size_t n = 0; n < gc.pools.length; n++)
594 pn = pool.allocPages(1);
601 pool.pagetable[pn] = cast(ubyte)bin;
603 // Convert page to free list
604 size_t size = binsize[bin];
605 auto list_head = &gc.free_list[bin];
607 byte* p = pool.baseAddr + pn * PAGESIZE;
608 byte* ptop = p + PAGESIZE;
// Mark the whole page as free (one bit per 16-byte unit), then thread every
// bin-sized slot onto the free list.
609 size_t bit_i = pn * (PAGESIZE / 16);
610 pool.freebits.set_group(bit_i, PAGESIZE / 16);
611 for (; p < ptop; p += size)
613 List* l = cast(List *) p;
623 * Search a range of memory values and mark any pointers into the GC pool using
624 * type information (bitmask of pointer locations).
626 void mark_range(void *pbot, void *ptop, size_t* pm_bitmask)
628 // TODO: make our own assert because assert uses the GC
629 assert (pbot <= ptop);
631 const BITS_PER_WORD = size_t.sizeof * 8;
633 void **p1 = cast(void **)pbot;
634 void **p2 = cast(void **)ptop;
636 bool changes = false;
// pm_bitmask layout: first word is the type size (in words), followed by the
// pointer-location bitmask itself.
638 size_t type_size = pm_bitmask[0];
639 size_t* pm_bits = pm_bitmask + 1;
640 bool has_type_info = type_size != 1 || pm_bits[0] != 1 || pm_bits[1] != 0;
642 //printf("marking range: %p -> %p\n", pbot, ptop);
643 for (; p1 + type_size <= p2; p1 += type_size) {
644 for (size_t n = 0; n < type_size; n++) {
645 // scan bit set for this word
647 !(pm_bits[n / BITS_PER_WORD] & (1 << (n % BITS_PER_WORD))))
652 if (p < gc.min_addr || p >= gc.max_addr)
655 if ((cast(size_t)p & ~(PAGESIZE-1)) == pcache)
658 Pool* pool = findPool(p);
661 size_t offset = cast(size_t)(p - pool.baseAddr);
663 size_t pn = offset / PAGESIZE;
664 Bins bin = cast(Bins)pool.pagetable[pn];
666 // Cache B_PAGE, B_PAGEPLUS and B_FREE lookups
668 pcache = cast(size_t)p & ~(PAGESIZE-1);
670 // Adjust bit to be at start of allocated memory block
672 bit_i = (offset & notbinsize[bin]) / 16;
673 else if (bin == B_PAGEPLUS)
// Walk back to the B_PAGE page that starts this large allocation.
679 while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
680 bit_i = pn * (PAGESIZE / 16);
682 else // Don't mark bits in B_FREE pages
685 if (!pool.mark.test(bit_i))
687 pool.mark.set(bit_i);
// Newly marked and scannable: queue the block for the next scan pass.
688 if (!pool.noscan.test(bit_i))
690 pool.scan.set(bit_i);
698 gc.any_changes = true;
702 * Return number of full pages free'd.
704 size_t fullcollectshell(bool early = false)
706 gc.stats.collection_started();
708 gc.stats.collection_finished();
710 // The purpose of the 'shell' is to ensure all the registers
711 // get put on the stack so they'll be scanned
716 gcc.builtins.__builtin_unwind_init();
// Per-architecture register spill: declare locals the asm dumps into so the
// register contents become reachable from the scanned stack.
723 uint eax,ecx,edx,ebx,ebp,esi,edi;
736 else version (X86_64)
738 ulong rax,rbx,rcx,rdx,rbp,rsi,rdi,r8,r9,r10,r11,r12,r13,r14,r15;
761 static assert( false, "Architecture not supported." );
772 result = fullcollect(sp, early);
// Full collection driver: depending on configuration it runs the mark phase
// in a fork()ed child process (possibly concurrent with the mutator) or falls
// back to a classic stop-the-world mark in this process.
795 size_t fullcollect(void *stackTop, bool early = false)
797 debug(COLLECT_PRINTF) printf("Gcx.fullcollect(early=%d)\n",
800 // We will block the mutator only if eager allocation is not used and this
801 // is not an early collection.
802 bool block = !opts.options.eager_alloc && !early;
804 // If there is a mark process running, check if it already finished. If
805 // that is the case, we launch the sweep phase and hope enough memory is
806 // freed. If it's still running, either we block until the mark phase is
807 // done (and then sweep to finish the collection), or we tell the caller
808 // process no memory has been recovered (it will allocate more to fulfill
809 // the current request if eager allocation is used) and let the mark phase
810 // keep running in parallel.
811 if (collect_in_progress()) {
812 os.WRes r = os.wait_pid(gc.mark_proc_pid, block);
813 assert (r != os.WRes.ERROR);
816 debug(COLLECT_PRINTF) printf("\t\tmark proc DONE (block=%d)\n",
818 gc.mark_proc_pid = 0;
820 case os.WRes.RUNNING:
821 debug(COLLECT_PRINTF) printf("\t\tmark proc RUNNING\n");
824 // Something went wrong, if block is true, wait() should never
826 goto case os.WRes.ERROR;
828 debug(COLLECT_PRINTF) printf("\t\tmark proc ERROR\n");
829 disable_fork(); // Try to keep going without forking
834 // We always need to stop the world to make threads save the CPU registers
835 // in the stack and prepare themselves for thread_scanAll()
837 gc.stats.world_stopped();
839 // If forking is enabled, we fork() and start a new mark phase in the
840 // child. If the collection should not block, the parent process tells the
841 // caller no memory could be recycled immediately (if eager allocation is
842 // used, and this collection was triggered by an allocation, the caller
843 // should allocate more memory to fulfill the request). If the collection
844 // should block, the parent will wait for the mark phase to finish before
845 // returning control to the mutator, but other threads are restarted and
846 // may run in parallel with the mark phase (unless they allocate or use the
847 // GC themselves, in which case the global GC lock will stop them).
848 if (opts.options.fork) {
849 cstdio.fflush(null); // avoid duplicated FILE* output
850 os.pid_t child_pid = os.fork();
851 assert (child_pid != -1); // don't accept errors in non-release mode
853 case -1: // if fork() fails, fall-back to stop-the-world
856 case 0: // child process (i.e. the collector's mark phase)
859 break; // bogus, will never reach here
860 default: // parent process (i.e. the mutator)
862 gc.stats.world_started();
864 gc.mark_proc_pid = child_pid;
867 os.WRes r = os.wait_pid(child_pid); // block until it finishes
868 assert (r == os.WRes.DONE);
869 debug(COLLECT_PRINTF) printf("\t\tmark proc DONE (block=%d)\n",
871 if (r == os.WRes.DONE)
873 debug(COLLECT_PRINTF) printf("\tmark() proc ERROR\n");
874 // If there was some error, try to keep going without forking
876 // Re-suspend the threads to do the marking in this process
878 gc.stats.world_stopped();
883 // If we reach here, we are using the standard stop-the-world collection,
884 // either because fork was disabled in the first place, or because it was
885 // disabled because of some error.
888 gc.stats.world_started();
// Mark phase: seed mark bits from the free bits, scan static data, the paused
// threads' stacks/registers, the registered roots and ranges, then iterate
// over the scan queue until no new blocks get marked.
897 void mark(void *stackTop)
899 debug(COLLECT_PRINTF) printf("\tmark()\n");
901 gc.any_changes = false;
903 for (size_t n = 0; n < gc.pools.length; n++)
905 Pool* pool = gc.pools[n];
906 pool.mark.copy(&pool.freebits);
910 /// Marks a range of memory in conservative mode.
911 void mark_conservative_range(void* pbot, void* ptop)
913 mark_range(pbot, ptop, PointerMap.init.bits.ptr);
916 rt_scanStaticData(&mark_conservative_range);
920 // Scan stacks and registers for each paused thread
921 thread_scanAll(&mark_conservative_range, stackTop);
925 debug(COLLECT_PRINTF) printf("scan roots[]\n");
926 mark_conservative_range(gc.roots.ptr, gc.roots.ptr + gc.roots.length);
929 debug(COLLECT_PRINTF) printf("scan ranges[]\n");
930 for (size_t n = 0; n < gc.ranges.length; n++)
932 debug(COLLECT_PRINTF) printf("\t%x .. %x\n", gc.ranges[n].pbot, gc.ranges[n].ptop);
933 mark_conservative_range(gc.ranges[n].pbot, gc.ranges[n].ptop);
936 debug(COLLECT_PRINTF) printf("\tscan heap\n");
// Transitive closure: keep re-scanning while the previous pass queued new
// blocks (gc.any_changes is raised by mark_range()).
937 while (gc.any_changes)
939 gc.any_changes = false;
940 for (size_t n = 0; n < gc.pools.length; n++)
946 Pool* pool = gc.pools[n];
948 bbase = pool.scan.base();
949 btop = bbase + pool.scan.nwords;
950 for (b = bbase; b < btop;)
966 o = pool.baseAddr + (b - bbase) * 32 * 16;
967 if (!(bitm & 0xFFFF))
972 for (; bitm; o += 16, bitm >>= 1)
977 pn = cast(size_t)(o - pool.baseAddr) / PAGESIZE;
978 bin = cast(Bins)pool.pagetable[pn];
980 if (opts.options.conservative)
981 mark_conservative_range(o, o + binsize[bin]);
// Precise mode: the block's last word points at its PointerMap bitmask.
983 auto end_of_blk = cast(size_t**)(o +
984 binsize[bin] - size_t.sizeof);
985 size_t* pm_bitmask = *end_of_blk;
986 mark_range(o, end_of_blk, pm_bitmask);
989 else if (bin == B_PAGE || bin == B_PAGEPLUS)
991 if (bin == B_PAGEPLUS)
993 while (pool.pagetable[pn - 1] != B_PAGE)
// Count how many pages the large allocation spans.
997 while (pn + u < pool.npages &&
998 pool.pagetable[pn + u] == B_PAGEPLUS)
1001 size_t blk_size = u * PAGESIZE;
1002 if (opts.options.conservative)
1003 mark_conservative_range(o, o + blk_size);
1005 auto end_of_blk = cast(size_t**)(o + blk_size -
1007 size_t* pm_bitmask = *end_of_blk;
1008 mark_range(o, end_of_blk, pm_bitmask);
1023 // Free up everything not marked
1024 debug(COLLECT_PRINTF) printf("\tsweep\n");
1027 gc.free_mem = 0; // will be recalculated
1028 size_t freedpages = 0;
1030 for (size_t n = 0; n < gc.pools.length; n++)
1032 Pool* pool = gc.pools[n];
1034 uint* bbase = pool.mark.base();
1036 for (pn = 0; pn < pool.npages; pn++, bbase += PAGESIZE / (32 * 16))
1038 Bins bin = cast(Bins)pool.pagetable[pn];
// Small-object page: walk each bin-sized slot, finalize and reclaim the
// unmarked ones.
1042 auto size = binsize[bin];
1043 byte* p = pool.baseAddr + pn * PAGESIZE;
1044 byte* ptop = p + PAGESIZE;
1045 size_t bit_i = pn * (PAGESIZE/16);
1046 size_t bit_stride = size / 16;
1048 version(none) // BUG: doesn't work because freebits() must also be cleared
1050 // If free'd entire page
1051 if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 &&
1052 bbase[3] == 0 && bbase[4] == 0 && bbase[5] == 0 &&
1053 bbase[6] == 0 && bbase[7] == 0)
1055 for (; p < ptop; p += size, bit_i += bit_stride)
1057 if (pool.finals.testClear(bit_i)) {
1058 if (opts.options.sentinel)
1059 rt_finalize(sentinel_add(p), false/*gc.no_stack > 0*/);
1061 rt_finalize(p, false/*gc.no_stack > 0*/);
1063 clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
1065 if (opts.options.mem_stomp)
1066 memset(p, 0xF3, size);
1068 pool.pagetable[pn] = B_FREE;
1073 for (; p < ptop; p += size, bit_i += bit_stride)
1075 if (!pool.mark.test(bit_i))
1077 if (opts.options.sentinel)
1078 sentinel_Invariant(sentinel_add(p));
1080 pool.freebits.set(bit_i);
1081 if (pool.finals.testClear(bit_i)) {
1082 if (opts.options.sentinel)
1083 rt_finalize(sentinel_add(p), false/*gc.no_stack > 0*/);
1085 rt_finalize(p, false/*gc.no_stack > 0*/);
1087 clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
1089 if (opts.options.mem_stomp)
1090 memset(p, 0xF3, size);
// Large allocation: if its head page is unmarked, finalize it and free the
// B_PAGE page plus all following B_PAGEPLUS continuation pages.
1096 else if (bin == B_PAGE)
1098 size_t bit_stride = PAGESIZE / 16;
1099 size_t bit_i = pn * bit_stride;
1100 if (!pool.mark.test(bit_i))
1102 byte *p = pool.baseAddr + pn * PAGESIZE;
1103 if (opts.options.sentinel)
1104 sentinel_Invariant(sentinel_add(p));
1105 if (pool.finals.testClear(bit_i)) {
1106 if (opts.options.sentinel)
1107 rt_finalize(sentinel_add(p), false/*gc.no_stack > 0*/);
1109 rt_finalize(p, false/*gc.no_stack > 0*/);
1111 clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
1113 debug(COLLECT_PRINTF) printf("\tcollecting big %p\n", p);
1114 pool.pagetable[pn] = B_FREE;
1115 pool.freebits.set_group(bit_i, PAGESIZE / 16);
1117 gc.free_mem += PAGESIZE;
1118 if (opts.options.mem_stomp)
1119 memset(p, 0xF3, PAGESIZE);
1120 while (pn + 1 < pool.npages && pool.pagetable[pn + 1] == B_PAGEPLUS)
1123 pool.pagetable[pn] = B_FREE;
1124 bit_i += bit_stride;
1125 pool.freebits.set_group(bit_i, PAGESIZE / 16);
1127 gc.free_mem += PAGESIZE;
1129 if (opts.options.mem_stomp)
1132 memset(p, 0xF3, PAGESIZE);
1137 else if (bin == B_FREE) {
1138 gc.free_mem += PAGESIZE;
// Rebuild the free lists from scratch after the sweep.
1144 gc.free_list[] = null;
1146 // Free complete pages, rebuild free list
1147 debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
1148 size_t recoveredpages = 0;
1149 for (size_t n = 0; n < gc.pools.length; n++)
1151 Pool* pool = gc.pools[n];
1152 for (size_t pn = 0; pn < pool.npages; pn++)
1154 Bins bin = cast(Bins)pool.pagetable[pn];
1160 size_t size = binsize[bin];
1161 size_t bit_stride = size / 16;
1162 size_t bit_base = pn * (PAGESIZE / 16);
1163 size_t bit_top = bit_base + (PAGESIZE / 16);
// A page whose every slot is free gets returned whole (B_FREE) instead of
// being threaded onto the free list slot by slot.
1167 for (; bit_i < bit_top; bit_i += bit_stride)
1169 if (!pool.freebits.test(bit_i))
1172 pool.pagetable[pn] = B_FREE;
1173 pool.freebits.set_group(bit_base, PAGESIZE / 16);
1175 gc.free_mem += PAGESIZE;
1179 p = pool.baseAddr + pn * PAGESIZE;
1180 for (u = 0; u < PAGESIZE; u += size)
1182 bit_i = bit_base + u / 16;
1183 if (pool.freebits.test(bit_i))
1185 assert ((p+u) >= pool.baseAddr);
1186 assert ((p+u) < pool.topAddr);
1187 List* list = cast(List*) (p + u);
1188 // avoid unnecessary writes (it really saves time)
1189 if (list.next != gc.free_list[bin])
1190 list.next = gc.free_list[bin];
1191 if (list.pool != pool)
1193 gc.free_list[bin] = list;
1194 gc.free_mem += binsize[bin];
1201 debug(COLLECT_PRINTF) printf("recovered pages = %d\n", recoveredpages);
1202 debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, gc.pools.length);
1204 return freedpages + recoveredpages;
// Read the BlkAttr bits for the block whose first 16-byte unit is bit_i
// within the given pool.
1211 uint getAttr(Pool* pool, size_t bit_i)
1219 if (pool.finals.test(bit_i))
1220 attrs |= BlkAttr.FINALIZE;
1221 if (pool.noscan.test(bit_i))
1222 attrs |= BlkAttr.NO_SCAN;
1223 // if (pool.nomove.test(bit_i))
1224 // attrs |= BlkAttr.NO_MOVE;
// Set the attribute bits selected by mask for the block at bit_i.
1232 void setAttr(Pool* pool, size_t bit_i, uint mask)
1239 if (mask & BlkAttr.FINALIZE)
1241 pool.finals.set(bit_i);
1243 if (mask & BlkAttr.NO_SCAN)
1245 pool.noscan.set(bit_i);
1247 // if (mask & BlkAttr.NO_MOVE)
1249 // if (!pool.nomove.nbits)
1250 // pool.nomove.alloc(pool.mark.nbits);
1251 // pool.nomove.set(bit_i);
// Clear the attribute bits selected by mask for the block at bit_i.
1259 void clrAttr(Pool* pool, size_t bit_i, uint mask)
1266 if (mask & BlkAttr.FINALIZE)
1267 pool.finals.clear(bit_i);
1268 if (mask & BlkAttr.NO_SCAN)
1269 pool.noscan.clear(bit_i);
1270 // if (mask & BlkAttr.NO_MOVE && pool.nomove.nbits)
1271 // pool.nomove.clear(bit_i);
// Fall back to non-forking operation: disable every option that assumes a
// working fork().
1277 // we have to disable all options that assume fork is enabled
1278 opts.options.fork = false;
1279 opts.options.eager_alloc = false;
1280 opts.options.early_collect = false;
// GC start-up: record the stack bottom, parse D_GC_OPTS, verify OS support
// for fork()-based collection, and optionally pre-allocate pools.
1287 gc.stack_bottom = cast(char*)&dummy;
1288 opts.parse(cstdlib.getenv("D_GC_OPTS"));
1289 // If we are going to fork, make sure we have the needed OS support
1290 if (opts.options.fork)
1291 opts.options.fork = os.HAVE_SHARED && os.HAVE_FORK;
1292 // Disable fork()-related options if we don't have it
1293 if (!opts.options.fork)
1295 gc.lock = GCLock.classinfo;
1297 setStackBottom(rt_stackBottom());
1298 gc.stats = Stats(gc);
1299 if (opts.options.prealloc_npools) {
1300 size_t pages = round_up(opts.options.prealloc_psize, PAGESIZE);
1301 for (size_t i = 0; i < opts.options.prealloc_npools; ++i)
1307 // Launch a parallel collection if we don't have enough free memory available
1308 // (we have less than min_free% of the total heap free).
1309 void early_collect()
1311 if (!opts.options.early_collect || collect_in_progress())
1313 double percent_free = gc.free_mem * 100.0 / gc.total_mem;
1314 if (percent_free < opts.options.min_free)
1315 fullcollectshell(true); // 'early' collection: don't block the mutator
// Core allocator: small requests come from per-bin free lists (refilled by
// allocPage()/fullcollectshell()); page-or-larger requests go through
// bigAlloc(). Handles the optional sentinel and trailing PointerMap bitmask.
1322 private void *malloc(size_t size, uint attrs, size_t* pm_bitmask)
1326 gc.stats.malloc_started(size, attrs, pm_bitmask);
1328 gc.stats.malloc_finished(p);
1333 if (opts.options.sentinel)
1334 size += SENTINEL_EXTRA;
// Reserve a trailing word to store the PointerMap bitmask pointer.
1336 bool has_pm = has_pointermap(attrs);
1338 size += size_t.sizeof;
1341 // Cache previous binsize lookup - Dave Fladebo.
1342 static size_t lastsize = -1;
1343 static Bins lastbin;
1344 if (size == lastsize)
1348 bin = findBin(size);
1354 size_t bit_i = void;
1355 size_t capacity = void; // to figure out where to store the bitmask
1356 bool collected = false;
1359 p = gc.free_list[bin];
1362 if (!allocPage(bin) && !gc.disabled) // try to find a new page
1364 if (!thread_needLock())
1366 /* Then we haven't locked it yet. Be sure
1367 * and gc.lock for a collection, since a finalizer
1368 * may start a new thread.
1370 synchronized (gc.lock)
1375 else if (!fullcollectshell()) // collect to find a new page
1381 if (!gc.free_list[bin] && !allocPage(bin))
1383 newPool(1); // allocate new pool to find a new page
1384 // TODO: hint allocPage() to use the pool we just created
1385 int result = allocPage(bin);
1387 onOutOfMemoryError();
1389 p = gc.free_list[bin];
1391 capacity = binsize[bin];
1393 // Return next item from free list
1394 List* list = cast(List*) p;
1395 assert ((cast(byte*)list) >= list.pool.baseAddr);
1396 assert ((cast(byte*)list) < list.pool.topAddr);
1397 gc.free_list[bin] = list.next;
1399 bit_i = (p - pool.baseAddr) / 16;
1400 assert (pool.freebits.test(bit_i));
1401 pool.freebits.clear(bit_i);
// Zero the slack after the request so stale pointers aren't scanned.
1402 if (!(attrs & BlkAttr.NO_SCAN))
1403 memset(p + size, 0, capacity - size);
1404 if (opts.options.mem_stomp)
1405 memset(p, 0xF0, size);
// Large allocation path: whole pages via bigAlloc().
1410 size_t npages = round_up(size, PAGESIZE);
1411 p = bigAlloc(npages, pool, &pn, &collected);
1413 onOutOfMemoryError();
1414 assert (pool !is null);
1416 capacity = npages * PAGESIZE;
1417 bit_i = pn * (PAGESIZE / 16);
1418 pool.freebits.clear(bit_i);
1419 pool.pagetable[pn] = B_PAGE;
1421 memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
1422 p = pool.baseAddr + pn * PAGESIZE;
1423 memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
1424 if (opts.options.mem_stomp)
1425 memset(p, 0xF1, size);
1429 // Store the bit mask AFTER SENTINEL_POST
1430 // TODO: store it BEFORE, so the bitmask is protected too
1432 auto end_of_blk = cast(size_t**)(p + capacity - size_t.sizeof);
1433 *end_of_blk = pm_bitmask;
1434 size -= size_t.sizeof;
1437 if (opts.options.sentinel) {
1438 size -= SENTINEL_EXTRA;
1439 p = sentinel_add(p);
1440 sentinel_init(p, size);
1444 setAttr(pool, bit_i, attrs);
1445 assert (bin >= B_PAGE || !pool.freebits.test(bit_i));
1448 gc.free_mem -= capacity;
1450 // If there is not enough free memory (after a collection), allocate
1451 // a new pool big enough to have at least the min_free% of the total
1452 // heap free. If the collection left too much free memory, try to free
1453 // some empty pools.
1454 double percent_free = gc.free_mem * 100.0 / gc.total_mem;
1455 if (percent_free < opts.options.min_free) {
1456 auto pool_size = gc.total_mem * 1.0 / opts.options.min_free
1458 newPool(round_up(cast(size_t)pool_size, PAGESIZE));
// calloc(): allocate through malloc() with the same attrs/pm_bitmask
// contract. NOTE(review): the zeroing presumably happens in code not shown
// here -- confirm against the full source.
1473 private void *calloc(size_t size, uint attrs, size_t* pm_bitmask)
1477 void *p = malloc(size, attrs, pm_bitmask);
// realloc(): resize a GC block, shrinking/extending large (page-sized) blocks
// in place when possible, otherwise falling back to malloc()+memcpy().
1486 private void *realloc(void *p, size_t size, uint attrs,
1496 return malloc(size, attrs, pm_bitmask);
1498 Pool* pool = findPool(p);
1502 // Set or retrieve attributes as appropriate
1503 auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
1505 clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
1506 setAttr(pool, bit_i, attrs);
1509 attrs = getAttr(pool, bit_i);
1511 void* blk_base_addr = pool.findBase(p);
1512 size_t blk_size = pool.findSize(p);
1513 bool has_pm = has_pointermap(attrs);
1514 size_t pm_bitmask_size = 0;
1516 pm_bitmask_size = size_t.sizeof;
1517 // Retrieve pointer map bit mask if appropriate
1518 if (pm_bitmask is null) {
1519 auto end_of_blk = cast(size_t**)(
1520 blk_base_addr + blk_size - size_t.sizeof);
1521 pm_bitmask = *end_of_blk;
// Sentinel mode: any size change forces a copy into a fresh block.
1525 if (opts.options.sentinel) {
1526 sentinel_Invariant(p);
1527 size_t sentinel_stored_size = *sentinel_size(p);
1528 if (sentinel_stored_size != size) {
1529 void* p2 = malloc(size, attrs, pm_bitmask);
1530 if (sentinel_stored_size < size)
1531 size = sentinel_stored_size;
1532 cstring.memcpy(p2, p, size);
1538 size += pm_bitmask_size;
1539 if (blk_size >= PAGESIZE && size >= PAGESIZE) {
1540 auto psz = blk_size / PAGESIZE;
1541 auto newsz = round_up(size, PAGESIZE);
1545 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
// Shrink in place: release the tail pages back to the pool.
1549 if (opts.options.mem_stomp)
1550 memset(p + size - pm_bitmask_size, 0xF2,
1551 blk_size - size - pm_bitmask_size);
1552 pool.freePages(pagenum + newsz, psz - newsz);
1553 auto new_blk_size = (PAGESIZE * newsz);
1554 gc.free_mem += blk_size - new_blk_size;
1555 // update the size cache, assuming that is very likely the
1556 // size of this block will be queried in the near future
1557 pool.update_cache(p, new_blk_size);
// Move the trailing PointerMap bitmask pointer to the new block end.
1559 auto end_of_blk = cast(size_t**)(blk_base_addr +
1560 new_blk_size - pm_bitmask_size);
1561 *end_of_blk = pm_bitmask;
1566 if (pagenum + newsz <= pool.npages) {
1567 // Attempt to expand in place
1568 for (size_t i = pagenum + psz; 1;) {
1569 if (i == pagenum + newsz) {
1570 if (opts.options.mem_stomp)
1571 memset(p + blk_size - pm_bitmask_size, 0xF0,
1572 size - blk_size - pm_bitmask_size);
1573 memset(pool.pagetable + pagenum + psz, B_PAGEPLUS,
1575 auto new_blk_size = (PAGESIZE * newsz);
1576 gc.free_mem -= new_blk_size - blk_size;
1577 // update the size cache, assuming that is very
1578 // likely the size of this block will be queried in
1580 pool.update_cache(p, new_blk_size);
1582 auto end_of_blk = cast(size_t**)(
1583 blk_base_addr + new_blk_size - pm_bitmask_size);
1584 *end_of_blk = pm_bitmask;
1589 if (i == pool.npages)
1591 if (pool.pagetable[i] != B_FREE)
// Fall back to allocate-and-copy when in-place resize is not possible.
1598 // if new size is bigger or less than half
1599 if (blk_size < size || blk_size > size * 2) {
1600 size -= pm_bitmask_size;
1601 blk_size -= pm_bitmask_size;
1602 void* p2 = malloc(size, attrs, pm_bitmask);
1603 if (blk_size < size)
1605 cstring.memcpy(p2, p, size);
1614 * Attempt to in-place enlarge the memory block pointed to by p by at least
1615 * min_size beyond its current capacity, up to a maximum of max_size. This
1616 * does not attempt to move the memory block (like realloc() does).
1619 * 0 if could not extend p,
1620 * total size of entire memory block if successful.
1622 private size_t extend(void* p, size_t minsize, size_t maxsize)
1625 assert( minsize <= maxsize );
1629 if (opts.options.sentinel)
1632 Pool* pool = findPool(p);
1636 // Retrieve attributes
1637 auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
1638 uint attrs = getAttr(pool, bit_i);
1640 void* blk_base_addr = pool.findBase(p);
1641 size_t blk_size = pool.findSize(p);
1642 bool has_pm = has_pointermap(attrs);
1643 size_t* pm_bitmask = null;
1644 size_t pm_bitmask_size = 0;
1646 pm_bitmask_size = size_t.sizeof;
1647 // Retrieve pointer map bit mask
1648 auto end_of_blk = cast(size_t**)(blk_base_addr +
1649 blk_size - size_t.sizeof);
1650 pm_bitmask = *end_of_blk;
// The bitmask word occupies the end of the block, so the caller's sizes
// must account for it.
1652 minsize += size_t.sizeof;
1653 maxsize += size_t.sizeof;
1656 if (blk_size < PAGESIZE)
1657 return 0; // cannot extend buckets
1659 auto psz = blk_size / PAGESIZE;
1660 auto minsz = round_up(minsize, PAGESIZE);
1661 auto maxsz = round_up(maxsize, PAGESIZE);
1663 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
// Count contiguous free pages directly after the block, up to maxsz.
1666 for (sz = 0; sz < maxsz; sz++)
1668 auto i = pagenum + psz + sz;
1669 if (i == pool.npages)
1671 if (pool.pagetable[i] != B_FREE)
1681 size_t new_size = (psz + sz) * PAGESIZE;
1683 if (opts.options.mem_stomp)
1684 memset(p + blk_size - pm_bitmask_size, 0xF0,
1685 new_size - blk_size - pm_bitmask_size);
// Claim the free pages as continuation pages of this allocation.
1686 memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
1689 gc.free_mem -= new_size - blk_size;
1690 // update the size cache, assuming that is very likely the size of this
1691 // block will be queried in the near future
1692 pool.update_cache(p, new_size);
// Re-store the PointerMap bitmask pointer at the new end of the block.
1695 new_size -= size_t.sizeof;
1696 auto end_of_blk = cast(size_t**)(blk_base_addr + new_size);
1697 *end_of_blk = pm_bitmask;
// free(): return the block at p to the GC.  Large (B_PAGE) allocations give
// their pages back to the pool; small allocations are pushed onto the
// per-bin free list.  NOTE(review): interior lines are elided in this
// excerpt; comments annotate only the visible statements.
1709 private void free(void *p)
1718 // Find which page it is in
// Pointers not owned by this GC are ignored (branch body elided).
1720 if (!pool) // if not one of ours
// In sentinel mode, validate the guards and step back to the real base.
1722 if (opts.options.sentinel) {
1723 sentinel_Invariant(p);
1724 p = sentinel_sub(p);
1726 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1727 bit_i = cast(size_t)(p - pool.baseAddr) / 16;
// Drop all block attributes (FINALIZE, NO_SCAN, ...) for the freed block.
1728 clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
1730 bin = cast(Bins)pool.pagetable[pagenum];
1731 if (bin == B_PAGE) // if large alloc
// Mark the whole first page free, then count trailing B_PAGEPLUS
// continuation pages so the entire multi-page block is released.
1736 pool.freebits.set_group(bit_i, PAGESIZE / 16);
1737 while (++n < pool.npages && pool.pagetable[n] == B_PAGEPLUS)
1739 size_t size = npages * PAGESIZE;
1740 if (opts.options.mem_stomp)
1741 memset(p, 0xF2, size);
1742 pool.freePages(pagenum, npages);
1743 gc.free_mem += size;
1744 // just in case we were caching this pointer
1745 pool.clear_cache(p);
// Small allocation: thread the chunk onto the head of its bin's free list.
1750 List* list = cast(List*) p;
1752 if (opts.options.mem_stomp)
1753 memset(p, 0xF2, binsize[bin]);
1755 list.next = gc.free_list[bin];
1757 gc.free_list[bin] = list;
1758 pool.freebits.set(bit_i);
1759 gc.free_mem += binsize[bin];
// If the free ratio exceeds the configured threshold, memory is presumably
// returned to the OS (the call itself is elided from this excerpt).
1761 double percent_free = gc.free_mem * 100.0 / gc.total_mem;
1762 if (percent_free > opts.options.min_free)
// sizeOf(): usable size of the allocation at p, excluding sentinel overhead
// and the hidden pointer-map word; 0 for interior/foreign pointers.
// NOTE(review): interior lines are elided in this excerpt.
1768 * Determine the allocated size of pointer p. If p is an interior pointer
1769 * or not a gc allocated pointer, return 0.
1771 private size_t sizeOf(void *p)
1775 if (opts.options.sentinel)
1776 p = sentinel_sub(p);
1778 Pool* pool = findPool(p);
1782 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
1783 uint attrs = getAttr(pool, biti);
1785 size_t size = pool.findSize(p);
// Blocks with a pointer map reserve one trailing word for the bitmask ptr.
1786 size_t pm_bitmask_size = 0;
1787 if (has_pointermap(attrs))
1788 pm_bitmask_size = size_t.sizeof;
1790 if (opts.options.sentinel) {
1791 // Check for interior pointer
1793 // 1) size is a power of 2 for less than PAGESIZE values
1794 // 2) base of memory pool is aligned on PAGESIZE boundary
// Relies on the two alignment facts above: a nonzero masked value means
// p does not sit at a block base.
1795 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
1797 return size - SENTINEL_EXTRA - pm_bitmask_size;
// Fast path: the last queried pointer/size pair is cached globally.
1800 if (p == gc.p_cache)
1801 return gc.size_cache;
1803 // Check for interior pointer
1805 // 1) size is a power of 2 for less than PAGESIZE values
1806 // 2) base of memory pool is aligned on PAGESIZE boundary
1807 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
// Refresh the cache before returning (p_cache update elided here).
1811 gc.size_cache = size - pm_bitmask_size;
1813 return gc.size_cache;
// checkNoSync(): debug validation of a GC pointer without taking the GC
// lock.  NOTE(review): interior lines are elided in this excerpt.
1819 * Verify that pointer p:
1820 * 1) belongs to this memory pool
1821 * 2) points to the start of an allocated piece of memory
1822 * 3) is not on a free list
1824 private void checkNoSync(void *p)
1828 if (opts.options.sentinel)
1829 sentinel_Invariant(p);
1837 if (opts.options.sentinel)
1838 p = sentinel_sub(p);
1841 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1842 bin = cast(Bins)pool.pagetable[pagenum];
// B_PAGEPLUS (interior of a large block) and B_FREE would violate (2).
1843 assert(bin <= B_PAGE);
1844 size = binsize[bin];
// Bin sizes are powers of two, so a base pointer must be size-aligned.
1845 assert((cast(size_t)p & (size - 1)) == 0);
1851 // Check that p is not on a free list
// Linear walk of the bin's free list; O(n) but this is debug-only checking.
1852 for (List* list = gc.free_list[bin]; list; list = list.next)
1854 assert(cast(void*)list != p);
// setStackBottom(): record the bottom of the thread's stack, keeping the
// outermost address seen so far.  The comparison direction depends on the
// STACKGROWSDOWN configuration at the top of this file.
// NOTE(review): braces/else of the version blocks are elided in this excerpt.
1865 private void setStackBottom(void *p)
1867 version (STACKGROWSDOWN)
1869 //p = (void *)((uint *)p + 4);
// Stack grows down: a numerically larger address is deeper (the bottom).
1870 if (p > gc.stack_bottom)
1872 gc.stack_bottom = p;
// Stack grows up: a numerically smaller address is the bottom.
1877 //p = (void *)((uint *)p - 4);
1878 if (p < gc.stack_bottom)
// NOTE(review): this branch casts to char* while the branch above assigns
// p directly; presumably gc.stack_bottom accepts both and one cast is
// redundant -- confirm against the GC struct declaration.
1880 gc.stack_bottom = cast(char*)p;
// getStats(): walk every pool's page table and every free list to compute
// pool size, used size and free-list size.  NOTE(review): interior lines
// (accumulator declarations, B_FREE branch) are elided in this excerpt.
1887 * Retrieve statistics about garbage collection.
1888 * Useful for debugging and tuning.
1890 private GCStats getStats()
1900 for (n = 0; n < gc.pools.length; n++)
1902 Pool* pool = gc.pools[n];
// Every pool contributes all of its pages to the total pool size.
1903 psize += pool.npages * PAGESIZE;
1904 for (size_t j = 0; j < pool.npages; j++)
1906 Bins bin = cast(Bins)pool.pagetable[j];
1909 else if (bin == B_PAGE)
1911 else if (bin < B_PAGE)
// Sum the sizes of all chunks sitting on the small-bin free lists.
1916 for (n = 0; n < B_PAGE; n++)
1918 for (List* list = gc.free_list[n]; list; list = list.next)
1919 flsize += binsize[n];
1922 usize = bsize - flsize;
1924 stats.poolsize = psize;
1925 stats.usedsize = bsize - flsize;
1926 stats.freelistsize = flsize;
1930 /******************* weak-reference support *********************/
// WeakPointer: malloc'd (non-GC) struct holding a reference plus a dispose
// hook that nulls it when the referent is destroyed.
// NOTE(review): the 'reference' field declaration and parts of the method
// bodies are elided in this excerpt.
1932 private struct WeakPointer
// Dispose-event callback, invoked by the runtime when the referent dies.
1936 void ondestroy(Object r)
1938 assert(r is reference);
1939 // lock for memory consistency (parallel readers)
1940 // also ensures that weakpointerDestroy can be called while another
1941 // thread is freeing the reference with "delete"
1942 return locked!(void, () {
1949 * Create a weak pointer to the given object.
1950 * Returns a pointer to an opaque struct allocated in C memory.
1952 void* weakpointerCreate( Object r )
1956 // must be allocated in C memory
1957 // 1. to hide the reference from the GC
1958 // 2. the GC doesn't scan delegates added by rt_attachDisposeEvent
1960 auto wp = cast(WeakPointer*)(cstdlib.malloc(WeakPointer.sizeof));
// Allocation-failure branch (the null check itself is elided here).
1962 onOutOfMemoryError();
// Register ondestroy so the runtime nulls the reference on delete.
1964 rt_attachDisposeEvent(r, &wp.ondestroy);
1971 * Destroy a weak pointer returned by weakpointerCreate().
1972 * If null is passed, nothing happens.
1974 void weakpointerDestroy( void* p )
1978 auto wp = cast(WeakPointer*)p;
1979 // must be extra careful about the GC or parallel threads
1980 // finalizing the reference at the same time
1981 return locked!(void, () {
// Unregister the hook; the struct's cstdlib.free is elided from this excerpt.
1983 rt_detachDisposeEvent(wp.reference, &wp.ondestroy);
1990 * Query a weak pointer and return either the object passed to
1991 * weakpointerCreate, or null if it was free'd in the meantime.
1992 * If null is passed, null is returned.
1994 Object weakpointerGet( void* p )
1998 // NOTE: could avoid the lock by using Fawzi style GC counters but
1999 // that'd require core.sync.Atomic and lots of care about memory
2000 // consistency it's an optional optimization see
2001 // http://dsource.org/projects/tango/browser/trunk/user/tango/core/Lifetime.d?rev=5100#L158
2002 return locked!(Object, () {
2003 return (cast(WeakPointer*)p).reference;
2009 /* ============================ Pool =============================== */
// Pool bit arrays: one bit per 16-byte granule of the pool's memory.
// NOTE(review): the struct header and several fields (baseAddr, topAddr,
// pagetable, npages, cached_ptr, cached_size) are elided in this excerpt.
2016 GCBits mark; // entries already scanned, or should not be scanned
2017 GCBits scan; // entries that need to be scanned
2018 GCBits freebits; // entries that are on the free list
2019 GCBits finals; // entries that need finalizer run on them
2020 GCBits noscan; // entries that should not be scanned
2025 /// Cache for findSize()
// Invalidate the findSize() cache; with no argument (null) always clears,
// with a pointer only clears when that exact pointer is the cached one.
2029 void clear_cache(void* ptr = null)
2031 if (ptr is null || ptr is this.cached_ptr) {
2032 this.cached_ptr = null;
2033 this.cached_size = 0;
// Record ptr/size as the most recently computed findSize() result.
2037 void update_cache(void* ptr, size_t size)
2039 this.cached_ptr = ptr;
2040 this.cached_size = size;
// initialize(): allocate the pool's memory, its per-granule bit arrays and
// its page table, marking every page B_FREE.  NOTE(review): interior lines
// (error branches, freebits initialization) are elided in this excerpt.
2043 void initialize(size_t npages)
2045 size_t poolsize = npages * PAGESIZE;
2046 assert(poolsize >= POOLSIZE);
2047 baseAddr = cast(byte *) os.alloc(poolsize);
2049 // Some of the code depends on page alignment of memory pools
2050 assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);
2057 topAddr = baseAddr + poolsize;
// One bit per 16-byte granule.
2059 size_t nbits = cast(size_t)poolsize / 16;
2061 // if the GC will run in parallel in a fork()ed process, we need to
2062 // share the mark bits
2063 os.Vis vis = os.Vis.PRIV;
2064 if (opts.options.fork)
2065 vis = os.Vis.SHARED;
2066 mark.alloc(nbits, vis); // shared between mark and sweep
2067 freebits.alloc(nbits); // not used by the mark phase
2068 scan.alloc(nbits); // only used in the mark phase
2069 finals.alloc(nbits); // not used by the mark phase
2070 noscan.alloc(nbits); // mark phase *MUST* have a snapshot
2072 // all is free when we start
2075 // avoid accidental sweeping of new pools while using eager allocation
2076 if (collect_in_progress())
// Page table is plain C heap memory, one byte (Bins value) per page.
2079 pagetable = cast(ubyte*) cstdlib.malloc(npages)
2081 onOutOfMemoryError();
2082 memset(pagetable, B_FREE, npages);
2084 this.npages = npages;
// NOTE(review): the following lines are fragments of Pool's destructor and
// invariant; both routine headers are elided in this excerpt.
// Destructor fragment: release the pool memory and the page table.
2096 result = os.dealloc(baseAddr, npages * PAGESIZE);
2104 // See Gcx.Dtor() for the rationale of the null check.
2106 cstdlib.free(pagetable);
// Mark bits were allocated shared when fork-based collection is enabled,
// so they must be freed with the matching visibility.
2108 os.Vis vis = os.Vis.PRIV;
2109 if (opts.options.fork)
2110 vis = os.Vis.SHARED;
// Invariant fragment: sanity checks over the pool's layout and page table.
2129 //freebits.Invariant();
2130 //finals.Invariant();
2131 //noscan.Invariant();
2135 //if (baseAddr + npages * PAGESIZE != topAddr)
2136 //printf("baseAddr = %p, npages = %d, topAddr = %p\n", baseAddr, npages, topAddr);
2137 assert(baseAddr + npages * PAGESIZE == topAddr);
// Every page table entry must be a valid Bins value.
2140 for (size_t i = 0; i < npages; i++)
2142 Bins bin = cast(Bins)pagetable[i];
2143 assert(bin < B_MAX);
// NOTE(review): interior lines of allocPages (run counting, success return)
// are elided in this excerpt.
2149 * Allocate n pages from Pool.
2150 * Returns OPFAIL on failure.
// Linear first-fit scan for a run of n contiguous B_FREE pages.
2152 size_t allocPages(size_t n)
2158 for (i = 0; i < npages; i++)
2160 if (pagetable[i] == B_FREE)
2173 * Free npages pages starting with pagenum.
// Simply flip the page table entries back to B_FREE.
2175 void freePages(size_t pagenum, size_t npages)
2177 memset(&pagetable[pagenum], B_FREE, npages);
// NOTE(review): interior lines (the bin < B_PAGEPLUS test, do keyword,
// null return) are elided in this excerpt.
2182 * Find base address of block containing pointer p.
2183 * Returns null if the pointer doesn't belong to this pool
2185 void* findBase(void *p)
2187 size_t offset = cast(size_t)(p - this.baseAddr);
2188 size_t pagenum = offset / PAGESIZE;
2189 Bins bin = cast(Bins)this.pagetable[pagenum];
2190 // Adjust bit to be at start of allocated memory block
// Small bins: mask the offset down to the bin-size boundary.
2192 return this.baseAddr + (offset & notbinsize[bin]);
// Interior page of a large block: walk backwards to the B_PAGE header page.
2193 if (bin == B_PAGEPLUS) {
2195 --pagenum, offset -= PAGESIZE;
2196 } while (cast(Bins)this.pagetable[pagenum] == B_PAGEPLUS);
// Round down to the page boundary of the block's first page.
2197 return this.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
2199 // we are in a B_FREE page
// NOTE(review): interior lines (the bin != B_PAGE early return) are elided
// in this excerpt.
2205 * Find size of pointer p.
2206 * Returns 0 if p doesn't belong to this pool or if its block size is less
2209 size_t findSize(void *p)
2211 size_t pagenum = cast(size_t)(p - this.baseAddr) / PAGESIZE;
2212 Bins bin = cast(Bins)this.pagetable[pagenum];
// Small bins have a fixed size per bin.
2214 return binsize[bin];
// Large block: serve repeated queries from the single-entry cache.
2215 if (this.cached_ptr == p)
2216 return this.cached_size;
// Count this page plus all following B_PAGEPLUS continuation pages.
2217 size_t i = pagenum + 1;
2218 for (; i < this.npages; i++)
2219 if (this.pagetable[i] != B_PAGEPLUS)
// Cache the result before returning.
2221 this.cached_ptr = p;
2222 this.cached_size = (i - pagenum) * PAGESIZE;
2223 return this.cached_size;
// NOTE(review): the 'return -1' body of the first branch is elided in this
// excerpt.  Orders pools by base address: negative / 0 / positive as in
// the usual opCmp contract (the cast turns the bool comparison into 0 or 1).
2228 * Used for sorting pools
2230 int opCmp(in Pool other)
2232 if (baseAddr < other.baseAddr)
2235 return cast(int)(baseAddr > other.baseAddr);
2240 /* ============================ SENTINEL =============================== */
// Sentinel layout: [size word][pre-guard word][user data...][post-guard byte].
// SENTINEL_EXTRA is the total overhead: two leading words plus one byte.
2243 const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
2244 const ubyte SENTINEL_POST = 0xF5; // 8 bits
2245 const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;
/// Address of the hidden size word, stored two words before the user pointer.
size_t* sentinel_size(void *p)
{
    return cast(size_t*) p - 2;
}
/// Address of the pre-guard word, stored one word before the user pointer.
size_t* sentinel_pre(void *p)
{
    return cast(size_t*) p - 1;
}
/// Address of the post-guard byte, placed right after the user data
/// (the stored size word tells us how long the user data is).
ubyte* sentinel_post(void *p)
{
    return cast(ubyte*) p + *sentinel_size(p);
}
// NOTE(review): braces and the failure branch of sentinel_Invariant are
// elided in this excerpt.
// Stamp the size word and both guard values around a freshly allocated block.
2253 void sentinel_init(void *p, size_t size)
2255 *sentinel_size(p) = size;
2256 *sentinel_pre(p) = SENTINEL_PRE;
2257 *sentinel_post(p) = SENTINEL_POST;
// Verify both guards are intact (corruption handling elided here).
2261 void sentinel_Invariant(void *p)
2263 if (*sentinel_pre(p) != SENTINEL_PRE ||
2264 *sentinel_post(p) != SENTINEL_POST)
// Convert a raw block base to the user-visible pointer (skip the two
// hidden leading words).
2269 void *sentinel_add(void *p)
2271 return p + 2 * size_t.sizeof;
// Inverse of sentinel_add(): user pointer back to the raw block base.
2275 void *sentinel_sub(void *p)
2277 return p - 2 * size_t.sizeof;
2282 /* ============================ C Public Interface ======================== */
// Cleanup level applied at gc_term(): 0 = none, 1 = default/safe (the
// initial value), 2 = full collect ignoring stack roots.
2285 private int _termCleanupLevel=1;
// NOTE(review): braces and the 'return 0' success path of the setter are
// elided in this excerpt.
2289 /// sets the cleanup level done by gc
2292 /// 2: fullCollect ignoring stack roots (might crash daemonThreads)
2293 /// result !=0 if the value was invalid
2294 int gc_setTermCleanupLevel(int cLevel)
// Out-of-range values are rejected and echoed back to signal the error.
2296 if (cLevel<0 || cLevel>2) return cLevel;
2297 _termCleanupLevel=cLevel;
2301 /// returns the cleanup level done by gc
2302 int gc_getTermCleanupLevel()
2304 return _termCleanupLevel;
// NOTE(review): the following lines are fragments of gc_init(), gc_term()
// and (presumably) gc_enable/gc_disable/gc_collect/gc_minimize; the routine
// headers and most bodies are elided in this excerpt.
// gc_init fragment: allocate and zero the global GC state in C memory.
2309 scope (exit) assert (Invariant());
2310 gc = cast(GC*) cstdlib.calloc(1, GC.sizeof);
// OSX needs the image segments registered before scanning.
2313 version (DigitalMars) version(OSX) {
2314 _d_osx_image_init();
2316 // NOTE: The GC must initialize the thread library
2317 // before its first collection.
// gc_term fragment: behavior depends on the configured cleanup level.
2323 assert (Invariant());
2324 if (_termCleanupLevel<1) {
2326 } else if (_termCleanupLevel==2){
2327 // a more complete cleanup
2328 // NOTE: There may be daemon threads still running when this routine is
2329 // called. If so, cleaning memory out from under them is a good
2330 // way to make them crash horribly.
2331 // Often this probably doesn't matter much since the app is
2332 // supposed to be shutting down anyway, but for example tests might
2333 // crash (and be considered failed even if the test was ok).
2334 // thus this is not the default and should be enabled by
2335 // I'm disabling cleanup for now until I can think about it some
2338 // not really a 'collect all' -- still scans static data area, roots,
2340 return locked!(void, () {
2346 // default (safe) cleanup
2347 return locked!(void, () {
// The following locked!() fragments belong to the enable/disable/collect/
// minimize wrappers; each takes the GC lock and re-checks the invariant.
2355 return locked!(void, () {
2356 assert (Invariant()); scope (exit) assert (Invariant());
// Disable/enable calls must be balanced.
2357 assert (gc.disabled > 0);
2364 return locked!(void, () {
2365 assert (Invariant()); scope (exit) assert (Invariant());
2372 return locked!(void, () {
2373 assert (Invariant()); scope (exit) assert (Invariant());
2381 return locked!(void, () {
2382 assert (Invariant()); scope (exit) assert (Invariant());
// C API attribute accessors.  Each takes the GC lock, resolves p to its
// pool and granule index, then reads/modifies the attribute bits.
// NOTE(review): braces, null-pool early returns and the closing of the
// locked!() lambdas are elided in this excerpt.
2387 uint gc_getAttr(void* p)
2391 return locked!(uint, () {
2392 assert (Invariant()); scope (exit) assert (Invariant());
2393 Pool* pool = findPool(p);
// Attribute bits are tracked per 16-byte granule.
2396 auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
2397 return getAttr(pool, bit_i);
// Sets attrs and returns the previous attribute bits.
2401 uint gc_setAttr(void* p, uint attrs)
2405 return locked!(uint, () {
2406 assert (Invariant()); scope (exit) assert (Invariant());
2407 Pool* pool = findPool(p);
2410 auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
2411 uint old_attrs = getAttr(pool, bit_i);
2412 setAttr(pool, bit_i, attrs);
// Clears attrs and returns the previous attribute bits.
2417 uint gc_clrAttr(void* p, uint attrs)
2421 return locked!(uint, () {
2422 assert (Invariant()); scope (exit) assert (Invariant());
2423 Pool* pool = findPool(p);
2426 auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
2427 uint old_attrs = getAttr(pool, bit_i);
2428 clrAttr(pool, bit_i, attrs);
// C API allocation entry points: thin locked wrappers over the private
// malloc/calloc/realloc, forwarding the type's pointer map for precise
// scanning.  NOTE(review): braces and the lambda closings are elided.
2433 void* gc_malloc(size_t size, uint attrs = 0,
2434 PointerMap ptrmap = PointerMap.init)
2438 return locked!(void*, () {
2439 assert (Invariant()); scope (exit) assert (Invariant());
2440 return malloc(size, attrs, ptrmap.bits.ptr);
2444 void* gc_calloc(size_t size, uint attrs = 0,
2445 PointerMap ptrmap = PointerMap.init)
2449 return locked!(void*, () {
2450 assert (Invariant()); scope (exit) assert (Invariant());
2451 return calloc(size, attrs, ptrmap.bits.ptr);
2455 void* gc_realloc(void* p, size_t size, uint attrs = 0,
2456 PointerMap ptrmap = PointerMap.init)
2458 return locked!(void*, () {
2459 assert (Invariant()); scope (exit) assert (Invariant());
2460 return realloc(p, size, attrs, ptrmap.bits.ptr);
// C API memory operations: locked wrappers over the private extend/reserve/
// free plus the addrOf/sizeOf queries.  NOTE(review): braces, null checks
// and lambda closings are elided in this excerpt.
2464 size_t gc_extend(void* p, size_t min_size, size_t max_size)
2466 return locked!(size_t, () {
2467 assert (Invariant()); scope (exit) assert (Invariant());
2468 return extend(p, min_size, max_size);
2472 size_t gc_reserve(size_t size)
2476 return locked!(size_t, () {
2477 assert (Invariant()); scope (exit) assert (Invariant());
2478 return reserve(size);
2482 void gc_free(void* p)
2486 return locked!(void, () {
2487 assert (Invariant()); scope (exit) assert (Invariant());
// Resolves p to the base address of its block, null for foreign pointers.
2492 void* gc_addrOf(void* p)
2496 return locked!(void*, () {
2497 assert (Invariant()); scope (exit) assert (Invariant());
2498 Pool* pool = findPool(p);
2501 return pool.findBase(p);
2505 size_t gc_sizeOf(void* p)
2509 return locked!(size_t, () {
2510 assert (Invariant()); scope (exit) assert (Invariant());
// gc_query(): block info for p (base/size/attrs packed in BlkInfo); a null
// argument short-circuits to BlkInfo.init before taking the lock.
// NOTE(review): braces and the lambda bodies are elided in this excerpt.
2515 BlkInfo gc_query(void* p)
2518 return BlkInfo.init;
2519 return locked!(BlkInfo, () {
2520 assert (Invariant()); scope (exit) assert (Invariant());
// Stats wrapper (presumably gc_stats(); its header is elided here).
2525 // NOTE: This routine is experimental. The stats or function name may change
2526 // before it is made officially available.
2529 return locked!(GCStats, () {
2530 assert (Invariant()); scope (exit) assert (Invariant());
// C API root/range registration: locked wrappers over the gc.roots and
// gc.ranges dynamic arrays.  NOTE(review): braces, null-argument early
// returns and lambda closings are elided in this excerpt.
2535 void gc_addRoot(void* p)
2539 return locked!(void, () {
2540 assert (Invariant()); scope (exit) assert (Invariant());
// append() returns null when the backing array could not grow.
2541 if (gc.roots.append(p) is null)
2542 onOutOfMemoryError();
2546 void gc_addRange(void* p, size_t size)
// Ignore degenerate ranges before taking the lock.
2548 if (p is null || size == 0)
2550 return locked!(void, () {
2551 assert (Invariant()); scope (exit) assert (Invariant());
2552 if (gc.ranges.append(Range(p, p + size)) is null)
2553 onOutOfMemoryError();
2557 void gc_removeRoot(void* p)
2561 return locked!(void, () {
2562 assert (Invariant()); scope (exit) assert (Invariant());
// The removal result is presumably asserted on (the assert is elided here).
2563 bool r = gc.roots.remove(p);
2568 void gc_removeRange(void* p)
2572 return locked!(void, () {
2573 assert (Invariant()); scope (exit) assert (Invariant());
// Ranges compare by start pointer only, hence the null end sentinel.
2574 bool r = gc.ranges.remove(Range(p, null));
// C API weak-pointer wrappers: unlike the rest of the C interface these do
// not use locked!() here, because the underlying weakpointer* routines take
// the GC lock themselves.  NOTE(review): braces are elided in this excerpt.
2579 void* gc_weakpointerCreate(Object r)
2581 // weakpointers do their own locking
2582 return weakpointerCreate(r);
2585 void gc_weakpointerDestroy(void* wp)
2587 // weakpointers do their own locking
2588 weakpointerDestroy(wp);
2591 Object gc_weakpointerGet(void* wp)
2593 // weakpointers do their own locking
2594 return weakpointerGet(wp);
2598 // vim: set et sw=4 sts=4 :