2 * This module contains the garbage collector implementation.
4 * Copyright: Copyright (C) 2001-2007 Digital Mars, www.digitalmars.com.
7 * This software is provided 'as-is', without any express or implied
8 * warranty. In no event will the authors be held liable for any damages
9 * arising from the use of this software.
11 * Permission is granted to anyone to use this software for any purpose,
12 * including commercial applications, and to alter it and redistribute it
13 * freely, in both source and binary form, subject to the following
16 * o The origin of this software must not be misrepresented; you must not
17 * claim that you wrote the original software. If you use this software
18 * in a product, an acknowledgment in the product documentation would be
19 * appreciated but is not required.
20 * o Altered source versions must be plainly marked as such, and must not
21 * be misrepresented as being the original software.
22 * o This notice may not be removed or altered from any source
24 * Authors: Walter Bright, David Friedman, Sean Kelly
29 // D Programming Language Garbage Collector implementation
31 /************** Debugging ***************************/
33 //debug = COLLECT_PRINTF; // turn on printf's
34 //debug = PTRCHECK; // more pointer checking
35 //debug = PTRCHECK2; // thorough but slow pointer checking
37 /*************** Configuration *********************/
39 version = STACKGROWSDOWN; // growing the stack means subtracting from the stack pointer
40 // (use for Intel X86 CPUs)
41 // else growing the stack means adding to the stack pointer
43 /***************************************************/
45 import rt.gc.cdgc.bits: GCBits;
46 import rt.gc.cdgc.stats: GCStats, Stats;
47 import dynarray = rt.gc.cdgc.dynarray;
48 import os = rt.gc.cdgc.os;
49 import opts = rt.gc.cdgc.opts;
51 import cstdlib = tango.stdc.stdlib;
52 import cstring = tango.stdc.string;
53 import cstdio = tango.stdc.stdio;
54 debug(COLLECT_PRINTF) alias cstdio.printf printf;
57 * This is a small optimization that has proved its usefulness. For small chunks
58 * of memory, memset() seems to be slower (probably because of the call overhead)
59 * than a simple loop that sets the memory directly.
61 void memset(void* dst, int c, size_t n)
63 // This number (32) has been determined empirically
65 cstring.memset(dst, c, n);
68 auto p = cast(ubyte*)(dst);
75 // BUG: The following import will likely not work, since the gcc
76 // subdirectory is elsewhere. Instead, perhaps the functions
77 // could be declared directly or some other resolution could be found.
79 static import gcc.builtins; // for __builtin_unwind_init
89 package enum BlkAttr : uint
91 FINALIZE = 0b0000_0001,
92 NO_SCAN = 0b0000_0010,
93 NO_MOVE = 0b0000_0100,
94 ALL_BITS = 0b1111_1111
97 package bool has_pointermap(uint attrs)
99 return !opts.options.conservative && !(attrs & BlkAttr.NO_SCAN);
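
// An illustrative sketch of the rule encoded above: a block only carries a
// PointerMap bitmask when precise scanning is possible for it. The unittest
// below is hypothetical and only exercises the visible logic.
version (none) unittest
{
    // whatever the configuration, a NO_SCAN block never needs a bitmask
    assert (!has_pointermap(BlkAttr.NO_SCAN));
    // and in precise mode (conservative option off) a scannable block gets one
    if (!opts.options.conservative)
        assert (has_pointermap(0));
}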
104 alias void delegate(Object) DEvent;
105 alias void delegate( void*, void* ) scanFn;
106 enum { OPFAIL = ~cast(size_t)0 }
110 version (DigitalMars) version(OSX)
111 void _d_osx_image_init();
113 void* rt_stackBottom();
115 void rt_finalize( void* p, bool det = true );
116 void rt_attachDisposeEvent(Object h, DEvent e);
117 bool rt_detachDisposeEvent(Object h, DEvent e);
118 void rt_scanStaticData( scanFn scan );
121 bool thread_needLock();
122 void thread_suspendAll();
123 void thread_resumeAll();
124 void thread_scanAll( scanFn fn, void* curStackTop = null );
126 void onOutOfMemoryError();
134 POOLSIZE = (4096*256),
148 B_PAGE, // start of large alloc
149 B_PAGEPLUS, // continuation of large alloc
169 int opCmp(in Range other)
171 if (pbot < other.pbot)
174 return cast(int)(pbot > other.pbot);
179 const uint binsize[B_MAX] = [ 16,32,64,128,256,512,1024,2048,4096 ];
180 const uint notbinsize[B_MAX] = [ ~(16u-1),~(32u-1),~(64u-1),~(128u-1),~(256u-1),
181 ~(512u-1),~(1024u-1),~(2048u-1),~(4096u-1) ];
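
// A worked example of how these tables are used further below: notbinsize[bin]
// masks an interior offset down to the start of its chunk. With the 256-byte
// bin, an offset of 0x1234 becomes 0x1234 & ~(256u - 1) == 0x1200, i.e. the
// base of the chunk containing it (see mark_range() and Pool.findBase()).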
184 /* ============================ GC =============================== */
187 class GCLock {} // just a dummy so we can get a global lock
198 // !=0 means don't scan stack
203 /// Turn off collections if > 0
206 // PID of the fork()ed process doing the mark() (0 if it is not running)
209 /// min(pool.baseAddr)
211 /// max(pool.topAddr)
214 /// Free list for each size
215 List*[B_MAX] free_list;
217 dynarray.DynArray!(void*) roots;
218 dynarray.DynArray!(Range) ranges;
219 dynarray.DynArray!(Pool*) pools;
224 // call locked if necessary
225 private T locked(T, alias Code)()
227 if (thread_needLock())
228 synchronized (gc.lock) return Code();
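
// An illustrative usage sketch (assuming the same pattern the public gc_*()
// entry points use below): the body is wrapped in locked!() so the global GC
// lock is only taken when more than one thread is running (thread_needLock()).
version (none) private size_t locked_sizeOf_example(void* p)
{
    return locked!(size_t, () {
        return sizeOf(p);
    })();
}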
237 assert (gc !is null);
239 for (size_t i = 0; i < gc.pools.length; i++) {
240 Pool* pool = gc.pools[i];
243 assert(gc.min_addr == pool.baseAddr);
244 if (i + 1 < gc.pools.length)
245 assert(*pool < *gc.pools[i + 1]);
246 else if (i + 1 == gc.pools.length)
247 assert(gc.max_addr == pool.topAddr);
250 gc.roots.Invariant();
251 gc.ranges.Invariant();
253 for (size_t i = 0; i < gc.ranges.length; i++) {
254 assert(gc.ranges[i].pbot);
255 assert(gc.ranges[i].ptop);
256 assert(gc.ranges[i].pbot <= gc.ranges[i].ptop);
259 for (size_t i = 0; i < B_PAGE; i++) {
260 for (List *list = gc.free_list[i]; list; list = list.next) {
261 auto pool = list.pool;
262 assert (pool !is null);
263 auto p = cast(byte*) list;
264 assert (p >= pool.baseAddr);
265 assert (p < pool.topAddr);
266 assert (pool.freebits.test((p - pool.baseAddr) / 16));
275 * Find Pool that pointer is in.
276 * Return null if not in a Pool.
277 * Assume pools is sorted.
279 Pool* findPool(void* p)
281 if (p < gc.min_addr || p >= gc.max_addr)
283 if (gc.pools.length == 0)
285 if (gc.pools.length == 1)
287 /// The gc.pools array is sorted by address, so do a binary search
289 size_t high = gc.pools.length - 1;
290 while (low <= high) {
291 size_t mid = (low + high) / 2;
292 auto pool = gc.pools[mid];
293 if (p < pool.baseAddr)
295 else if (p >= pool.topAddr)
306 * Determine the base address of the block containing p. If p is not a gc
307 * allocated pointer, return null.
309 BlkInfo getInfo(void* p)
312 Pool* pool = findPool(p);
316 info.base = pool.findBase(p);
317 if (info.base is null)
319 info.size = pool.findSize(info.base);
320 size_t bit_i = (info.base - pool.baseAddr) / 16;
321 info.attr = getAttr(pool, bit_i);
322 if (has_pointermap(info.attr)) {
323 info.size -= size_t.sizeof; // PointerMap bitmask
324 // Points to the PointerMap bitmask pointer, not user data
325 if (p >= (info.base + info.size)) {
329 if (opts.options.sentinel) {
330 info.base = sentinel_add(info.base);
331 // points to sentinel data, not user data
332 if (p < info.base || p >= sentinel_post(info.base))
334 info.size -= SENTINEL_EXTRA;
341 * Compute bin for size.
343 Bins findBin(size_t size)
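
// A minimal sketch, not the actual lookup code, of what findBin() computes:
// the smallest bucket whose bin size fits the request, falling back to B_PAGE
// for requests that fit in no bucket.
version (none) Bins findBinSketch(size_t size)
{
    for (int b = 0; b < B_PAGE; b++)
        if (size <= binsize[b])
            return cast(Bins) b;
    return B_PAGE;
}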
387 * Allocate a new pool of at least size bytes.
388 * Sort it into pools.
389 * Mark all memory in the pool as B_FREE.
390 * Return the actual number of bytes reserved or 0 on error.
392 size_t reserve(size_t size)
395 size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
396 Pool* pool = newPool(npages);
400 return pool.npages * PAGESIZE;
405 * Minimizes physical memory usage by returning free pools to the OS.
409 // Disabled if a parallel collection is in progress because the shared mark
410 // bits of the freed pool might be used by the mark process
411 if (gc.mark_proc_pid != 0)
418 for (n = 0; n < gc.pools.length; n++)
421 for (pn = 0; pn < pool.npages; pn++)
423 if (cast(Bins)pool.pagetable[pn] != B_FREE)
426 if (pn < pool.npages)
430 gc.pools.remove_at(n);
433 gc.min_addr = gc.pools[0].baseAddr;
434 gc.max_addr = gc.pools[gc.pools.length - 1].topAddr;
439 * Allocate a chunk of memory that is larger than a page.
440 * Return null if out of memory.
442 void* bigAlloc(size_t size, out Pool* pool)
451 npages = (size + PAGESIZE - 1) / PAGESIZE;
455 // This code could use some refinement when repeatedly
456 // allocating very large arrays.
458 for (n = 0; n < gc.pools.length; n++)
461 pn = pool.allocPages(npages);
476 freedpages = fullcollectshell();
477 if (freedpages >= gc.pools.length * ((POOLSIZE / PAGESIZE) / 4))
482 // Release empty pools to prevent bloat
485 pool = newPool(npages);
491 pn = pool.allocPages(npages);
492 assert(pn != OPFAIL);
495 // Release empty pools to prevent bloat
498 pool = newPool(npages);
501 pn = pool.allocPages(npages);
502 assert(pn != OPFAIL);
512 size_t bit_i = pn * (PAGESIZE / 16);
513 pool.freebits.clear(bit_i);
514 pool.pagetable[pn] = B_PAGE;
516 memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
517 p = pool.baseAddr + pn * PAGESIZE;
518 memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
519 if (opts.options.mem_stomp)
520 memset(p, 0xF1, size);
524 return null; // let mallocNoSync handle the error
529 * Allocate a new pool with at least npages in it.
530 * Sort it into pools.
531 * Return null if failed.
533 Pool *newPool(size_t npages)
535 // Minimum of POOLSIZE
536 if (npages < POOLSIZE/PAGESIZE)
537 npages = POOLSIZE/PAGESIZE;
538 else if (npages > POOLSIZE/PAGESIZE)
540 // Give us 150% of requested size, so there's room to extend
541 auto n = npages + (npages >> 1);
542 if (n < size_t.max/PAGESIZE)
546 // Allocate successively larger pools up to 8 megs
549 size_t n = gc.pools.length;
551 n = 8; // cap pool size at 8 megs
552 n *= (POOLSIZE / PAGESIZE);
557 auto pool = cast(Pool*) cstdlib.calloc(1, Pool.sizeof);
560 pool.initialize(npages);
567 auto inserted_pool = *gc.pools.insert_sorted!("*a < *b")(pool);
568 if (inserted_pool is null) {
572 assert (inserted_pool is pool);
573 gc.min_addr = gc.pools[0].baseAddr;
574 gc.max_addr = gc.pools[gc.pools.length - 1].topAddr;
580 * Allocate a page of bin's.
584 int allocPage(Bins bin)
589 for (size_t n = 0; n < gc.pools.length; n++)
592 pn = pool.allocPages(1);
599 pool.pagetable[pn] = cast(ubyte)bin;
601 // Convert page to free list
602 size_t size = binsize[bin];
603 auto list_head = &gc.free_list[bin];
605 byte* p = pool.baseAddr + pn * PAGESIZE;
606 byte* ptop = p + PAGESIZE;
607 size_t bit_i = pn * (PAGESIZE / 16);
608 pool.freebits.set_group(bit_i, PAGESIZE / 16);
609 for (; p < ptop; p += size)
611 List* l = cast(List *) p;
621 * Search a range of memory values and mark any pointers into the GC pool using
622 * type information (bitmask of pointer locations).
624 void mark_range(void *pbot, void *ptop, size_t* pm_bitmask)
626 // TODO: make our own assert because assert uses the GC
627 assert (pbot <= ptop);
629 const BITS_PER_WORD = size_t.sizeof * 8;
631 void **p1 = cast(void **)pbot;
632 void **p2 = cast(void **)ptop;
634 bool changes = false;
636 size_t type_size = pm_bitmask[0];
637 size_t* pm_bits = pm_bitmask + 1;
638 bool has_type_info = type_size != 1 || pm_bits[0] != 1 || pm_bits[1] != 0;
640 //printf("marking range: %p -> %p\n", pbot, ptop);
641 for (; p1 + type_size <= p2; p1 += type_size) {
642 for (size_t n = 0; n < type_size; n++) {
643 // scan bit set for this word
645 !(pm_bits[n / BITS_PER_WORD] & (1 << (n % BITS_PER_WORD))))
650 if (p < gc.min_addr || p >= gc.max_addr)
653 if ((cast(size_t)p & ~(PAGESIZE-1)) == pcache)
656 Pool* pool = findPool(p);
659 size_t offset = cast(size_t)(p - pool.baseAddr);
661 size_t pn = offset / PAGESIZE;
662 Bins bin = cast(Bins)pool.pagetable[pn];
664 // Cache B_PAGE, B_PAGEPLUS and B_FREE lookups
666 pcache = cast(size_t)p & ~(PAGESIZE-1);
668 // Adjust bit to be at start of allocated memory block
670 bit_i = (offset & notbinsize[bin]) / 16;
671 else if (bin == B_PAGEPLUS)
677 while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
678 bit_i = pn * (PAGESIZE / 16);
680 else // Don't mark bits in B_FREE pages
683 if (!pool.mark.test(bit_i))
685 pool.mark.set(bit_i);
686 if (!pool.noscan.test(bit_i))
688 pool.scan.set(bit_i);
696 gc.any_changes = true;
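
// A sketch of the pointer-map convention used above, derived from the code:
// pm_bitmask[0] holds the type size in words and the following words hold one
// bit per word of the type, set when that word may contain a pointer and
// therefore must be scanned. The 2-word type below is hypothetical.
version (none) unittest
{
    // a 2-word type whose first word is a pointer and whose second is not
    size_t[2] pm_bitmask;
    pm_bitmask[0] = 2;      // type size in words
    pm_bitmask[1] = 0b01;   // bit 0 set: word 0 may hold a pointer
    size_t type_size = pm_bitmask[0];
    size_t* pm_bits = pm_bitmask.ptr + 1;
    assert (type_size == 2);
    assert (pm_bits[0] & (1 << 0));    // word 0: scan
    assert (!(pm_bits[0] & (1 << 1))); // word 1: skip
}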
700 * Return number of full pages free'd.
702 size_t fullcollectshell()
704 gc.stats.collection_started();
706 gc.stats.collection_finished();
708 // The purpose of the 'shell' is to ensure all the registers
709 // get put on the stack so they'll be scanned
714 gcc.builtins.__builtin_unwind_init();
721 uint eax,ecx,edx,ebx,ebp,esi,edi;
734 else version (X86_64)
736 ulong rax,rbx,rcx,rdx,rbp,rsi,rdi,r8,r9,r10,r11,r12,r13,r14,r15;
759 static assert( false, "Architecture not supported." );
770 result = fullcollect(sp);
793 size_t fullcollect(void *stackTop)
795 debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
797 // If eager allocation is used, we need to check first if there is a mark
798 // process running. If there isn't, we start a new one (see the next code
799 // block). If there is, we check if it's still running or already finished.
800 // If it's still running, we tell the caller process no memory has been
801 // recovered (it will allocate more to fulfill the current request). If
802 // the mark process is done, we launch the sweep phase and hope enough
803 // memory is freed (if that is not the case, the caller will allocate more
804 // memory and the next time it's exhausted it will run a new collection).
805 if (opts.options.eager_alloc) {
806 if (gc.mark_proc_pid != 0) { // there is a mark process in progress
807 os.WRes r = os.wait_pid(gc.mark_proc_pid, false); // don't block
808 assert (r != os.WRes.ERROR);
811 debug(COLLECT_PRINTF) printf("\t\tmark proc DONE\n");
812 gc.mark_proc_pid = 0;
814 case os.WRes.RUNNING:
815 debug(COLLECT_PRINTF) printf("\t\tmark proc RUNNING\n");
818 debug(COLLECT_PRINTF) printf("\t\tmark proc ERROR\n");
819 disable_fork(); // Try to keep going without forking
825 // We always need to stop the world to make the threads save their CPU
826 // registers on the stack and prepare themselves for thread_scanAll()
828 gc.stats.world_stopped();
830 // If forking is enabled, we fork() and start a new mark phase in the
831 // child. The parent process will tell the caller that no memory could be
832 // recycled if eager allocation is used, allowing the mutator to keep going
833 // almost instantly (at the expense of more memory consumption because
834 // a new allocation will be triggered to fulfill the current request). If
835 // no eager allocation is used, the parent will wait for the mark phase to
836 // finish before returning control to the mutator, but other threads are
837 // restarted and may run in parallel with the mark phase (unless they
838 // allocate or use the GC themselves, in which case the global GC lock will block them until the collection finishes).
840 if (opts.options.fork) {
841 cstdio.fflush(null); // avoid duplicated FILE* output
842 os.pid_t child_pid = os.fork();
843 assert (child_pid != -1); // don't accept errors in non-release mode
845 case -1: // if fork() fails, fall back to stop-the-world
848 case 0: // child process (i.e. the collector's mark phase)
851 break; // bogus, will never reach here
852 default: // parent process (i.e. the mutator)
853 // start the world again and wait for the mark phase to finish
855 gc.stats.world_started();
856 if (opts.options.eager_alloc) {
857 gc.mark_proc_pid = child_pid;
860 os.WRes r = os.wait_pid(child_pid); // block until it finishes
861 assert (r == os.WRes.DONE);
862 debug(COLLECT_PRINTF) printf("\t\tmark proc DONE (block)\n");
863 if (r == os.WRes.DONE)
865 debug(COLLECT_PRINTF) printf("\tmark() proc ERROR\n");
866 // If there was some error, try to keep going without forking
868 // Re-suspend the threads to do the marking in this process
870 gc.stats.world_stopped();
875 // If we reach here, we are using the standard stop-the-world collection,
876 // either because fork was disabled in the first place, or because it was
877 // disabled because of some error.
880 gc.stats.world_started();
889 void mark(void *stackTop)
891 debug(COLLECT_PRINTF) printf("\tmark()\n");
893 gc.any_changes = false;
895 for (size_t n = 0; n < gc.pools.length; n++)
897 Pool* pool = gc.pools[n];
898 pool.mark.copy(&pool.freebits);
902 /// Marks a range of memory in conservative mode.
903 void mark_conservative_range(void* pbot, void* ptop)
905 mark_range(pbot, ptop, PointerMap.init.bits.ptr);
908 rt_scanStaticData(&mark_conservative_range);
912 // Scan stacks and registers for each paused thread
913 thread_scanAll(&mark_conservative_range, stackTop);
917 debug(COLLECT_PRINTF) printf("scan roots[]\n");
918 mark_conservative_range(gc.roots.ptr, gc.roots.ptr + gc.roots.length);
921 debug(COLLECT_PRINTF) printf("scan ranges[]\n");
922 for (size_t n = 0; n < gc.ranges.length; n++)
924 debug(COLLECT_PRINTF) printf("\t%x .. %x\n", gc.ranges[n].pbot, gc.ranges[n].ptop);
925 mark_conservative_range(gc.ranges[n].pbot, gc.ranges[n].ptop);
928 debug(COLLECT_PRINTF) printf("\tscan heap\n");
929 while (gc.any_changes)
931 gc.any_changes = false;
932 for (size_t n = 0; n < gc.pools.length; n++)
938 Pool* pool = gc.pools[n];
940 bbase = pool.scan.base();
941 btop = bbase + pool.scan.nwords;
942 for (b = bbase; b < btop;)
958 o = pool.baseAddr + (b - bbase) * 32 * 16;
959 if (!(bitm & 0xFFFF))
964 for (; bitm; o += 16, bitm >>= 1)
969 pn = cast(size_t)(o - pool.baseAddr) / PAGESIZE;
970 bin = cast(Bins)pool.pagetable[pn];
972 if (opts.options.conservative)
973 mark_conservative_range(o, o + binsize[bin]);
975 auto end_of_blk = cast(size_t**)(o +
976 binsize[bin] - size_t.sizeof);
977 size_t* pm_bitmask = *end_of_blk;
978 mark_range(o, end_of_blk, pm_bitmask);
981 else if (bin == B_PAGE || bin == B_PAGEPLUS)
983 if (bin == B_PAGEPLUS)
985 while (pool.pagetable[pn - 1] != B_PAGE)
989 while (pn + u < pool.npages &&
990 pool.pagetable[pn + u] == B_PAGEPLUS)
993 size_t blk_size = u * PAGESIZE;
994 if (opts.options.conservative)
995 mark_conservative_range(o, o + blk_size);
997 auto end_of_blk = cast(size_t**)(o + blk_size -
999 size_t* pm_bitmask = *end_of_blk;
1000 mark_range(o, end_of_blk, pm_bitmask);
1015 // Free up everything not marked
1016 debug(COLLECT_PRINTF) printf("\tsweep\n");
1019 size_t freedpages = 0;
1021 for (size_t n = 0; n < gc.pools.length; n++)
1023 Pool* pool = gc.pools[n];
1025 uint* bbase = pool.mark.base();
1027 for (pn = 0; pn < pool.npages; pn++, bbase += PAGESIZE / (32 * 16))
1029 Bins bin = cast(Bins)pool.pagetable[pn];
1033 auto size = binsize[bin];
1034 byte* p = pool.baseAddr + pn * PAGESIZE;
1035 byte* ptop = p + PAGESIZE;
1036 size_t bit_i = pn * (PAGESIZE/16);
1037 size_t bit_stride = size / 16;
1039 version(none) // BUG: doesn't work because freebits() must also be cleared
1041 // If free'd entire page
1042 if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 &&
1043 bbase[3] == 0 && bbase[4] == 0 && bbase[5] == 0 &&
1044 bbase[6] == 0 && bbase[7] == 0)
1046 for (; p < ptop; p += size, bit_i += bit_stride)
1048 if (pool.finals.testClear(bit_i)) {
1049 if (opts.options.sentinel)
1050 rt_finalize(sentinel_add(p), false/*gc.no_stack > 0*/);
1052 rt_finalize(p, false/*gc.no_stack > 0*/);
1054 clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
1056 if (opts.options.mem_stomp)
1057 memset(p, 0xF3, size);
1059 pool.pagetable[pn] = B_FREE;
1064 for (; p < ptop; p += size, bit_i += bit_stride)
1066 if (!pool.mark.test(bit_i))
1068 if (opts.options.sentinel)
1069 sentinel_Invariant(sentinel_add(p));
1071 pool.freebits.set(bit_i);
1072 if (pool.finals.testClear(bit_i)) {
1073 if (opts.options.sentinel)
1074 rt_finalize(sentinel_add(p), false/*gc.no_stack > 0*/);
1076 rt_finalize(p, false/*gc.no_stack > 0*/);
1078 clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
1080 if (opts.options.mem_stomp)
1081 memset(p, 0xF3, size);
1087 else if (bin == B_PAGE)
1089 size_t bit_stride = PAGESIZE / 16;
1090 size_t bit_i = pn * bit_stride;
1091 if (!pool.mark.test(bit_i))
1093 byte *p = pool.baseAddr + pn * PAGESIZE;
1094 if (opts.options.sentinel)
1095 sentinel_Invariant(sentinel_add(p));
1096 if (pool.finals.testClear(bit_i)) {
1097 if (opts.options.sentinel)
1098 rt_finalize(sentinel_add(p), false/*gc.no_stack > 0*/);
1100 rt_finalize(p, false/*gc.no_stack > 0*/);
1102 clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
1104 debug(COLLECT_PRINTF) printf("\tcollecting big %p\n", p);
1105 pool.pagetable[pn] = B_FREE;
1106 pool.freebits.set_group(bit_i, PAGESIZE / 16);
1108 if (opts.options.mem_stomp)
1109 memset(p, 0xF3, PAGESIZE);
1110 while (pn + 1 < pool.npages && pool.pagetable[pn + 1] == B_PAGEPLUS)
1113 pool.pagetable[pn] = B_FREE;
1114 bit_i += bit_stride;
1115 pool.freebits.set_group(bit_i, PAGESIZE / 16);
1118 if (opts.options.mem_stomp)
1121 memset(p, 0xF3, PAGESIZE);
1130 gc.free_list[] = null;
1132 // Free complete pages, rebuild free list
1133 debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
1134 size_t recoveredpages = 0;
1135 for (size_t n = 0; n < gc.pools.length; n++)
1137 Pool* pool = gc.pools[n];
1138 for (size_t pn = 0; pn < pool.npages; pn++)
1140 Bins bin = cast(Bins)pool.pagetable[pn];
1146 size_t size = binsize[bin];
1147 size_t bit_stride = size / 16;
1148 size_t bit_base = pn * (PAGESIZE / 16);
1149 size_t bit_top = bit_base + (PAGESIZE / 16);
1153 for (; bit_i < bit_top; bit_i += bit_stride)
1155 if (!pool.freebits.test(bit_i))
1158 pool.pagetable[pn] = B_FREE;
1159 pool.freebits.set_group(bit_base, PAGESIZE / 16);
1164 p = pool.baseAddr + pn * PAGESIZE;
1165 for (u = 0; u < PAGESIZE; u += size)
1167 bit_i = bit_base + u / 16;
1168 if (pool.freebits.test(bit_i))
1170 assert ((p+u) >= pool.baseAddr);
1171 assert ((p+u) < pool.topAddr);
1172 List* list = cast(List*) (p + u);
1173 // avoid unnecessary writes (it really saves time)
1174 if (list.next != gc.free_list[bin])
1175 list.next = gc.free_list[bin];
1176 if (list.pool != pool)
1178 gc.free_list[bin] = list;
1185 debug(COLLECT_PRINTF) printf("recovered pages = %d\n", recoveredpages);
1186 debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, gc.pools.length);
1188 return freedpages + recoveredpages;
1195 uint getAttr(Pool* pool, size_t bit_i)
1203 if (pool.finals.test(bit_i))
1204 attrs |= BlkAttr.FINALIZE;
1205 if (pool.noscan.test(bit_i))
1206 attrs |= BlkAttr.NO_SCAN;
1207 // if (pool.nomove.test(bit_i))
1208 // attrs |= BlkAttr.NO_MOVE;
1216 void setAttr(Pool* pool, size_t bit_i, uint mask)
1223 if (mask & BlkAttr.FINALIZE)
1225 pool.finals.set(bit_i);
1227 if (mask & BlkAttr.NO_SCAN)
1229 pool.noscan.set(bit_i);
1231 // if (mask & BlkAttr.NO_MOVE)
1233 // if (!pool.nomove.nbits)
1234 // pool.nomove.alloc(pool.mark.nbits);
1235 // pool.nomove.set(bit_i);
1243 void clrAttr(Pool* pool, size_t bit_i, uint mask)
1250 if (mask & BlkAttr.FINALIZE)
1251 pool.finals.clear(bit_i);
1252 if (mask & BlkAttr.NO_SCAN)
1253 pool.noscan.clear(bit_i);
1254 // if (mask & BlkAttr.NO_MOVE && pool.nomove.nbits)
1255 // pool.nomove.clear(bit_i);
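
// A hedged usage sketch of the attribute helpers above, through the public
// interface defined further below (gc_setAttr()/gc_getAttr()/gc_clrAttr()
// simply locate the pool and bit index for p and forward to these functions).
version (none) unittest
{
    void* p = gc_malloc(64);
    gc_setAttr(p, BlkAttr.FINALIZE);
    assert (gc_getAttr(p) & BlkAttr.FINALIZE);
    gc_clrAttr(p, BlkAttr.FINALIZE);
    assert (!(gc_getAttr(p) & BlkAttr.FINALIZE));
    gc_free(p);
}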
1261 // we have to disable both options, as eager_alloc assumes fork is enabled
1262 opts.options.fork = false;
1263 opts.options.eager_alloc = false;
1270 gc.stack_bottom = cast(char*)&dummy;
1271 opts.parse(cstdlib.getenv("D_GC_OPTS"));
1272 // If we are going to fork, make sure we have the needed OS support
1273 if (opts.options.fork)
1274 opts.options.fork = os.HAVE_SHARED && os.HAVE_FORK;
1275 // Eager allocation is only possible when forking
1276 if (!opts.options.fork)
1277 opts.options.eager_alloc = false;
1278 gc.lock = GCLock.classinfo;
1280 setStackBottom(rt_stackBottom());
1281 gc.stats = Stats(gc);
1282 if (opts.options.prealloc_npools) {
1283 size_t pages = (opts.options.prealloc_psize + PAGESIZE - 1) / PAGESIZE;
1284 for (size_t i = 0; i < opts.options.prealloc_npools; ++i)
1293 private void *malloc(size_t size, uint attrs, size_t* pm_bitmask)
1297 gc.stats.malloc_started(size, attrs, pm_bitmask);
1299 gc.stats.malloc_finished(p);
1304 if (opts.options.sentinel)
1305 size += SENTINEL_EXTRA;
1307 bool has_pm = has_pointermap(attrs);
1309 size += size_t.sizeof;
1312 // Cache previous binsize lookup - Dave Fladebo.
1313 static size_t lastsize = -1;
1314 static Bins lastbin;
1315 if (size == lastsize)
1319 bin = findBin(size);
1325 size_t bit_i = void;
1326 size_t capacity = void; // to figure out where to store the bitmask
1329 p = gc.free_list[bin];
1332 if (!allocPage(bin) && !gc.disabled) // try to find a new page
1334 if (!thread_needLock())
1336 /* Then we haven't locked it yet. Be sure
1337 * to lock before a collection, since a finalizer
1338 * may start a new thread.
1340 synchronized (gc.lock)
1345 else if (!fullcollectshell()) // collect to find a new page
1350 if (!gc.free_list[bin] && !allocPage(bin))
1352 newPool(1); // allocate new pool to find a new page
1353 // TODO: hint allocPage() to use the pool we just created
1354 int result = allocPage(bin);
1356 onOutOfMemoryError();
1358 p = gc.free_list[bin];
1360 capacity = binsize[bin];
1362 // Return next item from free list
1363 List* list = cast(List*) p;
1364 assert ((cast(byte*)list) >= list.pool.baseAddr);
1365 assert ((cast(byte*)list) < list.pool.topAddr);
1366 gc.free_list[bin] = list.next;
1368 bit_i = (p - pool.baseAddr) / 16;
1369 assert (pool.freebits.test(bit_i));
1370 pool.freebits.clear(bit_i);
1371 if (!(attrs & BlkAttr.NO_SCAN))
1372 memset(p + size, 0, capacity - size);
1373 if (opts.options.mem_stomp)
1374 memset(p, 0xF0, size);
1378 p = bigAlloc(size, pool);
1380 onOutOfMemoryError();
1381 assert (pool !is null);
1382 // Round the size up to the number of pages needed to store it
1383 size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
1384 capacity = npages * PAGESIZE;
1385 bit_i = (p - pool.baseAddr) / 16;
1388 // Store the bit mask AFTER SENTINEL_POST
1389 // TODO: store it BEFORE, so the bitmask is protected too
1391 auto end_of_blk = cast(size_t**)(p + capacity - size_t.sizeof);
1392 *end_of_blk = pm_bitmask;
1393 size -= size_t.sizeof;
1396 if (opts.options.sentinel) {
1397 size -= SENTINEL_EXTRA;
1398 p = sentinel_add(p);
1399 sentinel_init(p, size);
1403 setAttr(pool, bit_i, attrs);
1404 assert (bin >= B_PAGE || !pool.freebits.test(bit_i));
1414 private void *calloc(size_t size, uint attrs, size_t* pm_bitmask)
1418 void *p = malloc(size, attrs, pm_bitmask);
1427 private void *realloc(void *p, size_t size, uint attrs,
1440 p = malloc(size, attrs, pm_bitmask);
1444 Pool* pool = findPool(p);
1448 // Set or retrieve attributes as appropriate
1449 auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
1451 clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
1452 setAttr(pool, bit_i, attrs);
1455 attrs = getAttr(pool, bit_i);
1457 void* blk_base_addr = pool.findBase(p);
1458 size_t blk_size = pool.findSize(p);
1459 bool has_pm = has_pointermap(attrs);
1460 size_t pm_bitmask_size = 0;
1462 pm_bitmask_size = size_t.sizeof;
1463 // Retrieve pointer map bit mask if appropriate
1464 if (pm_bitmask is null) {
1465 auto end_of_blk = cast(size_t**)(blk_base_addr +
1466 blk_size - size_t.sizeof);
1467 pm_bitmask = *end_of_blk;
1471 if (opts.options.sentinel)
1473 sentinel_Invariant(p);
1474 size_t sentinel_stored_size = *sentinel_size(p);
1475 if (sentinel_stored_size != size)
1477 void* p2 = malloc(size, attrs, pm_bitmask);
1478 if (sentinel_stored_size < size)
1479 size = sentinel_stored_size;
1480 cstring.memcpy(p2, p, size);
1486 size += pm_bitmask_size;
1487 if (blk_size >= PAGESIZE && size >= PAGESIZE)
1489 auto psz = blk_size / PAGESIZE;
1490 auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
1494 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
1499 if (opts.options.mem_stomp)
1500 memset(p + size - pm_bitmask_size, 0xF2,
1501 blk_size - size - pm_bitmask_size);
1502 pool.freePages(pagenum + newsz, psz - newsz);
1503 auto new_blk_size = (PAGESIZE * newsz);
1504 // update the size cache, assuming it is very likely that the
1505 // size of this block will be queried in the near future
1506 pool.update_cache(p, new_blk_size);
1508 auto end_of_blk = cast(size_t**)(blk_base_addr +
1509 new_blk_size - pm_bitmask_size);
1510 *end_of_blk = pm_bitmask;
1514 else if (pagenum + newsz <= pool.npages)
1516 // Attempt to expand in place
1517 for (size_t i = pagenum + psz; 1;)
1519 if (i == pagenum + newsz)
1521 if (opts.options.mem_stomp)
1522 memset(p + blk_size - pm_bitmask_size,
1523 0xF0, size - blk_size
1525 memset(pool.pagetable + pagenum +
1526 psz, B_PAGEPLUS, newsz - psz);
1527 auto new_blk_size = (PAGESIZE * newsz);
1528 // update the size cache, assuming it is very likely that
1529 // the size of this block will be queried in the near future
1531 pool.update_cache(p, new_blk_size);
1533 auto end_of_blk = cast(size_t**)(
1534 blk_base_addr + new_blk_size -
1536 *end_of_blk = pm_bitmask;
1540 if (i == pool.npages)
1544 if (pool.pagetable[i] != B_FREE)
1550 // if the new size is bigger than the block or less than half of it
1551 if (blk_size < size || blk_size > size * 2)
1553 size -= pm_bitmask_size;
1554 blk_size -= pm_bitmask_size;
1555 void* p2 = malloc(size, attrs, pm_bitmask);
1556 if (blk_size < size)
1558 cstring.memcpy(p2, p, size);
1568 * Attempt to in-place enlarge the memory block pointed to by p by at least
1569 * min_size beyond its current capacity, up to a maximum of max_size. This
1570 * does not attempt to move the memory block (like realloc() does).
1573 * 0 if could not extend p,
1574 * total size of entire memory block if successful.
1576 private size_t extend(void* p, size_t minsize, size_t maxsize)
1579 assert( minsize <= maxsize );
1583 if (opts.options.sentinel)
1586 Pool* pool = findPool(p);
1590 // Retrieve attributes
1591 auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
1592 uint attrs = getAttr(pool, bit_i);
1594 void* blk_base_addr = pool.findBase(p);
1595 size_t blk_size = pool.findSize(p);
1596 bool has_pm = has_pointermap(attrs);
1597 size_t* pm_bitmask = null;
1598 size_t pm_bitmask_size = 0;
1600 pm_bitmask_size = size_t.sizeof;
1601 // Retrieve pointer map bit mask
1602 auto end_of_blk = cast(size_t**)(blk_base_addr +
1603 blk_size - size_t.sizeof);
1604 pm_bitmask = *end_of_blk;
1606 minsize += size_t.sizeof;
1607 maxsize += size_t.sizeof;
1610 if (blk_size < PAGESIZE)
1611 return 0; // cannot extend buckets
1613 auto psz = blk_size / PAGESIZE;
1614 auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
1615 auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
1617 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
1620 for (sz = 0; sz < maxsz; sz++)
1622 auto i = pagenum + psz + sz;
1623 if (i == pool.npages)
1625 if (pool.pagetable[i] != B_FREE)
1635 size_t new_size = (psz + sz) * PAGESIZE;
1637 if (opts.options.mem_stomp)
1638 memset(p + blk_size - pm_bitmask_size, 0xF0,
1639 new_size - blk_size - pm_bitmask_size);
1640 memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
1643 // update the size cache, assuming it is very likely that the size of this
1644 // block will be queried in the near future
1645 pool.update_cache(p, new_size);
1648 new_size -= size_t.sizeof;
1649 auto end_of_blk = cast(size_t**)(blk_base_addr + new_size);
1650 *end_of_blk = pm_bitmask;
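
// A hedged usage sketch of the public wrapper gc_extend() defined below: the
// result is either 0 (the block could not be grown in place) or the new total
// size of the whole block; there is no partial success.
version (none) unittest
{
    void* p = gc_malloc(PAGESIZE * 2);
    size_t total = gc_extend(p, PAGESIZE, PAGESIZE * 4);
    assert (total == 0 || total >= PAGESIZE * 3);
    gc_free(p);
}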
1659 private void free(void *p)
1668 // Find which page it is in
1670 if (!pool) // if not one of ours
1672 if (opts.options.sentinel) {
1673 sentinel_Invariant(p);
1674 p = sentinel_sub(p);
1676 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1677 bit_i = cast(size_t)(p - pool.baseAddr) / 16;
1678 clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
1680 bin = cast(Bins)pool.pagetable[pagenum];
1681 if (bin == B_PAGE) // if large alloc
1686 pool.freebits.set_group(bit_i, PAGESIZE / 16);
1687 while (++n < pool.npages && pool.pagetable[n] == B_PAGEPLUS)
1689 if (opts.options.mem_stomp)
1690 memset(p, 0xF2, npages * PAGESIZE);
1691 pool.freePages(pagenum, npages);
1692 // just in case we were caching this pointer
1693 pool.clear_cache(p);
1698 List* list = cast(List*) p;
1700 if (opts.options.mem_stomp)
1701 memset(p, 0xF2, binsize[bin]);
1703 list.next = gc.free_list[bin];
1705 gc.free_list[bin] = list;
1706 pool.freebits.set(bit_i);
1712 * Determine the allocated size of pointer p. If p is an interior pointer
1713 * or not a gc allocated pointer, return 0.
1715 private size_t sizeOf(void *p)
1719 if (opts.options.sentinel)
1720 p = sentinel_sub(p);
1722 Pool* pool = findPool(p);
1726 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
1727 uint attrs = getAttr(pool, biti);
1729 size_t size = pool.findSize(p);
1730 size_t pm_bitmask_size = 0;
1731 if (has_pointermap(attrs))
1732 pm_bitmask_size = size_t.sizeof;
1734 if (opts.options.sentinel) {
1735 // Check for interior pointer
1737 // 1) size is a power of 2 for less than PAGESIZE values
1738 // 2) base of memory pool is aligned on PAGESIZE boundary
1739 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
1741 return size - SENTINEL_EXTRA - pm_bitmask_size;
1744 if (p == gc.p_cache)
1745 return gc.size_cache;
1747 // Check for interior pointer
1749 // 1) size is a power of 2 for less than PAGESIZE values
1750 // 2) base of memory pool is aligned on PAGESIZE boundary
1751 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
1755 gc.size_cache = size - pm_bitmask_size;
1757 return gc.size_cache;
1763 * Verify that pointer p:
1764 * 1) belongs to this memory pool
1765 * 2) points to the start of an allocated piece of memory
1766 * 3) is not on a free list
1768 private void checkNoSync(void *p)
1772 if (opts.options.sentinel)
1773 sentinel_Invariant(p);
1781 if (opts.options.sentinel)
1782 p = sentinel_sub(p);
1785 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1786 bin = cast(Bins)pool.pagetable[pagenum];
1787 assert(bin <= B_PAGE);
1788 size = binsize[bin];
1789 assert((cast(size_t)p & (size - 1)) == 0);
1795 // Check that p is not on a free list
1796 for (List* list = gc.free_list[bin]; list; list = list.next)
1798 assert(cast(void*)list != p);
1809 private void setStackBottom(void *p)
1811 version (STACKGROWSDOWN)
1813 //p = (void *)((uint *)p + 4);
1814 if (p > gc.stack_bottom)
1816 gc.stack_bottom = p;
1821 //p = (void *)((uint *)p - 4);
1822 if (p < gc.stack_bottom)
1824 gc.stack_bottom = cast(char*)p;
1831 * Retrieve statistics about garbage collection.
1832 * Useful for debugging and tuning.
1834 private GCStats getStats()
1844 for (n = 0; n < gc.pools.length; n++)
1846 Pool* pool = gc.pools[n];
1847 psize += pool.npages * PAGESIZE;
1848 for (size_t j = 0; j < pool.npages; j++)
1850 Bins bin = cast(Bins)pool.pagetable[j];
1853 else if (bin == B_PAGE)
1855 else if (bin < B_PAGE)
1860 for (n = 0; n < B_PAGE; n++)
1862 for (List* list = gc.free_list[n]; list; list = list.next)
1863 flsize += binsize[n];
1866 usize = bsize - flsize;
1868 stats.poolsize = psize;
1869 stats.usedsize = bsize - flsize;
1870 stats.freelistsize = flsize;
1874 /******************* weak-reference support *********************/
1876 private struct WeakPointer
1880 void ondestroy(Object r)
1882 assert(r is reference);
1883 // lock for memory consistency (parallel readers)
1884 // also ensures that weakpointerDestroy can be called while another
1885 // thread is freeing the reference with "delete"
1886 return locked!(void, () {
1893 * Create a weak pointer to the given object.
1894 * Returns a pointer to an opaque struct allocated in C memory.
1896 void* weakpointerCreate( Object r )
1900 // must be allocated in C memory
1901 // 1. to hide the reference from the GC
1902 // 2. the GC doesn't scan delegates added by rt_attachDisposeEvent
1904 auto wp = cast(WeakPointer*)(cstdlib.malloc(WeakPointer.sizeof));
1906 onOutOfMemoryError();
1908 rt_attachDisposeEvent(r, &wp.ondestroy);
1915 * Destroy a weak pointer returned by weakpointerCreate().
1916 * If null is passed, nothing happens.
1918 void weakpointerDestroy( void* p )
1922 auto wp = cast(WeakPointer*)p;
1923 // must be extra careful about the GC or parallel threads
1924 // finalizing the reference at the same time
1925 return locked!(void, () {
1927 rt_detachDisposeEvent(wp.reference, &wp.ondestroy);
1934 * Query a weak pointer and return either the object passed to
1935 * weakpointerCreate, or null if it was free'd in the meantime.
1936 * If null is passed, null is returned.
1938 Object weakpointerGet( void* p )
1942 // NOTE: could avoid the lock by using Fawzi style GC counters but
1943 // that'd require core.sync.Atomic and lots of care about memory
1944 // consistency; it's an optional optimization, see
1945 // http://dsource.org/projects/tango/browser/trunk/user/tango/core/Lifetime.d?rev=5100#L158
1946 return locked!(Object, () {
1947 return (cast(WeakPointer*)p).reference;
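
// A hedged usage sketch of the weak-reference C interface declared below
// (gc_weakpointerCreate()/gc_weakpointerGet()/gc_weakpointerDestroy()): the
// handle lives in C memory and yields null once the referenced object has
// been finalized or collected.
version (none) unittest
{
    auto o = new Object;
    void* wp = gc_weakpointerCreate(o);
    assert (gc_weakpointerGet(wp) is o); // the object is still alive here
    // ... once o becomes unreachable and a collection has run,
    // gc_weakpointerGet(wp) returns null ...
    gc_weakpointerDestroy(wp);
}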
1953 /* ============================ Pool =============================== */
1960 GCBits mark; // entries already scanned, or should not be scanned
1961 GCBits scan; // entries that need to be scanned
1962 GCBits freebits; // entries that are on the free list
1963 GCBits finals; // entries that need finalizer run on them
1964 GCBits noscan; // entries that should not be scanned
1969 /// Cache for findSize()
1973 void clear_cache(void* ptr = null)
1975 if (ptr is null || ptr is this.cached_ptr) {
1976 this.cached_ptr = null;
1977 this.cached_size = 0;
1981 void update_cache(void* ptr, size_t size)
1983 this.cached_ptr = ptr;
1984 this.cached_size = size;
1987 void initialize(size_t npages)
1989 size_t poolsize = npages * PAGESIZE;
1990 assert(poolsize >= POOLSIZE);
1991 baseAddr = cast(byte *) os.alloc(poolsize);
1993 // Some of the code depends on page alignment of memory pools
1994 assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);
2001 topAddr = baseAddr + poolsize;
2003 size_t nbits = cast(size_t)poolsize / 16;
2005 // if the GC will run in parallel in a fork()ed process, we need to
2006 // share the mark bits
2007 os.Vis vis = os.Vis.PRIV;
2008 if (opts.options.fork)
2009 vis = os.Vis.SHARED;
2010 mark.alloc(nbits, vis); // shared between mark and sweep
2011 freebits.alloc(nbits); // not used by the mark phase
2012 scan.alloc(nbits); // only used in the mark phase
2013 finals.alloc(nbits); // not used by the mark phase
2014 noscan.alloc(nbits); // mark phase *MUST* have a snapshot
2016 // all is free when we start
2019 // avoid accidental sweeping of new pools while using eager allocation
2020 if (gc.mark_proc_pid)
2023 pagetable = cast(ubyte*) cstdlib.malloc(npages);
2025 onOutOfMemoryError();
2026 memset(pagetable, B_FREE, npages);
2028 this.npages = npages;
2040 result = os.dealloc(baseAddr, npages * PAGESIZE);
2048 // See Gcx.Dtor() for the rationale of the null check.
2050 cstdlib.free(pagetable);
2052 os.Vis vis = os.Vis.PRIV;
2053 if (opts.options.fork)
2054 vis = os.Vis.SHARED;
2073 //freebits.Invariant();
2074 //finals.Invariant();
2075 //noscan.Invariant();
2079 //if (baseAddr + npages * PAGESIZE != topAddr)
2080 //printf("baseAddr = %p, npages = %d, topAddr = %p\n", baseAddr, npages, topAddr);
2081 assert(baseAddr + npages * PAGESIZE == topAddr);
2084 for (size_t i = 0; i < npages; i++)
2086 Bins bin = cast(Bins)pagetable[i];
2087 assert(bin < B_MAX);
2093 * Allocate n pages from Pool.
2094 * Returns OPFAIL on failure.
2096 size_t allocPages(size_t n)
2102 for (i = 0; i < npages; i++)
2104 if (pagetable[i] == B_FREE)
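
// A minimal sketch, not the actual implementation, of the first-fit search
// allocPages() performs: find n consecutive B_FREE entries in pagetable and
// return the index of the first one, or OPFAIL if no such run exists.
version (none) size_t allocPagesSketch(size_t n)
{
    size_t run = 0;
    for (size_t i = 0; i < npages; i++)
    {
        if (pagetable[i] != B_FREE)
        {
            run = 0;
            continue;
        }
        if (++run == n)
            return i + 1 - n; // first page of the free run
    }
    return OPFAIL;
}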
2117 * Free npages pages starting with pagenum.
2119 void freePages(size_t pagenum, size_t npages)
2121 memset(&pagetable[pagenum], B_FREE, npages);
2126 * Find base address of block containing pointer p.
2127 * Returns null if the pointer doesn't belong to this pool
2129 void* findBase(void *p)
2131 size_t offset = cast(size_t)(p - this.baseAddr);
2132 size_t pagenum = offset / PAGESIZE;
2133 Bins bin = cast(Bins)this.pagetable[pagenum];
2134 // Adjust bit to be at start of allocated memory block
2136 return this.baseAddr + (offset & notbinsize[bin]);
2137 if (bin == B_PAGEPLUS) {
2139 --pagenum, offset -= PAGESIZE;
2140 } while (cast(Bins)this.pagetable[pagenum] == B_PAGEPLUS);
2141 return this.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
2143 // we are in a B_FREE page
2149 * Find size of pointer p.
2150 * Returns 0 if p doesn't belong to this pool; for blocks smaller than PAGESIZE the bin size is returned.
2153 size_t findSize(void *p)
2155 size_t pagenum = cast(size_t)(p - this.baseAddr) / PAGESIZE;
2156 Bins bin = cast(Bins)this.pagetable[pagenum];
2158 return binsize[bin];
2159 if (this.cached_ptr == p)
2160 return this.cached_size;
2161 size_t i = pagenum + 1;
2162 for (; i < this.npages; i++)
2163 if (this.pagetable[i] != B_PAGEPLUS)
2165 this.cached_ptr = p;
2166 this.cached_size = (i - pagenum) * PAGESIZE;
2167 return this.cached_size;
2172 * Used for sorting pools
2174 int opCmp(in Pool other)
2176 if (baseAddr < other.baseAddr)
2179 return cast(int)(baseAddr > other.baseAddr);
2184 /* ============================ SENTINEL =============================== */
2187 const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
2188 const ubyte SENTINEL_POST = 0xF5; // 8 bits
2189 const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;
2192 size_t* sentinel_size(void *p) { return &(cast(size_t *)p)[-2]; }
2193 size_t* sentinel_pre(void *p) { return &(cast(size_t *)p)[-1]; }
2194 ubyte* sentinel_post(void *p) { return &(cast(ubyte *)p)[*sentinel_size(p)]; }
2197 void sentinel_init(void *p, size_t size)
2199 *sentinel_size(p) = size;
2200 *sentinel_pre(p) = SENTINEL_PRE;
2201 *sentinel_post(p) = SENTINEL_POST;
2205 void sentinel_Invariant(void *p)
2207 if (*sentinel_pre(p) != SENTINEL_PRE ||
2208 *sentinel_post(p) != SENTINEL_POST)
2213 void *sentinel_add(void *p)
2215 return p + 2 * size_t.sizeof;
2219 void *sentinel_sub(void *p)
2221 return p - 2 * size_t.sizeof;
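
// Sentinel block layout, a sketch derived from the helpers above: the user
// pointer p sits after two leading words and is followed by a single guard
// byte, which is what SENTINEL_EXTRA accounts for.
//
//   [ size_t size ][ size_t SENTINEL_PRE ][ user data (size bytes) ][ ubyte SENTINEL_POST ]
//   ^ sentinel_sub(p)                     ^ p
//
version (none) unittest
{
    // hypothetical round-trip between the raw block and the user pointer
    void* raw = cstdlib.malloc(SENTINEL_EXTRA + 8);
    void* user = sentinel_add(raw);
    sentinel_init(user, 8);
    sentinel_Invariant(user);
    assert (sentinel_sub(user) is raw);
    cstdlib.free(raw);
}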
2226 /* ============================ C Public Interface ======================== */
2229 private int _termCleanupLevel=1;
2233 /// sets the cleanup level done by gc
2236 /// 2: fullCollect ignoring stack roots (might crash daemonThreads)
2237 /// result !=0 if the value was invalid
2238 int gc_setTermCleanupLevel(int cLevel)
2240 if (cLevel<0 || cLevel>2) return cLevel;
2241 _termCleanupLevel=cLevel;
2245 /// returns the cleanup level done by gc
2246 int gc_getTermCleanupLevel()
2248 return _termCleanupLevel;
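
// A hedged usage sketch: a program that wants to skip the final collection at
// termination (for a faster shutdown) can lower the level to 0 before exiting;
// level 2 forces a full collection that ignores stack roots.
version (none) unittest
{
    gc_setTermCleanupLevel(0);
    assert (gc_getTermCleanupLevel() == 0);
}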
2253 scope (exit) assert (Invariant());
2254 gc = cast(GC*) cstdlib.calloc(1, GC.sizeof);
2257 version (DigitalMars) version(OSX) {
2258 _d_osx_image_init();
2260 // NOTE: The GC must initialize the thread library
2261 // before its first collection.
2267 assert (Invariant());
2268 if (_termCleanupLevel<1) {
2270 } else if (_termCleanupLevel==2){
2271 // a more complete cleanup
2272 // NOTE: There may be daemon threads still running when this routine is
2273 // called. If so, cleaning memory out from under them is a good
2274 // way to make them crash horribly.
2275 // Often this probably doesn't matter much since the app is
2276 // supposed to be shutting down anyway, but for example tests might
2277 // crash (and be considered failed even if the test was ok).
2278 // thus this is not the default and should be enabled by
2279 // I'm disabling cleanup for now until I can think about it some
2282 // not really a 'collect all' -- still scans static data area, roots,
2284 return locked!(void, () {
2290 // default (safe) cleanup
2291 return locked!(void, () {
2299 return locked!(void, () {
2300 assert (Invariant()); scope (exit) assert (Invariant());
2301 assert (gc.disabled > 0);
2308 return locked!(void, () {
2309 assert (Invariant()); scope (exit) assert (Invariant());
2316 return locked!(void, () {
2317 assert (Invariant()); scope (exit) assert (Invariant());
2325 return locked!(void, () {
2326 assert (Invariant()); scope (exit) assert (Invariant());
2331 uint gc_getAttr(void* p)
2335 return locked!(uint, () {
2336 assert (Invariant()); scope (exit) assert (Invariant());
2337 Pool* pool = findPool(p);
2340 auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
2341 return getAttr(pool, bit_i);
2345 uint gc_setAttr(void* p, uint attrs)
2349 return locked!(uint, () {
2350 assert (Invariant()); scope (exit) assert (Invariant());
2351 Pool* pool = findPool(p);
2354 auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
2355 uint old_attrs = getAttr(pool, bit_i);
2356 setAttr(pool, bit_i, attrs);
2361 uint gc_clrAttr(void* p, uint attrs)
2365 return locked!(uint, () {
2366 assert (Invariant()); scope (exit) assert (Invariant());
2367 Pool* pool = findPool(p);
2370 auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
2371 uint old_attrs = getAttr(pool, bit_i);
2372 clrAttr(pool, bit_i, attrs);
2377 void* gc_malloc(size_t size, uint attrs = 0,
2378 PointerMap ptrmap = PointerMap.init)
2382 return locked!(void*, () {
2383 assert (Invariant()); scope (exit) assert (Invariant());
2384 return malloc(size, attrs, ptrmap.bits.ptr);
2388 void* gc_calloc(size_t size, uint attrs = 0,
2389 PointerMap ptrmap = PointerMap.init)
2393 return locked!(void*, () {
2394 assert (Invariant()); scope (exit) assert (Invariant());
2395 return calloc(size, attrs, ptrmap.bits.ptr);
2399 void* gc_realloc(void* p, size_t size, uint attrs = 0,
2400 PointerMap ptrmap = PointerMap.init)
2402 return locked!(void*, () {
2403 assert (Invariant()); scope (exit) assert (Invariant());
2404 return realloc(p, size, attrs, ptrmap.bits.ptr);
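
// A hedged usage sketch of the allocation entry points above: BlkAttr flags
// control whether the block is finalized and whether it is scanned for
// pointers during collections.
version (none) unittest
{
    // a raw byte buffer the collector should never scan for pointers
    void* buf = gc_malloc(1024, BlkAttr.NO_SCAN);
    buf = gc_realloc(buf, 4096, BlkAttr.NO_SCAN);
    gc_free(buf);
}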
2408 size_t gc_extend(void* p, size_t min_size, size_t max_size)
2410 return locked!(size_t, () {
2411 assert (Invariant()); scope (exit) assert (Invariant());
2412 return extend(p, min_size, max_size);
2416 size_t gc_reserve(size_t size)
2420 return locked!(size_t, () {
2421 assert (Invariant()); scope (exit) assert (Invariant());
2422 return reserve(size);
2426 void gc_free(void* p)
2430 return locked!(void, () {
2431 assert (Invariant()); scope (exit) assert (Invariant());
2436 void* gc_addrOf(void* p)
2440 return locked!(void*, () {
2441 assert (Invariant()); scope (exit) assert (Invariant());
2442 Pool* pool = findPool(p);
2445 return pool.findBase(p);
2449 size_t gc_sizeOf(void* p)
2453 return locked!(size_t, () {
2454 assert (Invariant()); scope (exit) assert (Invariant());
2459 BlkInfo gc_query(void* p)
2462 return BlkInfo.init;
2463 return locked!(BlkInfo, () {
2464 assert (Invariant()); scope (exit) assert (Invariant());
2469 // NOTE: This routine is experimental. The stats or function name may change
2470 // before it is made officially available.
2473 return locked!(GCStats, () {
2474 assert (Invariant()); scope (exit) assert (Invariant());
2479 void gc_addRoot(void* p)
2483 return locked!(void, () {
2484 assert (Invariant()); scope (exit) assert (Invariant());
2485 if (gc.roots.append(p) is null)
2486 onOutOfMemoryError();
2490 void gc_addRange(void* p, size_t size)
2492 if (p is null || size == 0)
2494 return locked!(void, () {
2495 assert (Invariant()); scope (exit) assert (Invariant());
2496 if (gc.ranges.append(Range(p, p + size)) is null)
2497 onOutOfMemoryError();
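
// A hedged usage sketch: memory that is not managed by the GC but stores
// pointers into the GC heap (e.g. a malloc()ed buffer) must be registered as
// a range so the objects it references are kept alive.
version (none) unittest
{
    const size_t n = 16;
    void** buf = cast(void**) cstdlib.malloc(n * (void*).sizeof);
    gc_addRange(buf, n * (void*).sizeof);
    // ... store GC-allocated pointers into buf[0 .. n] ...
    gc_removeRange(buf);
    cstdlib.free(buf);
}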
2501 void gc_removeRoot(void* p)
2505 return locked!(void, () {
2506 assert (Invariant()); scope (exit) assert (Invariant());
2507 bool r = gc.roots.remove(p);
2512 void gc_removeRange(void* p)
2516 return locked!(void, () {
2517 assert (Invariant()); scope (exit) assert (Invariant());
2518 bool r = gc.ranges.remove(Range(p, null));
2523 void* gc_weakpointerCreate(Object r)
2525 // weakpointers do their own locking
2526 return weakpointerCreate(r);
2529 void gc_weakpointerDestroy(void* wp)
2531 // weakpointers do their own locking
2532 weakpointerDestroy(wp);
2535 Object gc_weakpointerGet(void* wp)
2537 // weakpointers do their own locking
2538 return weakpointerGet(wp);
2542 // vim: set et sw=4 sts=4 :