From 67f00b96c70725ee445641a158a97fe4f6e735d3 Mon Sep 17 00:00:00 2001 From: Leandro Lucarella Date: Fri, 30 Jul 2010 23:43:53 -0300 Subject: [PATCH] Convert methods to free functions Making the GC an object makes no sense, since you can't instantiate it more than once; it just makes the code unnecessarily indented. The GC struct now only has attributes (several renamed), and they are grouped only for clarity (and to make it easier to calculate the GC memory overhead). All methods are converted to free functions that use a global instance of the GC struct. --- rt/gc/cdgc/gc.d | 3653 ++++++++++++++++++++++++----------------------- 1 file changed, 1843 insertions(+), 1810 deletions(-) diff --git a/rt/gc/cdgc/gc.d b/rt/gc/cdgc/gc.d index 064f81e..dbcd518 100644 --- a/rt/gc/cdgc/gc.d +++ b/rt/gc/cdgc/gc.d @@ -184,2281 +184,2285 @@ const uint notbinsize[B_MAX] = [ ~(16u-1),~(32u-1),~(64u-1),~(128u-1),~(256u-1), /* ============================ GC =============================== */ -class GCLock { } // just a dummy so we can get a global lock +class GCLock {} // just a dummy so we can get a global lock struct GC { - ClassInfo lock; // global lock + // global lock + ClassInfo lock; - void *p_cache; + void* p_cache; size_t size_cache; - uint noStack; // !=0 means don't scan stack - uint anychanges; - void *stackBottom; + // !=0 means don't scan stack + uint no_stack; + bool any_changes; + void* stack_bottom; uint inited; - int disabled; // turn off collections if >0 + /// Turn off collections if > 0 + int disabled; - byte *minAddr; // min(baseAddr) - byte *maxAddr; // max(topAddr) + /// min(pool.baseAddr) + byte *min_addr; + /// max(pool.topAddr) + byte *max_addr; - List *bucket[B_MAX]; // free list for each size + /// Free list for each size + List*[B_MAX] free_list; dynarray.DynArray!(void*) roots; dynarray.DynArray!(Range) ranges; dynarray.DynArray!(Pool) pools; Stats stats; +} +private GC* gc; - invariant - { - if (inited) - { - //printf("Gcx.invariant(): this = %p\n", this); - size_t i; - - for (i = 0; i < pools.length; i++) - { - Pool* pool = pools[i]; - pool.Invariant(); - if (i == 0) - { - assert(minAddr == pool.baseAddr); - } - if (i + 1 < pools.length) - { - assert(*pool < pools[i + 1]); - } - else if (i + 1 == pools.length) - { - assert(maxAddr == pool.topAddr); - } - } - - roots.Invariant(); - ranges.Invariant(); - - for (i = 0; i < ranges.length; i++) - { - assert(ranges[i].pbot); - assert(ranges[i].ptop); - assert(ranges[i].pbot <= ranges[i].ptop); - } - - for (i = 0; i < B_PAGE; i++) +bool Invariant() +{ + assert (gc !is null); + if (gc.inited) { + for (size_t i = 0; i < gc.pools.length; i++) { + Pool* pool = gc.pools[i]; + pool.Invariant(); + if (i == 0) + assert(gc.min_addr == pool.baseAddr); + if (i + 1 < gc.pools.length) + assert(*pool < gc.pools[i + 1]); + else if (i + 1 == gc.pools.length) + assert(gc.max_addr == pool.topAddr); + } + + gc.roots.Invariant(); + gc.ranges.Invariant(); + + for (size_t i = 0; i < gc.ranges.length; i++) { + assert(gc.ranges[i].pbot); + assert(gc.ranges[i].ptop); + assert(gc.ranges[i].pbot <= gc.ranges[i].ptop); + } + + for (size_t i = 0; i < B_PAGE; i++) + for (List *list = gc.free_list[i]; list; list = list.next) { - for (List *list = bucket[i]; list; list = list.next) - { - } } - } } + return true; +} - /** - * Find Pool that pointer is in. - * Return null if not in a Pool. - * Assume pools is sorted. 
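
The shape of the conversion, visible in the Invariant() hunk above, as a minimal sketch (GCState, gcState, and enableSketch are hypothetical names, not code from this patch): the singleton struct keeps only state, and what were methods become module-level functions reading one private global instance.

    // editorial sketch of the pattern this commit applies, simplified
    struct GCState
    {
        int disabled; // data only, no methods
    }

    private GCState* gcState; // the single global instance

    void enableSketch()
    {
        assert (gcState.disabled > 0);
        gcState.disabled--; // was: this.disabled-- inside a method
    }
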
+ */ +Pool *findPool(void *p) +{ + if (p >= gc.min_addr && p < gc.max_addr) { - if (p >= minAddr && p < maxAddr) + if (gc.pools.length == 1) { - if (pools.length == 1) - { - return pools[0]; - } + return gc.pools[0]; + } - for (size_t i = 0; i < pools.length; i++) + for (size_t i = 0; i < gc.pools.length; i++) + { + Pool* pool = gc.pools[i]; + if (p < pool.topAddr) { - Pool* pool = pools[i]; - if (p < pool.topAddr) - { - if (pool.baseAddr <= p) - return pool; - break; - } + if (pool.baseAddr <= p) + return pool; + break; } } - return null; } + return null; +} - /** - * Find base address of block containing pointer p. - * Returns null if not a gc'd pointer - */ - void* findBase(void *p) +/** + * Find base address of block containing pointer p. + * Returns null if not a gc'd pointer + */ +void* findBase(void *p) +{ + Pool *pool; + + pool = findPool(p); + if (pool) { - Pool *pool; + size_t offset = cast(size_t)(p - pool.baseAddr); + size_t pn = offset / PAGESIZE; + Bins bin = cast(Bins)pool.pagetable[pn]; - pool = findPool(p); - if (pool) + // Adjust bit to be at start of allocated memory block + if (bin <= B_PAGE) { - size_t offset = cast(size_t)(p - pool.baseAddr); - size_t pn = offset / PAGESIZE; - Bins bin = cast(Bins)pool.pagetable[pn]; - - // Adjust bit to be at start of allocated memory block - if (bin <= B_PAGE) - { - return pool.baseAddr + (offset & notbinsize[bin]); - } - else if (bin == B_PAGEPLUS) + return pool.baseAddr + (offset & notbinsize[bin]); + } + else if (bin == B_PAGEPLUS) + { + do { - do - { - --pn, offset -= PAGESIZE; - } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS); + --pn, offset -= PAGESIZE; + } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS); - return pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1))); - } - else - { - // we are in a B_FREE page - return null; - } + return pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1))); + } + else + { + // we are in a B_FREE page + return null; } - return null; } + return null; +} - /** - * Find size of pointer p. - * Returns 0 if not a gc'd pointer - */ - size_t findSize(void *p) +/** + * Find size of pointer p. 
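
As a standalone illustration of the lookup findPool (defined above) performs, under its stated assumption that pools is sorted by address: the first pool whose topAddr exceeds p either contains p or proves that no pool does. PoolStub and findPoolSketch are hypothetical names; the real code uses Pool and the gc.pools array.

    struct PoolStub { byte* baseAddr; byte* topAddr; }

    PoolStub* findPoolSketch(PoolStub*[] pools, void* p)
    {
        foreach (pool; pools)          // pools sorted by base address
        {
            if (p < cast(void*) pool.topAddr)
                return (cast(void*) pool.baseAddr <= p) ? pool : null;
        }
        return null;                   // p lies above every pool
    }
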
+ * Returns 0 if not a gc'd pointer + */ +size_t findSize(void *p) +{ + Pool* pool; + size_t size = 0; + + pool = findPool(p); + if (pool) { - Pool* pool; - size_t size = 0; + size_t pagenum; + Bins bin; - pool = findPool(p); - if (pool) + pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE; + bin = cast(Bins)pool.pagetable[pagenum]; + size = binsize[bin]; + if (bin == B_PAGE) { - size_t pagenum; - Bins bin; + ubyte* pt; + size_t i; - pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE; - bin = cast(Bins)pool.pagetable[pagenum]; - size = binsize[bin]; - if (bin == B_PAGE) + pt = &pool.pagetable[0]; + for (i = pagenum + 1; i < pool.npages; i++) { - ubyte* pt; - size_t i; - - pt = &pool.pagetable[0]; - for (i = pagenum + 1; i < pool.npages; i++) - { - if (pt[i] != B_PAGEPLUS) - break; - } - size = (i - pagenum) * PAGESIZE; + if (pt[i] != B_PAGEPLUS) + break; } + size = (i - pagenum) * PAGESIZE; } - return size; } + return size; +} - /** - * - */ - BlkInfo getInfo(void* p) - { - Pool* pool; - BlkInfo info; +/** + * + */ +BlkInfo getInfo(void* p) +{ + Pool* pool; + BlkInfo info; - pool = findPool(p); - if (pool) - { - size_t offset = cast(size_t)(p - pool.baseAddr); - size_t pn = offset / PAGESIZE; - Bins bin = cast(Bins)pool.pagetable[pn]; + pool = findPool(p); + if (pool) + { + size_t offset = cast(size_t)(p - pool.baseAddr); + size_t pn = offset / PAGESIZE; + Bins bin = cast(Bins)pool.pagetable[pn]; - //////////////////////////////////////////////////////////////////// - // findAddr - //////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////// + // findAddr + //////////////////////////////////////////////////////////////////// - if (bin <= B_PAGE) + if (bin <= B_PAGE) + { + info.base = pool.baseAddr + (offset & notbinsize[bin]); + } + else if (bin == B_PAGEPLUS) + { + do { - info.base = pool.baseAddr + (offset & notbinsize[bin]); + --pn, offset -= PAGESIZE; } - else if (bin == B_PAGEPLUS) - { - do - { - --pn, offset -= PAGESIZE; - } - while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS); + while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS); - info.base = pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1))); + info.base = pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1))); - // fix bin for use by size calc below - bin = cast(Bins)pool.pagetable[pn]; - } + // fix bin for use by size calc below + bin = cast(Bins)pool.pagetable[pn]; + } - //////////////////////////////////////////////////////////////////// - // findSize - //////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////// + // findSize + //////////////////////////////////////////////////////////////////// - info.size = binsize[bin]; - if (bin == B_PAGE) - { - ubyte* pt; - size_t i; + info.size = binsize[bin]; + if (bin == B_PAGE) + { + ubyte* pt; + size_t i; - pt = &pool.pagetable[0]; - for (i = pn + 1; i < pool.npages; i++) - { - if (pt[i] != B_PAGEPLUS) - break; - } - info.size = (i - pn) * PAGESIZE; + pt = &pool.pagetable[0]; + for (i = pn + 1; i < pool.npages; i++) + { + if (pt[i] != B_PAGEPLUS) + break; } + info.size = (i - pn) * PAGESIZE; + } - //////////////////////////////////////////////////////////////////// - // getAttr - //////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////// + // getAttr + //////////////////////////////////////////////////////////////////// - info.attr = 
getAttr(pool, cast(size_t)(offset / 16)); - if (!(info.attr & BlkAttr.NO_SCAN)) - info.size -= (size_t*).sizeof; // bitmask - } - return info; + info.attr = getAttr(pool, cast(size_t)(offset / 16)); + if (!(info.attr & BlkAttr.NO_SCAN)) + info.size -= (size_t*).sizeof; // bitmask } + return info; +} - /** - * Compute bin for size. - */ - static Bins findBin(size_t size) +/** + * Compute bin for size. + */ +static Bins findBin(size_t size) +{ + Bins bin; + if (size <= 256) { - Bins bin; - if (size <= 256) + if (size <= 64) { - if (size <= 64) - { - if (size <= 16) - bin = B_16; - else if (size <= 32) - bin = B_32; - else - bin = B_64; - } + if (size <= 16) + bin = B_16; + else if (size <= 32) + bin = B_32; else - { - if (size <= 128) - bin = B_128; - else - bin = B_256; - } + bin = B_64; } else { - if (size <= 1024) - { - if (size <= 512) - bin = B_512; - else - bin = B_1024; - } + if (size <= 128) + bin = B_128; else - { - if (size <= 2048) - bin = B_2048; - else - bin = B_PAGE; - } + bin = B_256; + } + } + else + { + if (size <= 1024) + { + if (size <= 512) + bin = B_512; + else + bin = B_1024; + } + else + { + if (size <= 2048) + bin = B_2048; + else + bin = B_PAGE; } - return bin; } + return bin; +} - /** - * Allocate a new pool of at least size bytes. - * Sort it into pools. - * Mark all memory in the pool as B_FREE. - * Return the actual number of bytes reserved or 0 on error. - */ - size_t reserveNoSync(size_t size) - { - assert(size != 0); - size_t npages = (size + PAGESIZE - 1) / PAGESIZE; - Pool* pool = newPool(npages); +/** + * Allocate a new pool of at least size bytes. + * Sort it into pools. + * Mark all memory in the pool as B_FREE. + * Return the actual number of bytes reserved or 0 on error. + */ +size_t reserveNoSync(size_t size) +{ + assert(size != 0); + size_t npages = (size + PAGESIZE - 1) / PAGESIZE; + Pool* pool = newPool(npages); - if (!pool) - return 0; - return pool.npages * PAGESIZE; - } + if (!pool) + return 0; + return pool.npages * PAGESIZE; +} - /** - * Minimizes physical memory usage by returning free pools to the OS. - */ - void minimizeNoSync() - { - size_t n; - size_t pn; - Pool* pool; +/** + * Minimizes physical memory usage by returning free pools to the OS. + */ +void minimizeNoSync() +{ + size_t n; + size_t pn; + Pool* pool; - for (n = 0; n < pools.length; n++) + for (n = 0; n < gc.pools.length; n++) + { + pool = gc.pools[n]; + for (pn = 0; pn < pool.npages; pn++) { - pool = pools[n]; - for (pn = 0; pn < pool.npages; pn++) - { - if (cast(Bins)pool.pagetable[pn] != B_FREE) - break; - } - if (pn < pool.npages) - continue; - pool.Dtor(); - pools.remove_at(n); - n--; + if (cast(Bins)pool.pagetable[pn] != B_FREE) + break; } - minAddr = pools[0].baseAddr; - maxAddr = pools[pools.length - 1].topAddr; + if (pn < pool.npages) + continue; + pool.Dtor(); + gc.pools.remove_at(n); + n--; } + gc.min_addr = gc.pools[0].baseAddr; + gc.max_addr = gc.pools[gc.pools.length - 1].topAddr; +} - /** - * Allocate a chunk of memory that is larger than a page. - * Return null if out of memory. - */ - void *bigAlloc(size_t size) - { - Pool* pool; - size_t npages; - size_t n; - size_t pn; - size_t freedpages; - void* p; - int state; +/** + * Allocate a chunk of memory that is larger than a page. + * Return null if out of memory. 
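
Both reserveNoSync above and bigAlloc below convert a byte count into whole pages with the same round-up. Isolated as a sketch (npagesFor is hypothetical, and the constant stands in for PAGESIZE, assumed here to be 4096):

    const size_t PAGE = 4096; // stands in for PAGESIZE

    size_t npagesFor(size_t size)
    {
        return (size + PAGE - 1) / PAGE; // round up to whole pages
    }
    // npagesFor(1) == 1, npagesFor(4096) == 1, npagesFor(4097) == 2
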
+ */ +void *bigAlloc(size_t size) +{ + Pool* pool; + size_t npages; + size_t n; + size_t pn; + size_t freedpages; + void* p; + int state; - npages = (size + PAGESIZE - 1) / PAGESIZE; + npages = (size + PAGESIZE - 1) / PAGESIZE; + + for (state = 0; ; ) + { + // This code could use some refinement when repeatedly + // allocating very large arrays. - for (state = 0; ; ) + for (n = 0; n < gc.pools.length; n++) { - // This code could use some refinement when repeatedly - // allocating very large arrays. + pool = gc.pools[n]; + pn = pool.allocPages(npages); + if (pn != OPFAIL) + goto L1; + } - for (n = 0; n < pools.length; n++) + // Failed + switch (state) + { + case 0: + if (gc.disabled) { - pool = pools[n]; - pn = pool.allocPages(npages); - if (pn != OPFAIL) - goto L1; + state = 1; + continue; } - - // Failed - switch (state) + // Try collecting + freedpages = fullcollectshell(); + if (freedpages >= gc.pools.length * ((POOLSIZE / PAGESIZE) / 4)) { - case 0: - if (disabled) - { - state = 1; - continue; - } - // Try collecting - freedpages = fullcollectshell(); - if (freedpages >= pools.length * ((POOLSIZE / PAGESIZE) / 4)) - { - state = 1; - continue; - } - // Release empty pools to prevent bloat - minimize(); - // Allocate new pool - pool = newPool(npages); - if (!pool) - { - state = 2; - continue; - } - pn = pool.allocPages(npages); - assert(pn != OPFAIL); - goto L1; - case 1: - // Release empty pools to prevent bloat - minimize(); - // Allocate new pool - pool = newPool(npages); - if (!pool) - goto Lnomemory; - pn = pool.allocPages(npages); - assert(pn != OPFAIL); - goto L1; - case 2: - goto Lnomemory; - default: - assert(false); + state = 1; + continue; } - } + // Release empty pools to prevent bloat + minimize(); + // Allocate new pool + pool = newPool(npages); + if (!pool) + { + state = 2; + continue; + } + pn = pool.allocPages(npages); + assert(pn != OPFAIL); + goto L1; + case 1: + // Release empty pools to prevent bloat + minimize(); + // Allocate new pool + pool = newPool(npages); + if (!pool) + goto Lnomemory; + pn = pool.allocPages(npages); + assert(pn != OPFAIL); + goto L1; + case 2: + goto Lnomemory; + default: + assert(false); + } + } + + L1: + pool.pagetable[pn] = B_PAGE; + if (npages > 1) + memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1); + p = pool.baseAddr + pn * PAGESIZE; + memset(cast(char *)p + size, 0, npages * PAGESIZE - size); + if (opts.options.mem_stomp) + memset(p, 0xF1, size); + return p; + + Lnomemory: + return null; // let mallocNoSync handle the error +} - L1: - pool.pagetable[pn] = B_PAGE; - if (npages > 1) - memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1); - p = pool.baseAddr + pn * PAGESIZE; - memset(cast(char *)p + size, 0, npages * PAGESIZE - size); - if (opts.options.mem_stomp) - memset(p, 0xF1, size); - return p; - Lnomemory: - return null; // let mallocNoSync handle the error +/** + * Allocate a new pool with at least npages in it. + * Sort it into pools. + * Return null if failed. + */ +Pool *newPool(size_t npages) +{ + // Minimum of POOLSIZE + if (npages < POOLSIZE/PAGESIZE) + npages = POOLSIZE/PAGESIZE; + else if (npages > POOLSIZE/PAGESIZE) + { + // Give us 150% of requested size, so there's room to extend + auto n = npages + (npages >> 1); + if (n < size_t.max/PAGESIZE) + npages = n; } - - /** - * Allocate a new pool with at least npages in it. - * Sort it into pools. - * Return null if failed. 
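
The "150% of requested size" policy that newPool (below) applies, isolated as a sketch (growRequest is a hypothetical name; the overflow guard mirrors the n < size_t.max/PAGESIZE check, again assuming a 4096-byte page):

    size_t growRequest(size_t npages)
    {
        auto n = npages + (npages >> 1);             // 150% of the request
        return (n < size_t.max / 4096) ? n : npages; // keep original on overflow
    }
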
- */ - Pool *newPool(size_t npages) + // Allocate successively larger pools up to 8 megs + if (gc.pools.length) { - // Minimum of POOLSIZE - if (npages < POOLSIZE/PAGESIZE) - npages = POOLSIZE/PAGESIZE; - else if (npages > POOLSIZE/PAGESIZE) - { - // Give us 150% of requested size, so there's room to extend - auto n = npages + (npages >> 1); - if (n < size_t.max/PAGESIZE) - npages = n; - } - - // Allocate successively larger pools up to 8 megs - if (pools.length) - { - size_t n = pools.length; - if (n > 8) - n = 8; // cap pool size at 8 megs - n *= (POOLSIZE / PAGESIZE); - if (npages < n) - npages = n; - } - - Pool p; - p.initialize(npages); - if (!p.baseAddr) - { - p.Dtor(); - return null; - } - - Pool* pool = pools.insert_sorted(p); - if (pool) - { - minAddr = pools[0].baseAddr; - maxAddr = pools[pools.length - 1].topAddr; - } - return pool; + size_t n = gc.pools.length; + if (n > 8) + n = 8; // cap pool size at 8 megs + n *= (POOLSIZE / PAGESIZE); + if (npages < n) + npages = n; } - - /** - * Allocate a page of bin's. - * Returns: - * 0 failed - */ - int allocPage(Bins bin) + Pool p; + p.initialize(npages); + if (!p.baseAddr) { - Pool* pool; - size_t n; - size_t pn; - byte* p; - byte* ptop; + p.Dtor(); + return null; + } - for (n = 0; n < pools.length; n++) - { - pool = pools[n]; - pn = pool.allocPages(1); - if (pn != OPFAIL) - goto L1; - } - return 0; // failed + Pool* pool = gc.pools.insert_sorted(p); + if (pool) + { + gc.min_addr = gc.pools[0].baseAddr; + gc.max_addr = gc.pools[gc.pools.length - 1].topAddr; + } + return pool; +} - L1: - pool.pagetable[pn] = cast(ubyte)bin; - // Convert page to free list - size_t size = binsize[bin]; - List **b = &bucket[bin]; +/** + * Allocate a page of bin's. + * Returns: + * 0 failed + */ +int allocPage(Bins bin) +{ + Pool* pool; + size_t n; + size_t pn; + byte* p; + byte* ptop; - p = pool.baseAddr + pn * PAGESIZE; - ptop = p + PAGESIZE; - for (; p < ptop; p += size) - { - (cast(List *)p).next = *b; - *b = cast(List *)p; - } - return 1; + for (n = 0; n < gc.pools.length; n++) + { + pool = gc.pools[n]; + pn = pool.allocPages(1); + if (pn != OPFAIL) + goto L1; } + return 0; // failed + L1: + pool.pagetable[pn] = cast(ubyte)bin; - /** - * Marks a range of memory using the conservative bit mask. Used for - * the stack, for the data segment, and additional memory ranges. - */ - void mark_conservative(void* pbot, void* ptop) + // Convert page to free list + size_t size = binsize[bin]; + List **b = &gc.free_list[bin]; + + p = pool.baseAddr + pn * PAGESIZE; + ptop = p + PAGESIZE; + for (; p < ptop; p += size) { - mark(pbot, ptop, PointerMap.init.bits.ptr); + (cast(List *)p).next = *b; + *b = cast(List *)p; } + return 1; +} - /** - * Search a range of memory values and mark any pointers into the GC pool. - */ - void mark(void *pbot, void *ptop, size_t* pm_bitmask) - { - const BITS_PER_WORD = size_t.sizeof * 8; +/** + * Marks a range of memory using the conservative bit mask. Used for + * the stack, for the data segment, and additional memory ranges. + */ +void mark_conservative(void* pbot, void* ptop) +{ + mark(pbot, ptop, PointerMap.init.bits.ptr); +} - void **p1 = cast(void **)pbot; - void **p2 = cast(void **)ptop; - size_t pcache = 0; - uint changes = 0; - size_t type_size = pm_bitmask[0]; - size_t* pm_bits = pm_bitmask + 1; +/** + * Search a range of memory values and mark any pointers into the GC pool. 
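
The core of the conservative scan that mark() (below) performs, reduced to a sketch: every word in the range is treated as a potential pointer and filtered against the heap's address bounds before the expensive pool lookup. scanWords is hypothetical and takes the bounds as parameters instead of reading gc.min_addr/gc.max_addr.

    void scanWords(void* pbot, void* ptop, void* minAddr, void* maxAddr)
    {
        for (void** p = cast(void**) pbot; p < cast(void**) ptop; p++)
        {
            void* candidate = *p;
            if (candidate < minAddr || candidate >= maxAddr)
                continue; // cheap rejection before the pool lookup
            // a real scanner now calls findPool() and sets mark/scan bits
        }
    }
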
+ */ +void mark(void *pbot, void *ptop, size_t* pm_bitmask) +{ + // TODO: make our own assert because assert uses the GC + assert (pbot <= ptop); - //printf("marking range: %p -> %p\n", pbot, ptop); - for (; p1 + type_size <= p2; p1 += type_size) { - for (size_t n = 0; n < type_size; n++) { - // scan bit set for this word - if (!(pm_bits[n / BITS_PER_WORD] & (1 << (n % BITS_PER_WORD)))) - continue; + const BITS_PER_WORD = size_t.sizeof * 8; - void* p = *(p1 + n); + void **p1 = cast(void **)pbot; + void **p2 = cast(void **)ptop; + size_t pcache = 0; + uint changes = 0; - if (p < minAddr || p >= maxAddr) - continue; + size_t type_size = pm_bitmask[0]; + size_t* pm_bits = pm_bitmask + 1; - if ((cast(size_t)p & ~(PAGESIZE-1)) == pcache) - continue; + //printf("marking range: %p -> %p\n", pbot, ptop); + for (; p1 + type_size <= p2; p1 += type_size) { + for (size_t n = 0; n < type_size; n++) { + // scan bit set for this word + if (!(pm_bits[n / BITS_PER_WORD] & (1 << (n % BITS_PER_WORD)))) + continue; + + void* p = *(p1 + n); + + if (p < gc.min_addr || p >= gc.max_addr) + continue; + + if ((cast(size_t)p & ~(PAGESIZE-1)) == pcache) + continue; + + Pool* pool = findPool(p); + if (pool) + { + size_t offset = cast(size_t)(p - pool.baseAddr); + size_t bit_i; + size_t pn = offset / PAGESIZE; + Bins bin = cast(Bins)pool.pagetable[pn]; - Pool* pool = findPool(p); - if (pool) + // Adjust bit to be at start of allocated memory block + if (bin <= B_PAGE) + bit_i = (offset & notbinsize[bin]) >> 4; + else if (bin == B_PAGEPLUS) { - size_t offset = cast(size_t)(p - pool.baseAddr); - size_t bit_i; - size_t pn = offset / PAGESIZE; - Bins bin = cast(Bins)pool.pagetable[pn]; - - // Adjust bit to be at start of allocated memory block - if (bin <= B_PAGE) - bit_i = (offset & notbinsize[bin]) >> 4; - else if (bin == B_PAGEPLUS) - { - do - { - --pn; - } - while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS); - bit_i = pn * (PAGESIZE / 16); - } - else + do { - // Don't mark bits in B_FREE pages - continue; + --pn; } + while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS); + bit_i = pn * (PAGESIZE / 16); + } + else + { + // Don't mark bits in B_FREE pages + continue; + } - if (bin >= B_PAGE) // Cache B_PAGE and B_PAGEPLUS lookups - pcache = cast(size_t)p & ~(PAGESIZE-1); + if (bin >= B_PAGE) // Cache B_PAGE and B_PAGEPLUS lookups + pcache = cast(size_t)p & ~(PAGESIZE-1); - if (!pool.mark.test(bit_i)) + if (!pool.mark.test(bit_i)) + { + pool.mark.set(bit_i); + if (!pool.noscan.test(bit_i)) { - pool.mark.set(bit_i); - if (!pool.noscan.test(bit_i)) - { - pool.scan.set(bit_i); - changes = 1; - } + pool.scan.set(bit_i); + changes = 1; } } } } - anychanges |= changes; } + if (changes) + gc.any_changes = true; +} - /** - * Return number of full pages free'd. - */ - size_t fullcollectshell() - { - stats.collection_started(); - scope (exit) - stats.collection_finished(); +/** + * Return number of full pages free'd. 
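
mark() above addresses every pool bitmap in 16-byte granules; the offset-to-bit mapping it (and the attribute helpers later in this patch) rely on, as a sketch (bitIndexFor is a hypothetical name):

    size_t bitIndexFor(byte* poolBase, void* p)
    {
        // one mark/scan/freebits/noscan bit per 16-byte granule of the pool
        return cast(size_t)(cast(byte*) p - poolBase) / 16;
    }
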
+ */ +size_t fullcollectshell() +{ + gc.stats.collection_started(); + scope (exit) + gc.stats.collection_finished(); - // The purpose of the 'shell' is to ensure all the registers - // get put on the stack so they'll be scanned - void *sp; - size_t result; - version (GNU) - { - gcc.builtins.__builtin_unwind_init(); - sp = & sp; - } - else version(LDC) + // The purpose of the 'shell' is to ensure all the registers + // get put on the stack so they'll be scanned + void *sp; + size_t result; + version (GNU) + { + gcc.builtins.__builtin_unwind_init(); + sp = & sp; + } + else version(LDC) + { + version(X86) { - version(X86) - { - uint eax,ecx,edx,ebx,ebp,esi,edi; - asm - { - mov eax[EBP], EAX ; - mov ecx[EBP], ECX ; - mov edx[EBP], EDX ; - mov ebx[EBP], EBX ; - mov ebp[EBP], EBP ; - mov esi[EBP], ESI ; - mov edi[EBP], EDI ; - mov sp[EBP], ESP ; - } - } - else version (X86_64) - { - ulong rax,rbx,rcx,rdx,rbp,rsi,rdi,r8,r9,r10,r11,r12,r13,r14,r15; - asm - { - movq rax[RBP], RAX ; - movq rbx[RBP], RBX ; - movq rcx[RBP], RCX ; - movq rdx[RBP], RDX ; - movq rbp[RBP], RBP ; - movq rsi[RBP], RSI ; - movq rdi[RBP], RDI ; - movq r8 [RBP], R8 ; - movq r9 [RBP], R9 ; - movq r10[RBP], R10 ; - movq r11[RBP], R11 ; - movq r12[RBP], R12 ; - movq r13[RBP], R13 ; - movq r14[RBP], R14 ; - movq r15[RBP], R15 ; - movq sp[RBP], RSP ; - } - } - else + uint eax,ecx,edx,ebx,ebp,esi,edi; + asm { - static assert( false, "Architecture not supported." ); + mov eax[EBP], EAX ; + mov ecx[EBP], ECX ; + mov edx[EBP], EDX ; + mov ebx[EBP], EBX ; + mov ebp[EBP], EBP ; + mov esi[EBP], ESI ; + mov edi[EBP], EDI ; + mov sp[EBP], ESP ; } } - else - { - asm - { - pushad ; - mov sp[EBP],ESP ; - } - } - result = fullcollect(sp); - version (GNU) - { - // nothing to do - } - else version(LDC) + else version (X86_64) { - // nothing to do + ulong rax,rbx,rcx,rdx,rbp,rsi,rdi,r8,r9,r10,r11,r12,r13,r14,r15; + asm + { + movq rax[RBP], RAX ; + movq rbx[RBP], RBX ; + movq rcx[RBP], RCX ; + movq rdx[RBP], RDX ; + movq rbp[RBP], RBP ; + movq rsi[RBP], RSI ; + movq rdi[RBP], RDI ; + movq r8 [RBP], R8 ; + movq r9 [RBP], R9 ; + movq r10[RBP], R10 ; + movq r11[RBP], R11 ; + movq r12[RBP], R12 ; + movq r13[RBP], R13 ; + movq r14[RBP], R14 ; + movq r15[RBP], R15 ; + movq sp[RBP], RSP ; + } } else { - asm - { - popad ; - } + static assert( false, "Architecture not supported." 
); } - return result; } + else + { + asm + { + pushad ; + mov sp[EBP],ESP ; + } + } + result = fullcollect(sp); + version (GNU) + { + // nothing to do + } + else version(LDC) + { + // nothing to do + } + else + { + asm + { + popad ; + } + } + return result; +} - /** - * - */ - size_t fullcollect(void *stackTop) - { - size_t n; - Pool* pool; +/** + * + */ +size_t fullcollect(void *stackTop) +{ + size_t n; + Pool* pool; - debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n"); + debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n"); - thread_suspendAll(); - stats.world_stopped(); + thread_suspendAll(); + gc.stats.world_stopped(); - p_cache = null; - size_cache = 0; + gc.p_cache = null; + gc.size_cache = 0; - anychanges = 0; - for (n = 0; n < pools.length; n++) - { - pool = pools[n]; - pool.mark.zero(); - pool.scan.zero(); - pool.freebits.zero(); - } + gc.any_changes = false; + for (n = 0; n < gc.pools.length; n++) + { + pool = gc.pools[n]; + pool.mark.zero(); + pool.scan.zero(); + pool.freebits.zero(); + } - // Mark each free entry, so it doesn't get scanned - for (n = 0; n < B_PAGE; n++) + // Mark each free entry, so it doesn't get scanned + for (n = 0; n < B_PAGE; n++) + { + for (List *list = gc.free_list[n]; list; list = list.next) { - for (List *list = bucket[n]; list; list = list.next) - { - pool = findPool(list); - assert(pool); - pool.freebits.set(cast(size_t)(cast(byte*)list - pool.baseAddr) / 16); - } + pool = findPool(list); + assert(pool); + pool.freebits.set(cast(size_t)(cast(byte*)list - pool.baseAddr) / 16); } + } - for (n = 0; n < pools.length; n++) - { - pool = pools[n]; - pool.mark.copy(&pool.freebits); - } + for (n = 0; n < gc.pools.length; n++) + { + pool = gc.pools[n]; + pool.mark.copy(&pool.freebits); + } + + void mark_conservative_dg(void* pbot, void* ptop) + { + mark_conservative(pbot, ptop); + } - rt_scanStaticData( &mark_conservative ); + rt_scanStaticData(&mark_conservative_dg); - if (!noStack) - { - // Scan stacks and registers for each paused thread - thread_scanAll( &mark_conservative, stackTop ); - } + if (!gc.no_stack) + { + // Scan stacks and registers for each paused thread + thread_scanAll(&mark_conservative_dg, stackTop); + } - // Scan roots - debug(COLLECT_PRINTF) printf("scan roots[]\n"); - mark_conservative(roots.ptr, roots.ptr + roots.length); + // Scan roots + debug(COLLECT_PRINTF) printf("scan roots[]\n"); + mark_conservative(gc.roots.ptr, gc.roots.ptr + gc.roots.length); - // Scan ranges - debug(COLLECT_PRINTF) printf("scan ranges[]\n"); - for (n = 0; n < ranges.length; n++) - { - debug(COLLECT_PRINTF) printf("\t%x .. %x\n", ranges[n].pbot, ranges[n].ptop); - mark_conservative(ranges[n].pbot, ranges[n].ptop); - } + // Scan ranges + debug(COLLECT_PRINTF) printf("scan ranges[]\n"); + for (n = 0; n < gc.ranges.length; n++) + { + debug(COLLECT_PRINTF) printf("\t%x .. 
%x\n", gc.ranges[n].pbot, gc.ranges[n].ptop); + mark_conservative(gc.ranges[n].pbot, gc.ranges[n].ptop); + } - debug(COLLECT_PRINTF) printf("\tscan heap\n"); - while (anychanges) + debug(COLLECT_PRINTF) printf("\tscan heap\n"); + while (gc.any_changes) + { + gc.any_changes = false; + for (n = 0; n < gc.pools.length; n++) { - anychanges = 0; - for (n = 0; n < pools.length; n++) + uint *bbase; + uint *b; + uint *btop; + + pool = gc.pools[n]; + + bbase = pool.scan.base(); + btop = bbase + pool.scan.nwords; + for (b = bbase; b < btop;) { - uint *bbase; - uint *b; - uint *btop; + Bins bin; + size_t pn; + size_t u; + size_t bitm; + byte* o; - pool = pools[n]; + bitm = *b; + if (!bitm) + { + b++; + continue; + } + *b = 0; - bbase = pool.scan.base(); - btop = bbase + pool.scan.nwords; - for (b = bbase; b < btop;) + o = pool.baseAddr + (b - bbase) * 32 * 16; + if (!(bitm & 0xFFFF)) { - Bins bin; - size_t pn; - size_t u; - size_t bitm; - byte* o; - - bitm = *b; - if (!bitm) - { - b++; + bitm >>= 16; + o += 16 * 16; + } + for (; bitm; o += 16, bitm >>= 1) + { + if (!(bitm & 1)) continue; - } - *b = 0; - o = pool.baseAddr + (b - bbase) * 32 * 16; - if (!(bitm & 0xFFFF)) - { - bitm >>= 16; - o += 16 * 16; + pn = cast(size_t)(o - pool.baseAddr) / PAGESIZE; + bin = cast(Bins)pool.pagetable[pn]; + if (bin < B_PAGE) { + if (opts.options.conservative) + mark_conservative(o, o + binsize[bin]); + else { + auto end_of_blk = cast(size_t**)(o + + binsize[bin] - size_t.sizeof); + size_t* pm_bitmask = *end_of_blk; + mark(o, end_of_blk, pm_bitmask); + } } - for (; bitm; o += 16, bitm >>= 1) + else if (bin == B_PAGE || bin == B_PAGEPLUS) { - if (!(bitm & 1)) - continue; - - pn = cast(size_t)(o - pool.baseAddr) / PAGESIZE; - bin = cast(Bins)pool.pagetable[pn]; - if (bin < B_PAGE) { - if (opts.options.conservative) - mark_conservative(o, o + binsize[bin]); - else { - auto end_of_blk = cast(size_t**)(o + - binsize[bin] - size_t.sizeof); - size_t* pm_bitmask = *end_of_blk; - mark(o, end_of_blk, pm_bitmask); - } - } - else if (bin == B_PAGE || bin == B_PAGEPLUS) + if (bin == B_PAGEPLUS) { - if (bin == B_PAGEPLUS) - { - while (pool.pagetable[pn - 1] != B_PAGE) - pn--; - } - u = 1; - while (pn + u < pool.npages && - pool.pagetable[pn + u] == B_PAGEPLUS) - u++; - - size_t blk_size = u * PAGESIZE; - if (opts.options.conservative) - mark_conservative(o, o + blk_size); - else { - auto end_of_blk = cast(size_t**)(o + blk_size - - size_t.sizeof); - size_t* pm_bitmask = *end_of_blk; - mark(o, end_of_blk, pm_bitmask); - } + while (pool.pagetable[pn - 1] != B_PAGE) + pn--; + } + u = 1; + while (pn + u < pool.npages && + pool.pagetable[pn + u] == B_PAGEPLUS) + u++; + + size_t blk_size = u * PAGESIZE; + if (opts.options.conservative) + mark_conservative(o, o + blk_size); + else { + auto end_of_blk = cast(size_t**)(o + blk_size - + size_t.sizeof); + size_t* pm_bitmask = *end_of_blk; + mark(o, end_of_blk, pm_bitmask); } } } } } + } - thread_resumeAll(); - stats.world_started(); + thread_resumeAll(); + gc.stats.world_started(); - // Free up everything not marked - debug(COLLECT_PRINTF) printf("\tfree'ing\n"); - size_t freedpages = 0; - size_t freed = 0; - for (n = 0; n < pools.length; n++) + // Free up everything not marked + debug(COLLECT_PRINTF) printf("\tfree'ing\n"); + size_t freedpages = 0; + size_t freed = 0; + for (n = 0; n < gc.pools.length; n++) + { + pool = gc.pools[n]; + uint* bbase = pool.mark.base(); + size_t pn; + for (pn = 0; pn < pool.npages; pn++, bbase += PAGESIZE / (32 * 16)) { - pool = pools[n]; - uint* bbase = 
pool.mark.base(); - size_t pn; - for (pn = 0; pn < pool.npages; pn++, bbase += PAGESIZE / (32 * 16)) + Bins bin = cast(Bins)pool.pagetable[pn]; + + if (bin < B_PAGE) { - Bins bin = cast(Bins)pool.pagetable[pn]; + auto size = binsize[bin]; + byte* p = pool.baseAddr + pn * PAGESIZE; + byte* ptop = p + PAGESIZE; + size_t bit_i = pn * (PAGESIZE/16); + size_t bit_stride = size / 16; - if (bin < B_PAGE) +version(none) // BUG: doesn't work because freebits() must also be cleared +{ + // If free'd entire page + if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 && + bbase[3] == 0 && bbase[4] == 0 && bbase[5] == 0 && + bbase[6] == 0 && bbase[7] == 0) { - auto size = binsize[bin]; - byte* p = pool.baseAddr + pn * PAGESIZE; - byte* ptop = p + PAGESIZE; - size_t bit_i = pn * (PAGESIZE/16); - size_t bit_stride = size / 16; - - version(none) // BUG: doesn't work because freebits() must also be cleared - { - // If free'd entire page - if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 && - bbase[3] == 0 && bbase[4] == 0 && bbase[5] == 0 && - bbase[6] == 0 && bbase[7] == 0) - { - for (; p < ptop; p += size, bit_i += bit_stride) - { - if (pool.finals.nbits && pool.finals.testClear(bit_i)) { - if (opts.options.sentinel) - rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/); - else - rt_finalize(cast(List *)p, false/*noStack > 0*/); - } - this.clrAttr(pool, bit_i, BlkAttr.ALL_BITS); - - List *list = cast(List *)p; - - if (opts.options.mem_stomp) - memset(p, 0xF3, size); - } - pool.pagetable[pn] = B_FREE; - freed += PAGESIZE; - continue; - } - } for (; p < ptop; p += size, bit_i += bit_stride) { - if (!pool.mark.test(bit_i)) - { + if (pool.finals.nbits && pool.finals.testClear(bit_i)) { if (opts.options.sentinel) - sentinel_Invariant(sentinel_add(p)); - - pool.freebits.set(bit_i); - if (pool.finals.nbits && pool.finals.testClear(bit_i)) { - if (opts.options.sentinel) - rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/); - else - rt_finalize(cast(List *)p, false/*noStack > 0*/); - } - clrAttr(pool, bit_i, BlkAttr.ALL_BITS); - - List *list = cast(List *)p; + rt_finalize(cast(List *)sentinel_add(p), false/*gc.no_stack > 0*/); + else + rt_finalize(cast(List *)p, false/*gc.no_stack > 0*/); + } + clrAttr(pool, bit_i, BlkAttr.ALL_BITS); - if (opts.options.mem_stomp) - memset(p, 0xF3, size); + List *list = cast(List *)p; - freed += size; - } + if (opts.options.mem_stomp) + memset(p, 0xF3, size); } + pool.pagetable[pn] = B_FREE; + freed += PAGESIZE; + continue; } - else if (bin == B_PAGE) +} + for (; p < ptop; p += size, bit_i += bit_stride) { - size_t bit_i = pn * (PAGESIZE / 16); if (!pool.mark.test(bit_i)) { - byte *p = pool.baseAddr + pn * PAGESIZE; if (opts.options.sentinel) sentinel_Invariant(sentinel_add(p)); + + pool.freebits.set(bit_i); if (pool.finals.nbits && pool.finals.testClear(bit_i)) { if (opts.options.sentinel) - rt_finalize(sentinel_add(p), false/*noStack > 0*/); + rt_finalize(cast(List *)sentinel_add(p), false/*gc.no_stack > 0*/); else - rt_finalize(p, false/*noStack > 0*/); + rt_finalize(cast(List *)p, false/*gc.no_stack > 0*/); } clrAttr(pool, bit_i, BlkAttr.ALL_BITS); - debug(COLLECT_PRINTF) printf("\tcollecting big %x\n", p); + List *list = cast(List *)p; + + if (opts.options.mem_stomp) + memset(p, 0xF3, size); + + freed += size; + } + } + } + else if (bin == B_PAGE) + { + size_t bit_i = pn * (PAGESIZE / 16); + if (!pool.mark.test(bit_i)) + { + byte *p = pool.baseAddr + pn * PAGESIZE; + if (opts.options.sentinel) + sentinel_Invariant(sentinel_add(p)); + if 
(pool.finals.nbits && pool.finals.testClear(bit_i)) { + if (opts.options.sentinel) + rt_finalize(sentinel_add(p), false/*gc.no_stack > 0*/); + else + rt_finalize(p, false/*gc.no_stack > 0*/); + } + clrAttr(pool, bit_i, BlkAttr.ALL_BITS); + + debug(COLLECT_PRINTF) printf("\tcollecting big %x\n", p); + pool.pagetable[pn] = B_FREE; + freedpages++; + if (opts.options.mem_stomp) + memset(p, 0xF3, PAGESIZE); + while (pn + 1 < pool.npages && pool.pagetable[pn + 1] == B_PAGEPLUS) + { + pn++; pool.pagetable[pn] = B_FREE; freedpages++; + if (opts.options.mem_stomp) - memset(p, 0xF3, PAGESIZE); - while (pn + 1 < pool.npages && pool.pagetable[pn + 1] == B_PAGEPLUS) { - pn++; - pool.pagetable[pn] = B_FREE; - freedpages++; - - if (opts.options.mem_stomp) - { - p += PAGESIZE; - memset(p, 0xF3, PAGESIZE); - } + p += PAGESIZE; + memset(p, 0xF3, PAGESIZE); } } } } } + } - // Zero buckets - bucket[] = null; + // Zero buckets + gc.free_list[] = null; - // Free complete pages, rebuild free list - debug(COLLECT_PRINTF) printf("\tfree complete pages\n"); - size_t recoveredpages = 0; - for (n = 0; n < pools.length; n++) + // Free complete pages, rebuild free list + debug(COLLECT_PRINTF) printf("\tfree complete pages\n"); + size_t recoveredpages = 0; + for (n = 0; n < gc.pools.length; n++) + { + pool = gc.pools[n]; + for (size_t pn = 0; pn < pool.npages; pn++) { - pool = pools[n]; - for (size_t pn = 0; pn < pool.npages; pn++) - { - Bins bin = cast(Bins)pool.pagetable[pn]; - size_t bit_i; - size_t u; + Bins bin = cast(Bins)pool.pagetable[pn]; + size_t bit_i; + size_t u; - if (bin < B_PAGE) + if (bin < B_PAGE) + { + size_t size = binsize[bin]; + size_t bit_stride = size / 16; + size_t bit_base = pn * (PAGESIZE / 16); + size_t bit_top = bit_base + (PAGESIZE / 16); + byte* p; + + bit_i = bit_base; + for (; bit_i < bit_top; bit_i += bit_stride) { - size_t size = binsize[bin]; - size_t bit_stride = size / 16; - size_t bit_base = pn * (PAGESIZE / 16); - size_t bit_top = bit_base + (PAGESIZE / 16); - byte* p; - - bit_i = bit_base; - for (; bit_i < bit_top; bit_i += bit_stride) - { - if (!pool.freebits.test(bit_i)) - goto Lnotfree; - } - pool.pagetable[pn] = B_FREE; - recoveredpages++; - continue; + if (!pool.freebits.test(bit_i)) + goto Lnotfree; + } + pool.pagetable[pn] = B_FREE; + recoveredpages++; + continue; - Lnotfree: - p = pool.baseAddr + pn * PAGESIZE; - for (u = 0; u < PAGESIZE; u += size) + Lnotfree: + p = pool.baseAddr + pn * PAGESIZE; + for (u = 0; u < PAGESIZE; u += size) + { + bit_i = bit_base + u / 16; + if (pool.freebits.test(bit_i)) { - bit_i = bit_base + u / 16; - if (pool.freebits.test(bit_i)) - { - List *list = cast(List *)(p + u); - // avoid unnecessary writes - if (list.next != bucket[bin]) - list.next = bucket[bin]; - bucket[bin] = list; - } + List *list = cast(List *)(p + u); + // avoid unnecessary writes + if (list.next != gc.free_list[bin]) + list.next = gc.free_list[bin]; + gc.free_list[bin] = list; } } } } + } - debug(COLLECT_PRINTF) printf("recovered pages = %d\n", recoveredpages); - debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, pools.length); + debug(COLLECT_PRINTF) printf("recovered pages = %d\n", recoveredpages); + debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, gc.pools.length); - return freedpages + recoveredpages; - } + return freedpages + recoveredpages; +} - /** - * - */ - uint getAttr(Pool* pool, size_t bit_i) - in - { - assert( pool ); - } - body - { - uint attrs; +/** + * + */ +uint 
getAttr(Pool* pool, size_t bit_i) +in +{ + assert( pool ); +} +body +{ + uint attrs; - if (pool.finals.nbits && - pool.finals.test(bit_i)) - attrs |= BlkAttr.FINALIZE; - if (pool.noscan.test(bit_i)) - attrs |= BlkAttr.NO_SCAN; + if (pool.finals.nbits && + pool.finals.test(bit_i)) + attrs |= BlkAttr.FINALIZE; + if (pool.noscan.test(bit_i)) + attrs |= BlkAttr.NO_SCAN; // if (pool.nomove.nbits && // pool.nomove.test(bit_i)) // attrs |= BlkAttr.NO_MOVE; - return attrs; - } + return attrs; +} - /** - * - */ - void setAttr(Pool* pool, size_t bit_i, uint mask) - in +/** + * + */ +void setAttr(Pool* pool, size_t bit_i, uint mask) +in +{ + assert( pool ); +} +body +{ + if (mask & BlkAttr.FINALIZE) { - assert( pool ); + if (!pool.finals.nbits) + pool.finals.alloc(pool.mark.nbits); + pool.finals.set(bit_i); } - body + if (mask & BlkAttr.NO_SCAN) { - if (mask & BlkAttr.FINALIZE) - { - if (!pool.finals.nbits) - pool.finals.alloc(pool.mark.nbits); - pool.finals.set(bit_i); - } - if (mask & BlkAttr.NO_SCAN) - { - pool.noscan.set(bit_i); - } + pool.noscan.set(bit_i); + } // if (mask & BlkAttr.NO_MOVE) // { // if (!pool.nomove.nbits) // pool.nomove.alloc(pool.mark.nbits); // pool.nomove.set(bit_i); // } - } +} - /** - * - */ - void clrAttr(Pool* pool, size_t bit_i, uint mask) - in - { - assert( pool ); - } - body - { - if (mask & BlkAttr.FINALIZE && pool.finals.nbits) - pool.finals.clear(bit_i); - if (mask & BlkAttr.NO_SCAN) - pool.noscan.clear(bit_i); +/** + * + */ +void clrAttr(Pool* pool, size_t bit_i, uint mask) +in +{ + assert( pool ); +} +body +{ + if (mask & BlkAttr.FINALIZE && pool.finals.nbits) + pool.finals.clear(bit_i); + if (mask & BlkAttr.NO_SCAN) + pool.noscan.clear(bit_i); // if (mask & BlkAttr.NO_MOVE && pool.nomove.nbits) // pool.nomove.clear(bit_i); - } +} - void initialize() - { - int dummy; - stackBottom = cast(char*)&dummy; - opts.parse(cstdlib.getenv("D_GC_OPTS")); - lock = GCLock.classinfo; - inited = 1; - setStackBottom(rt_stackBottom()); - stats = Stats(this); - } +void initialize() +{ + int dummy; + gc.stack_bottom = cast(char*)&dummy; + opts.parse(cstdlib.getenv("D_GC_OPTS")); + gc.lock = GCLock.classinfo; + gc.inited = 1; + setStackBottom(rt_stackBottom()); + gc.stats = Stats(gc); +} - /** - * - */ - void enable() +/** + * + */ +void enable() +{ + if (!thread_needLock()) { - if (!thread_needLock()) - { - assert(this.disabled > 0); - this.disabled--; - } - else synchronized (lock) - { - assert(this.disabled > 0); - this.disabled--; - } + assert(gc.disabled > 0); + gc.disabled--; } - - - /** - * - */ - void disable() + else synchronized (gc.lock) { - if (!thread_needLock()) - { - this.disabled++; - } - else synchronized (lock) - { - this.disabled++; - } + assert(gc.disabled > 0); + gc.disabled--; } +} - /** - * - */ - uint getAttr(void* p) +/** + * + */ +void disable() +{ + if (!thread_needLock()) { - if (!p) - { - return 0; - } + gc.disabled++; + } + else synchronized (gc.lock) + { + gc.disabled++; + } +} - uint go() - { - Pool* pool = this.findPool(p); - uint old_attrs = 0; - if (pool) - { - auto bit_i = cast(size_t)(p - pool.baseAddr) / 16; +/** + * + */ +uint getAttr(void* p) +{ + if (!p) + { + return 0; + } - old_attrs = this.getAttr(pool, bit_i); - } - return old_attrs; - } + uint go() + { + Pool* pool = findPool(p); + uint old_attrs = 0; - if (!thread_needLock()) - { - return go(); - } - else synchronized (lock) + if (pool) { - return go(); + auto bit_i = cast(size_t)(p - pool.baseAddr) / 16; + + old_attrs = getAttr(pool, bit_i); } + return old_attrs; } - - /** - * - */ - 
uint setAttr(void* p, uint mask) + if (!thread_needLock()) { - if (!p) - { - return 0; - } + return go(); + } + else synchronized (gc.lock) + { + return go(); + } +} - uint go() - { - Pool* pool = this.findPool(p); - uint old_attrs = 0; - if (pool) - { - auto bit_i = cast(size_t)(p - pool.baseAddr) / 16; +/** + * + */ +uint setAttr(void* p, uint mask) +{ + if (!p) + { + return 0; + } - old_attrs = this.getAttr(pool, bit_i); - this.setAttr(pool, bit_i, mask); - } - return old_attrs; - } + uint go() + { + Pool* pool = findPool(p); + uint old_attrs = 0; - if (!thread_needLock()) - { - return go(); - } - else synchronized (lock) + if (pool) { - return go(); + auto bit_i = cast(size_t)(p - pool.baseAddr) / 16; + + old_attrs = getAttr(pool, bit_i); + setAttr(pool, bit_i, mask); } + return old_attrs; } - - /** - * - */ - uint clrAttr(void* p, uint mask) + if (!thread_needLock()) { - if (!p) - { - return 0; - } + return go(); + } + else synchronized (gc.lock) + { + return go(); + } +} - uint go() - { - Pool* pool = this.findPool(p); - uint old_attrs = 0; - if (pool) - { - auto bit_i = cast(size_t)(p - pool.baseAddr) / 16; +/** + * + */ +uint clrAttr(void* p, uint mask) +{ + if (!p) + { + return 0; + } - old_attrs = this.getAttr(pool, bit_i); - this.clrAttr(pool, bit_i, mask); - } - return old_attrs; - } + uint go() + { + Pool* pool = findPool(p); + uint old_attrs = 0; - if (!thread_needLock()) - { - return go(); - } - else synchronized (lock) + if (pool) { - return go(); + auto bit_i = cast(size_t)(p - pool.baseAddr) / 16; + + old_attrs = getAttr(pool, bit_i); + clrAttr(pool, bit_i, mask); } + return old_attrs; + } + + if (!thread_needLock()) + { + return go(); + } + else synchronized (gc.lock) + { + return go(); } +} - /** - * - */ - void *malloc(size_t size, uint attrs, PointerMap ptrmap) +/** + * + */ +void *malloc(size_t size, uint attrs, PointerMap ptrmap) +{ + if (!size) { - if (!size) - { - return null; - } + return null; + } - if (!thread_needLock()) - { - return mallocNoSync(size, attrs, ptrmap.bits.ptr); - } - else synchronized (lock) - { - return mallocNoSync(size, attrs, ptrmap.bits.ptr); - } + if (!thread_needLock()) + { + return mallocNoSync(size, attrs, ptrmap.bits.ptr); + } + else synchronized (gc.lock) + { + return mallocNoSync(size, attrs, ptrmap.bits.ptr); } +} - // - // - // - private void *mallocNoSync(size_t size, uint attrs, size_t* pm_bitmask) - { - assert(size != 0); +// +// +// +private void *mallocNoSync(size_t size, uint attrs, size_t* pm_bitmask) +{ + assert(size != 0); - stats.malloc_started(size, attrs, pm_bitmask); - scope (exit) - stats.malloc_finished(p); + gc.stats.malloc_started(size, attrs, pm_bitmask); + scope (exit) + gc.stats.malloc_finished(p); - void *p = null; - Bins bin; + void *p = null; + Bins bin; - if (opts.options.sentinel) - size += SENTINEL_EXTRA; + if (opts.options.sentinel) + size += SENTINEL_EXTRA; - bool has_pm = has_pointermap(attrs); - if (has_pm) - size += size_t.sizeof; - - // Compute size bin - // Cache previous binsize lookup - Dave Fladebo. - static size_t lastsize = -1; - static Bins lastbin; - if (size == lastsize) - bin = lastbin; - else - { - bin = this.findBin(size); - lastsize = size; - lastbin = bin; - } + bool has_pm = has_pointermap(attrs); + if (has_pm) + size += size_t.sizeof; - size_t capacity; // to figure out where to store the bitmask - if (bin < B_PAGE) + // Compute size bin + // Cache previous binsize lookup - Dave Fladebo. 
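+    // Aside (editorial sketch, not patch code): the single-entry cache idiom
+    // used just below. Allocation sizes repeat heavily, so remembering the
+    // last (size, bin) pair skips most findBin() calls. In isolation:
+    //
+    //     static size_t lastInput = size_t.max;
+    //     static int lastResult;
+    //     if (input != lastInput) { lastResult = compute(input); lastInput = input; }
+    //     return lastResult;
+    //
+    // where compute() stands for any pure lookup such as findBin().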
+ static size_t lastsize = -1; + static Bins lastbin; + if (size == lastsize) + bin = lastbin; + else + { + bin = findBin(size); + lastsize = size; + lastbin = bin; + } + + size_t capacity; // to figure out where to store the bitmask + if (bin < B_PAGE) + { + p = gc.free_list[bin]; + if (p is null) { - p = this.bucket[bin]; - if (p is null) + if (!allocPage(bin) && !gc.disabled) // try to find a new page { - if (!this.allocPage(bin) && !this.disabled) // try to find a new page + if (!thread_needLock()) { - if (!thread_needLock()) + /* Then we haven't locked it yet. Be sure + * and gc.lock for a collection, since a finalizer + * may start a new thread. + */ + synchronized (gc.lock) { - /* Then we haven't locked it yet. Be sure - * and lock for a collection, since a finalizer - * may start a new thread. - */ - synchronized (lock) - { - this.fullcollectshell(); - } - } - else if (!this.fullcollectshell()) // collect to find a new page - { - //this.newPool(1); + fullcollectshell(); } } - if (!this.bucket[bin] && !this.allocPage(bin)) + else if (!fullcollectshell()) // collect to find a new page { - this.newPool(1); // allocate new pool to find a new page - int result = this.allocPage(bin); - if (!result) - onOutOfMemoryError(); + //newPool(1); } - p = this.bucket[bin]; } - capacity = binsize[bin]; - - // Return next item from free list - this.bucket[bin] = (cast(List*)p).next; - if (!(attrs & BlkAttr.NO_SCAN)) - memset(p + size, 0, capacity - size); - if (opts.options.mem_stomp) - memset(p, 0xF0, size); - } - else - { - p = this.bigAlloc(size); - if (!p) - onOutOfMemoryError(); - // Round the size up to the number of pages needed to store it - size_t npages = (size + PAGESIZE - 1) / PAGESIZE; - capacity = npages * PAGESIZE; - } - - // Store the bit mask AFTER SENTINEL_POST - // TODO: store it BEFORE, so the bitmask is protected too - if (has_pm) { - auto end_of_blk = cast(size_t**)(p + capacity - size_t.sizeof); - *end_of_blk = pm_bitmask; - size -= size_t.sizeof; - } - - if (opts.options.sentinel) { - size -= SENTINEL_EXTRA; - p = sentinel_add(p); - sentinel_init(p, size); + if (!gc.free_list[bin] && !allocPage(bin)) + { + newPool(1); // allocate new pool to find a new page + int result = allocPage(bin); + if (!result) + onOutOfMemoryError(); + } + p = gc.free_list[bin]; } + capacity = binsize[bin]; - if (attrs) - { - Pool *pool = this.findPool(p); - assert(pool); + // Return next item from free list + gc.free_list[bin] = (cast(List*)p).next; + if (!(attrs & BlkAttr.NO_SCAN)) + memset(p + size, 0, capacity - size); + if (opts.options.mem_stomp) + memset(p, 0xF0, size); + } + else + { + p = bigAlloc(size); + if (!p) + onOutOfMemoryError(); + // Round the size up to the number of pages needed to store it + size_t npages = (size + PAGESIZE - 1) / PAGESIZE; + capacity = npages * PAGESIZE; + } - this.setAttr(pool, cast(size_t)(p - pool.baseAddr) / 16, attrs); - } - return p; + // Store the bit mask AFTER SENTINEL_POST + // TODO: store it BEFORE, so the bitmask is protected too + if (has_pm) { + auto end_of_blk = cast(size_t**)(p + capacity - size_t.sizeof); + *end_of_blk = pm_bitmask; + size -= size_t.sizeof; } + if (opts.options.sentinel) { + size -= SENTINEL_EXTRA; + p = sentinel_add(p); + sentinel_init(p, size); + } - /** - * - */ - void *calloc(size_t size, uint attrs, PointerMap ptrmap) + if (attrs) { - if (!size) - { - return null; - } + Pool *pool = findPool(p); + assert(pool); - if (!thread_needLock()) - { - return callocNoSync(size, attrs, ptrmap.bits.ptr); - } - else synchronized (lock) 
- { - return callocNoSync(size, attrs, ptrmap.bits.ptr); - } + setAttr(pool, cast(size_t)(p - pool.baseAddr) / 16, attrs); } + return p; +} - // - // - // - private void *callocNoSync(size_t size, uint attrs, size_t* pm_bitmask) +/** + * + */ +void *calloc(size_t size, uint attrs, PointerMap ptrmap) +{ + if (!size) { - assert(size != 0); + return null; + } - void *p = mallocNoSync(size, attrs, pm_bitmask); - memset(p, 0, size); - return p; + if (!thread_needLock()) + { + return callocNoSync(size, attrs, ptrmap.bits.ptr); } + else synchronized (gc.lock) + { + return callocNoSync(size, attrs, ptrmap.bits.ptr); + } +} - /** - * - */ - void *realloc(void *p, size_t size, uint attrs, PointerMap ptrmap) +// +// +// +private void *callocNoSync(size_t size, uint attrs, size_t* pm_bitmask) +{ + assert(size != 0); + + void *p = mallocNoSync(size, attrs, pm_bitmask); + memset(p, 0, size); + return p; +} + + +/** + * + */ +void *realloc(void *p, size_t size, uint attrs, PointerMap ptrmap) +{ + if (!thread_needLock()) { - if (!thread_needLock()) - { - return reallocNoSync(p, size, attrs, ptrmap.bits.ptr); - } - else synchronized (lock) - { - return reallocNoSync(p, size, attrs, ptrmap.bits.ptr); - } + return reallocNoSync(p, size, attrs, ptrmap.bits.ptr); } + else synchronized (gc.lock) + { + return reallocNoSync(p, size, attrs, ptrmap.bits.ptr); + } +} - // - // - // - private void *reallocNoSync(void *p, size_t size, uint attrs, - size_t* pm_bitmask) +// +// +// +private void *reallocNoSync(void *p, size_t size, uint attrs, + size_t* pm_bitmask) +{ + if (!size) { - if (!size) + if (p) { - if (p) - { - freeNoSync(p); - p = null; - } + freeNoSync(p); + p = null; } - else if (!p) - { - p = mallocNoSync(size, attrs, pm_bitmask); + } + else if (!p) + { + p = mallocNoSync(size, attrs, pm_bitmask); + } + else + { + Pool* pool = findPool(p); + if (pool is null) + return null; + + // Set or retrieve attributes as appropriate + auto bit_i = cast(size_t)(p - pool.baseAddr) / 16; + if (attrs) { + clrAttr(pool, bit_i, BlkAttr.ALL_BITS); + setAttr(pool, bit_i, attrs); } else - { - Pool* pool = this.findPool(p); - if (pool is null) - return null; + attrs = getAttr(pool, bit_i); - // Set or retrieve attributes as appropriate - auto bit_i = cast(size_t)(p - pool.baseAddr) / 16; - if (attrs) { - this.clrAttr(pool, bit_i, BlkAttr.ALL_BITS); - this.setAttr(pool, bit_i, attrs); - } - else - attrs = this.getAttr(pool, bit_i); - - void* blk_base_addr = this.findBase(p); - size_t blk_size = this.findSize(p); - bool has_pm = has_pointermap(attrs); - size_t pm_bitmask_size = 0; - if (has_pm) { - pm_bitmask_size = size_t.sizeof; - // Retrieve pointer map bit mask if appropriate - if (pm_bitmask is null) { - auto end_of_blk = cast(size_t**)(blk_base_addr + - blk_size - size_t.sizeof); - pm_bitmask = *end_of_blk; - } + void* blk_base_addr = findBase(p); + size_t blk_size = findSize(p); + bool has_pm = has_pointermap(attrs); + size_t pm_bitmask_size = 0; + if (has_pm) { + pm_bitmask_size = size_t.sizeof; + // Retrieve pointer map bit mask if appropriate + if (pm_bitmask is null) { + auto end_of_blk = cast(size_t**)(blk_base_addr + + blk_size - size_t.sizeof); + pm_bitmask = *end_of_blk; } + } - if (opts.options.sentinel) + if (opts.options.sentinel) + { + sentinel_Invariant(p); + size_t sentinel_stored_size = *sentinel_size(p); + if (sentinel_stored_size != size) { - sentinel_Invariant(p); - size_t sentinel_stored_size = *sentinel_size(p); - if (sentinel_stored_size != size) - { - void* p2 = mallocNoSync(size, attrs, pm_bitmask); 
- if (sentinel_stored_size < size) - size = sentinel_stored_size; - cstring.memcpy(p2, p, size); - p = p2; - } + void* p2 = mallocNoSync(size, attrs, pm_bitmask); + if (sentinel_stored_size < size) + size = sentinel_stored_size; + cstring.memcpy(p2, p, size); + p = p2; } - else + } + else + { + size += pm_bitmask_size; + if (blk_size >= PAGESIZE && size >= PAGESIZE) { - size += pm_bitmask_size; - if (blk_size >= PAGESIZE && size >= PAGESIZE) - { - auto psz = blk_size / PAGESIZE; - auto newsz = (size + PAGESIZE - 1) / PAGESIZE; - if (newsz == psz) - return p; + auto psz = blk_size / PAGESIZE; + auto newsz = (size + PAGESIZE - 1) / PAGESIZE; + if (newsz == psz) + return p; - auto pagenum = (p - pool.baseAddr) / PAGESIZE; + auto pagenum = (p - pool.baseAddr) / PAGESIZE; - if (newsz < psz) + if (newsz < psz) + { + // Shrink in place + synchronized (gc.lock) { - // Shrink in place - synchronized (lock) - { - if (opts.options.mem_stomp) - memset(p + size - pm_bitmask_size, 0xF2, - blk_size - size - pm_bitmask_size); - pool.freePages(pagenum + newsz, psz - newsz); - } - if (has_pm) { - auto end_of_blk = cast(size_t**)( - blk_base_addr + (PAGESIZE * newsz) - - pm_bitmask_size); - *end_of_blk = pm_bitmask; - } - return p; + if (opts.options.mem_stomp) + memset(p + size - pm_bitmask_size, 0xF2, + blk_size - size - pm_bitmask_size); + pool.freePages(pagenum + newsz, psz - newsz); + } + if (has_pm) { + auto end_of_blk = cast(size_t**)( + blk_base_addr + (PAGESIZE * newsz) - + pm_bitmask_size); + *end_of_blk = pm_bitmask; } - else if (pagenum + newsz <= pool.npages) + return p; + } + else if (pagenum + newsz <= pool.npages) + { + // Attempt to expand in place + synchronized (gc.lock) { - // Attempt to expand in place - synchronized (lock) + for (size_t i = pagenum + psz; 1;) { - for (size_t i = pagenum + psz; 1;) + if (i == pagenum + newsz) { - if (i == pagenum + newsz) - { - if (opts.options.mem_stomp) - memset(p + blk_size - pm_bitmask_size, - 0xF0, size - blk_size - - pm_bitmask_size); - memset(pool.pagetable + pagenum + - psz, B_PAGEPLUS, newsz - psz); - if (has_pm) { - auto end_of_blk = cast(size_t**)( - blk_base_addr + - (PAGESIZE * newsz) - - pm_bitmask_size); - *end_of_blk = pm_bitmask; - } - return p; - } - if (i == pool.npages) - { - break; + if (opts.options.mem_stomp) + memset(p + blk_size - pm_bitmask_size, + 0xF0, size - blk_size + - pm_bitmask_size); + memset(pool.pagetable + pagenum + + psz, B_PAGEPLUS, newsz - psz); + if (has_pm) { + auto end_of_blk = cast(size_t**)( + blk_base_addr + + (PAGESIZE * newsz) - + pm_bitmask_size); + *end_of_blk = pm_bitmask; } - if (pool.pagetable[i] != B_FREE) - break; - i++; + return p; + } + if (i == pool.npages) + { + break; } + if (pool.pagetable[i] != B_FREE) + break; + i++; } } } - // if new size is bigger or less than half - if (blk_size < size || blk_size > size * 2) - { - size -= pm_bitmask_size; - blk_size -= pm_bitmask_size; - void* p2 = mallocNoSync(size, attrs, pm_bitmask); - if (blk_size < size) - size = blk_size; - cstring.memcpy(p2, p, size); - p = p2; - } + } + // if new size is bigger or less than half + if (blk_size < size || blk_size > size * 2) + { + size -= pm_bitmask_size; + blk_size -= pm_bitmask_size; + void* p2 = mallocNoSync(size, attrs, pm_bitmask); + if (blk_size < size) + size = blk_size; + cstring.memcpy(p2, p, size); + p = p2; } } - return p; } + return p; +} - /** - * Attempt to in-place enlarge the memory block pointed to by p by at least - * minbytes beyond its current capacity, up to a maximum of maxsize. 
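+ * A hypothetical caller, sketched in Ddoc form (names are illustrative;
+ * extend is the function this hunk defines):
+ * ---
+ * size_t total = extend(p, 1024, 4096);
+ * if (total == 0)
+ *     return null; // could not grow in place; realloc and copy instead
+ * return p;        // same address, at least 1024 more usable bytes
+ * ---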
This - * does not attempt to move the memory block (like realloc() does). - * - * Returns: - * 0 if could not extend p, - * total size of entire memory block if successful. - */ - size_t extend(void* p, size_t minsize, size_t maxsize) +/** + * Attempt to in-place enlarge the memory block pointed to by p by at least + * minbytes beyond its current capacity, up to a maximum of maxsize. This + * does not attempt to move the memory block (like realloc() does). + * + * Returns: + * 0 if could not extend p, + * total size of entire memory block if successful. + */ +size_t extend(void* p, size_t minsize, size_t maxsize) +{ + if (!thread_needLock()) { - if (!thread_needLock()) - { - return extendNoSync(p, minsize, maxsize); - } - else synchronized (lock) - { - return extendNoSync(p, minsize, maxsize); - } + return extendNoSync(p, minsize, maxsize); } - - - // - // - // - private size_t extendNoSync(void* p, size_t minsize, size_t maxsize) - in + else synchronized (gc.lock) { - assert( minsize <= maxsize ); + return extendNoSync(p, minsize, maxsize); } - body - { - if (opts.options.sentinel) - return 0; +} - Pool* pool = this.findPool(p); - if (pool is null) - return 0; - // Retrieve attributes - auto bit_i = cast(size_t)(p - pool.baseAddr) / 16; - uint attrs = this.getAttr(pool, bit_i); - - void* blk_base_addr = this.findBase(p); - size_t blk_size = this.findSize(p); - bool has_pm = has_pointermap(attrs); - size_t* pm_bitmask = null; - size_t pm_bitmask_size = 0; - if (has_pm) { - pm_bitmask_size = size_t.sizeof; - // Retrieve pointer map bit mask - auto end_of_blk = cast(size_t**)(blk_base_addr + - blk_size - size_t.sizeof); - pm_bitmask = *end_of_blk; +// +// +// +private size_t extendNoSync(void* p, size_t minsize, size_t maxsize) +in +{ + assert( minsize <= maxsize ); +} +body +{ + if (opts.options.sentinel) + return 0; - minsize += size_t.sizeof; - maxsize += size_t.sizeof; - } + Pool* pool = findPool(p); + if (pool is null) + return 0; - if (blk_size < PAGESIZE) - return 0; // cannot extend buckets + // Retrieve attributes + auto bit_i = cast(size_t)(p - pool.baseAddr) / 16; + uint attrs = getAttr(pool, bit_i); - auto psz = blk_size / PAGESIZE; - auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE; - auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE; + void* blk_base_addr = findBase(p); + size_t blk_size = findSize(p); + bool has_pm = has_pointermap(attrs); + size_t* pm_bitmask = null; + size_t pm_bitmask_size = 0; + if (has_pm) { + pm_bitmask_size = size_t.sizeof; + // Retrieve pointer map bit mask + auto end_of_blk = cast(size_t**)(blk_base_addr + + blk_size - size_t.sizeof); + pm_bitmask = *end_of_blk; - auto pagenum = (p - pool.baseAddr) / PAGESIZE; + minsize += size_t.sizeof; + maxsize += size_t.sizeof; + } - size_t sz; - for (sz = 0; sz < maxsz; sz++) - { - auto i = pagenum + psz + sz; - if (i == pool.npages) - break; - if (pool.pagetable[i] != B_FREE) - { - if (sz < minsz) - return 0; - break; - } - } - if (sz < minsz) - return 0; + if (blk_size < PAGESIZE) + return 0; // cannot extend buckets - size_t new_size = (psz + sz) * PAGESIZE; + auto psz = blk_size / PAGESIZE; + auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE; + auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE; - if (opts.options.mem_stomp) - memset(p + blk_size - pm_bitmask_size, 0xF0, - new_size - blk_size - pm_bitmask_size); - memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz); - this.p_cache = null; - this.size_cache = 0; + auto pagenum = (p - pool.baseAddr) / PAGESIZE; - if (has_pm) { - new_size -= size_t.sizeof; - auto 

-    /**
-     *
-     */
-    void free(void *p)
+/**
+ * Reserve space for at least size bytes of memory, growing the heap if
+ * needed. Returns the number of bytes actually reserved (rounded up to
+ * whole pages), or 0 on failure.
+ */
+size_t reserve(size_t size)
+{
+    if (!size)
     {
-        if (!p)
-        {
-            return;
-        }
+        return 0;
+    }

-        if (!thread_needLock())
-        {
-            return freeNoSync(p);
-        }
-        else synchronized (lock)
-        {
-            return freeNoSync(p);
-        }
+    if (!thread_needLock())
+    {
+        return reserveNoSync(size);
     }
+    else synchronized (gc.lock)
+    {
+        return reserveNoSync(size);
+    }
+}

-    //
-    //
-    //
-    private void freeNoSync(void *p)
+/**
+ * Free the memory block pointed to by p. Null pointers and pointers
+ * outside the GC heap are ignored.
+ */
+void free(void *p)
+{
+    if (!p)
     {
-        assert (p);
+        return;
+    }

-        Pool* pool;
-        size_t pagenum;
-        Bins bin;
-        size_t bit_i;
+    if (!thread_needLock())
+    {
+        return freeNoSync(p);
+    }
+    else synchronized (gc.lock)
+    {
+        return freeNoSync(p);
+    }
+}

-        // Find which page it is in
-        pool = this.findPool(p);
-        if (!pool) // if not one of ours
-            return; // ignore
-        if (opts.options.sentinel) {
-            sentinel_Invariant(p);
-            p = sentinel_sub(p);
-        }
-        pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
-        bit_i = cast(size_t)(p - pool.baseAddr) / 16;
-        this.clrAttr(pool, bit_i, BlkAttr.ALL_BITS);

-        bin = cast(Bins)pool.pagetable[pagenum];
-        if (bin == B_PAGE) // if large alloc
-        {
-            // Free pages
-            size_t npages = 1;
-            size_t n = pagenum;
-            while (++n < pool.npages && pool.pagetable[n] == B_PAGEPLUS)
-                npages++;
-            if (opts.options.mem_stomp)
-                memset(p, 0xF2, npages * PAGESIZE);
-            pool.freePages(pagenum, npages);
-        }
-        else
-        {
-            // Add to free list
-            List *list = cast(List*)p;
+//
+//
+//
+private void freeNoSync(void *p)
+{
+    assert (p);
+
+    Pool* pool;
+    size_t pagenum;
+    Bins bin;
+    size_t bit_i;
+
+    // Find which page it is in
+    pool = findPool(p);
+    if (!pool) // if not one of ours
+        return; // ignore
+    if (opts.options.sentinel) {
+        sentinel_Invariant(p);
+        p = sentinel_sub(p);
+    }
+    pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
+    bit_i = cast(size_t)(p - pool.baseAddr) / 16;
+    clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
+
+    bin = cast(Bins)pool.pagetable[pagenum];
+    if (bin == B_PAGE) // if large alloc
+    {
+        // Free pages
+        size_t npages = 1;
+        size_t n = pagenum;
+        while (++n < pool.npages && pool.pagetable[n] == B_PAGEPLUS)
+            npages++;
+        if (opts.options.mem_stomp)
+            memset(p, 0xF2, npages * PAGESIZE);
+        pool.freePages(pagenum, npages);
+    }
+    else
+    {
+        // Add to free list
+        List *list = cast(List*)p;

-            if (opts.options.mem_stomp)
-                memset(p, 0xF2, binsize[bin]);
+        if (opts.options.mem_stomp)
+            memset(p, 0xF2, binsize[bin]);

-            list.next = this.bucket[bin];
-            this.bucket[bin] = list;
-        }
+        list.next = gc.free_list[bin];
+        gc.free_list[bin] = list;
     }
 }
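
For bin-sized objects freeNoSync() threads the block back onto gc.free_list
using the freed memory itself as the link node, so the free lists cost no
memory beyond the blocks they track. The core of that pattern, reduced to a
standalone sketch (illustrative only, hypothetical names, not part of this
patch):

    struct Node { Node* next; }   // like List: the node lives in the chunk
    Node*[B_MAX] heads;           // one head per size class, like gc.free_list

    void push(Bins bin, void* chunk)
    {
        auto n = cast(Node*) chunk;  // reuse the freed chunk as its own link
        n.next = heads[bin];
        heads[bin] = n;
    }

    void* pop(Bins bin)
    {
        Node* n = heads[bin];
        if (n !is null)
            heads[bin] = n.next;     // O(1) unlink; memory handed out as-is
        return n;
    }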

-    /**
-     * Determine the base address of the block containing p. If p is not a gc
-     * allocated pointer, return null.
-     */
-    void* addrOf(void *p)
+/**
+ * Determine the base address of the block containing p. If p is not a gc
+ * allocated pointer, return null.
+ */
+void* addrOf(void *p)
+{
+    if (!p)
     {
-        if (!p)
-        {
-            return null;
-        }

-        if (!thread_needLock())
-        {
-            return addrOfNoSync(p);
-        }
-        else synchronized (lock)
-        {
-            return addrOfNoSync(p);
-        }
+        return null;
     }

-    //
-    //
-    //
-    void* addrOfNoSync(void *p)
+    if (!thread_needLock())
     {
-        if (!p)
-        {
-            return null;
-        }
-
-        return this.findBase(p);
+        return addrOfNoSync(p);
+    }
+    else synchronized (gc.lock)
+    {
+        return addrOfNoSync(p);
     }
+}

-    /**
-     * Determine the allocated size of pointer p. If p is an interior pointer
-     * or not a gc allocated pointer, return 0.
-     */
-    size_t sizeOf(void *p)
+//
+//
+//
+void* addrOfNoSync(void *p)
+{
+    if (!p)
     {
-        if (!p)
-        {
-            return 0;
-        }
-
-        if (!thread_needLock())
-        {
-            return sizeOfNoSync(p);
-        }
-        else synchronized (lock)
-        {
-            return sizeOfNoSync(p);
-        }
+        return null;
     }
+    return findBase(p);
+}

-    //
-    //
-    //
-    private size_t sizeOfNoSync(void *p)
-    {
-        assert (p);
-        if (opts.options.sentinel)
-            p = sentinel_sub(p);

+/**
+ * Determine the allocated size of pointer p. If p is an interior pointer
+ * or not a gc allocated pointer, return 0.
+ */
+size_t sizeOf(void *p)
+{
+    if (!p)
+    {
+        return 0;
+    }

-        Pool* pool = this.findPool(p);
-        if (pool is null)
-            return 0;
+    if (!thread_needLock())
+    {
+        return sizeOfNoSync(p);
+    }
+    else synchronized (gc.lock)
+    {
+        return sizeOfNoSync(p);
+    }
+}

-        auto biti = cast(size_t)(p - pool.baseAddr) / 16;
-        uint attrs = this.getAttr(pool, biti);

-        size_t size = this.findSize(p);
-        size_t pm_bitmask_size = 0;
-        if (has_pointermap(attrs))
-            pm_bitmask_size = size_t.sizeof;
+//
+//
+//
+private size_t sizeOfNoSync(void *p)
+{
+    assert (p);

-        if (opts.options.sentinel) {
-            // Check for interior pointer
-            // This depends on:
-            // 1) size is a power of 2 for less than PAGESIZE values
-            // 2) base of memory pool is aligned on PAGESIZE boundary
-            if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
-                return 0;
-            return size - SENTINEL_EXTRA - pm_bitmask_size;
-        }
-        else {
-            if (p == this.p_cache)
-                return this.size_cache;
-
-            // Check for interior pointer
-            // This depends on:
-            // 1) size is a power of 2 for less than PAGESIZE values
-            // 2) base of memory pool is aligned on PAGESIZE boundary
-            if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
-                return 0;
+    if (opts.options.sentinel)
+        p = sentinel_sub(p);

-            this.p_cache = p;
-            this.size_cache = size - pm_bitmask_size;
+    Pool* pool = findPool(p);
+    if (pool is null)
+        return 0;

-            return this.size_cache;
-        }
+    auto biti = cast(size_t)(p - pool.baseAddr) / 16;
+    uint attrs = getAttr(pool, biti);
+
+    size_t size = findSize(p);
+    size_t pm_bitmask_size = 0;
+    if (has_pointermap(attrs))
+        pm_bitmask_size = size_t.sizeof;
+
+    if (opts.options.sentinel) {
+        // Check for interior pointer
+        // This depends on:
+        // 1) size is a power of 2 for less than PAGESIZE values
+        // 2) base of memory pool is aligned on PAGESIZE boundary
+        if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
+            return 0;
+        return size - SENTINEL_EXTRA - pm_bitmask_size;
     }
+    else {
+        if (p == gc.p_cache)
+            return gc.size_cache;

+        // Check for interior pointer
+        // This depends on:
+        // 1) size is a power of 2 for less than PAGESIZE values
+        // 2) base of memory pool is aligned on PAGESIZE boundary
+        if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
+            return 0;

-    /**
-     * Determine the base address of the block containing p. If p is not a gc
-     * allocated pointer, return null.
-     */
-    BlkInfo query(void *p)
-    {
-        if (!p)
-        {
-            BlkInfo i;
-            return i;
-        }

+        gc.p_cache = p;
+        gc.size_cache = size - pm_bitmask_size;

-        if (!thread_needLock())
-        {
-            return queryNoSync(p);
-        }
-        else synchronized (lock)
-        {
-            return queryNoSync(p);
-        }
+        return gc.size_cache;
     }
 }
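
The interior-pointer test in sizeOfNoSync() works because bin sizes below
PAGESIZE are powers of two and blocks are size-aligned inside page-aligned
pools. A worked check of the arithmetic (illustrative only, not part of this
patch; the address is made up):

    unittest
    {
        size_t size = 64;              // a small bin: power of two
        size_t base = 0x10_0040;       // 64-aligned address inside a pool
        assert ((base & (size - 1) & (PAGESIZE - 1)) == 0);        // base
        assert (((base + 8) & (size - 1) & (PAGESIZE - 1)) != 0);  // interior
        // For a B_PAGE block, size is a multiple of PAGESIZE, so the test
        // degenerates to "is p page-aligned?", which only the base is.
    }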

-    //
-    //
-    //
-    BlkInfo queryNoSync(void *p)
+/**
+ * Retrieve the base address, size and attribute bits of the block
+ * containing p. If p is not a gc allocated pointer, an empty BlkInfo
+ * is returned.
+ */
+BlkInfo query(void *p)
+{
+    if (!p)
     {
-        assert(p);
+        BlkInfo i;
+        return i;
+    }

-        return this.getInfo(p);
+    if (!thread_needLock())
+    {
+        return queryNoSync(p);
+    }
+    else synchronized (gc.lock)
+    {
+        return queryNoSync(p);
     }
+}

-    /**
-     * Verify that pointer p:
-     *  1) belongs to this memory pool
-     *  2) points to the start of an allocated piece of memory
-     *  3) is not on a free list
-     */
-    void check(void *p)
+//
+//
+//
+BlkInfo queryNoSync(void *p)
+{
+    assert(p);
+
+    return getInfo(p);
+}
+
+
+/**
+ * Verify that pointer p:
+ *  1) belongs to this memory pool
+ *  2) points to the start of an allocated piece of memory
+ *  3) is not on a free list
+ */
+void check(void *p)
+{
+    if (!p)
     {
-        if (!p)
-        {
-            return;
-        }
+        return;
+    }

-        if (!thread_needLock())
-        {
-            checkNoSync(p);
-        }
-        else synchronized (lock)
-        {
-            checkNoSync(p);
-        }
+    if (!thread_needLock())
+    {
+        checkNoSync(p);
     }
+    else synchronized (gc.lock)
+    {
+        checkNoSync(p);
+    }
+}

-    //
-    //
-    //
-    private void checkNoSync(void *p)
+//
+//
+//
+private void checkNoSync(void *p)
+{
+    assert(p);

+    if (opts.options.sentinel)
+        sentinel_Invariant(p);
+    debug (PTRCHECK)
     {
-        assert(p);
+        Pool* pool;
+        size_t pagenum;
+        Bins bin;
+        size_t size;

         if (opts.options.sentinel)
-            sentinel_Invariant(p);
-        debug (PTRCHECK)
-        {
-            Pool* pool;
-            size_t pagenum;
-            Bins bin;
-            size_t size;
-
-            if (opts.options.sentinel)
-                p = sentinel_sub(p);
-            pool = this.findPool(p);
-            assert(pool);
-            pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
-            bin = cast(Bins)pool.pagetable[pagenum];
-            assert(bin <= B_PAGE);
-            size = binsize[bin];
-            assert((cast(size_t)p & (size - 1)) == 0);
+            p = sentinel_sub(p);
+        pool = findPool(p);
+        assert(pool);
+        pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
+        bin = cast(Bins)pool.pagetable[pagenum];
+        assert(bin <= B_PAGE);
+        size = binsize[bin];
+        assert((cast(size_t)p & (size - 1)) == 0);

-            debug (PTRCHECK2)
+        debug (PTRCHECK2)
+        {
+            if (bin < B_PAGE)
             {
-                if (bin < B_PAGE)
-                {
-                    // Check that p is not on a free list
-                    List *list;
+                // Check that p is not on a free list
+                List *list;

-                    for (list = this.bucket[bin]; list; list = list.next)
-                    {
-                        assert(cast(void*)list != p);
-                    }
+                for (list = gc.free_list[bin]; list; list = list.next)
+                {
+                    assert(cast(void*)list != p);
                 }
             }
         }
     }
+}

-    //
-    //
-    //
-    private void setStackBottom(void *p)
+//
+//
+//
+private void setStackBottom(void *p)
+{
+    version (STACKGROWSDOWN)
     {
-        version (STACKGROWSDOWN)
+        //p = (void *)((uint *)p + 4);
+        if (p > gc.stack_bottom)
         {
-            //p = (void *)((uint *)p + 4);
-            if (p > this.stackBottom)
-            {
-                this.stackBottom = p;
-            }
+            gc.stack_bottom = p;
         }
-        else
+    }
+    else
+    {
+        //p = (void *)((uint *)p - 4);
+        if (p < gc.stack_bottom)
         {
-            //p = (void *)((uint *)p - 4);
-            if (p < this.stackBottom)
-            {
-                this.stackBottom = cast(char*)p;
-            }
+            gc.stack_bottom = cast(char*)p;
         }
     }
+}
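
The registration functions below let memory that lives outside the GC heap
keep GC-allocated blocks alive. A typical call pattern for addRange() and
removeRange(), using the same cstdlib import this module already has (usage
sketch, illustrative only, not part of this patch):

    Object* makeExternalSlot()
    {
        // A C-heap word that will hold a pointer into the GC heap.
        auto slot = cast(Object*) cstdlib.malloc((void*).sizeof);
        addRange(slot, (void*).sizeof);  // GC now scans this word
        return slot;
    }

    void freeExternalSlot(Object* slot)
    {
        removeRange(slot);               // stop scanning before freeing
        cstdlib.free(slot);
    }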

-    /**
-     * add p to list of roots
-     */
-    void addRoot(void *p)
+/**
+ * add p to list of roots
+ */
+void addRoot(void *p)
+{
+    if (!p)
     {
-        if (!p)
-        {
-            return;
-        }
+        return;
+    }

-        if (!thread_needLock())
-        {
-            if (roots.append(p) is null)
-                onOutOfMemoryError();
-        }
-        else synchronized (lock)
-        {
-            if (roots.append(p) is null)
-                onOutOfMemoryError();
-        }
+    if (!thread_needLock())
+    {
+        if (gc.roots.append(p) is null)
+            onOutOfMemoryError();
+    }
+    else synchronized (gc.lock)
+    {
+        if (gc.roots.append(p) is null)
+            onOutOfMemoryError();
     }
+}

-    /**
-     * remove p from list of roots
-     */
-    void removeRoot(void *p)
+/**
+ * remove p from list of roots
+ */
+void removeRoot(void *p)
+{
+    if (!p)
     {
-        if (!p)
-        {
-            return;
-        }
+        return;
+    }

-        bool r;
-        if (!thread_needLock())
-        {
-            r = roots.remove(p);
-        }
-        else synchronized (lock)
-        {
-            r = roots.remove(p);
-        }
-        assert (r);
+    bool r;
+    if (!thread_needLock())
+    {
+        r = gc.roots.remove(p);
+    }
+    else synchronized (gc.lock)
+    {
+        r = gc.roots.remove(p);
     }
+    assert (r);
+}

-    /**
-     * add range to scan for roots
-     */
-    void addRange(void *p, size_t sz)
+/**
+ * add range to scan for roots
+ */
+void addRange(void *p, size_t sz)
+{
+    if (!p || !sz)
     {
-        if (!p || !sz)
-        {
-            return;
-        }
+        return;
+    }

-        if (!thread_needLock())
-        {
-            if (ranges.append(Range(p, p+sz)) is null)
-                onOutOfMemoryError();
-        }
-        else synchronized (lock)
-        {
-            if (ranges.append(Range(p, p+sz)) is null)
-                onOutOfMemoryError();
-        }
+    if (!thread_needLock())
+    {
+        if (gc.ranges.append(Range(p, p+sz)) is null)
+            onOutOfMemoryError();
+    }
+    else synchronized (gc.lock)
+    {
+        if (gc.ranges.append(Range(p, p+sz)) is null)
+            onOutOfMemoryError();
     }
+}

-    /**
-     * remove range
-     */
-    void removeRange(void *p)
+/**
+ * remove range
+ */
+void removeRange(void *p)
+{
+    if (!p)
     {
-        if (!p)
-        {
-            return;
-        }
+        return;
+    }

-        bool r;
-        if (!thread_needLock())
-        {
-            r = ranges.remove(Range(p, null));
-        }
-        else synchronized (lock)
-        {
-            r = ranges.remove(Range(p, null));
-        }
-        assert (r);
+    bool r;
+    if (!thread_needLock())
+    {
+        r = gc.ranges.remove(Range(p, null));
+    }
+    else synchronized (gc.lock)
+    {
+        r = gc.ranges.remove(Range(p, null));
     }
+    assert (r);
+}

-    /**
-     * do full garbage collection
-     */
-    void fullCollect()
+/**
+ * do full garbage collection
+ */
+void fullCollect()
+{
+    if (!thread_needLock())
     {
-        if (!thread_needLock())
-        {
-            this.fullcollectshell();
-        }
-        else synchronized (lock)
-        {
-            this.fullcollectshell();
-        }
-        version (none)
-        {
-            GCStats stats;
-            getStats(stats);
-        }
+        fullcollectshell();
     }
+    else synchronized (gc.lock)
+    {
+        fullcollectshell();
+    }
+    version (none)
+    {
+        GCStats stats;
+        getStats(stats);
+    }
+}

-    /**
-     * do full garbage collection ignoring roots
-     */
-    void fullCollectNoStack()
+/**
+ * do full garbage collection ignoring the stack
+ */
+void fullCollectNoStack()
+{
+    if (!thread_needLock())
     {
-        if (!thread_needLock())
-        {
-            this.noStack++;
-            this.fullcollectshell();
-            this.noStack--;
-        }
-        else synchronized (lock)
-        {
-            this.noStack++;
-            this.fullcollectshell();
-            this.noStack--;
-        }
+        gc.no_stack++;
+        fullcollectshell();
+        gc.no_stack--;
+    }
+    else synchronized (gc.lock)
+    {
+        gc.no_stack++;
+        fullcollectshell();
+        gc.no_stack--;
     }
+}

-    /**
-     * minimize free space usage
-     */
-    void minimize()
+/**
+ * minimize free space usage
+ */
+void minimize()
+{
+    if (!thread_needLock())
     {
-        if (!thread_needLock())
-        {
-            this.minimizeNoSync();
-        }
-        else synchronized (lock)
-        {
-            this.minimizeNoSync();
-        }
+        minimizeNoSync();
+    }
+    else synchronized (gc.lock)
+    {
+        minimizeNoSync();
     }
+}

-    /**
-     * Retrieve statistics about garbage collection.
-     * Useful for debugging and tuning.
-     */
-    void getStats(out GCStats stats)
+/**
+ * Retrieve statistics about garbage collection.
+ * Useful for debugging and tuning.
+ */
+void getStats(out GCStats stats)
+{
+    if (!thread_needLock())
     {
-        if (!thread_needLock())
-        {
-            getStatsNoSync(stats);
-        }
-        else synchronized (lock)
-        {
-            getStatsNoSync(stats);
-        }
+        getStatsNoSync(stats);
     }
+    else synchronized (gc.lock)
+    {
+        getStatsNoSync(stats);
+    }
+}

-    //
-    //
-    //
-    private void getStatsNoSync(out GCStats stats)
+//
+//
+//
+private void getStatsNoSync(out GCStats stats)
+{
+    size_t psize = 0;
+    size_t usize = 0;
+    size_t flsize = 0;
+
+    size_t n;
+    size_t bsize = 0;
+
+    memset(&stats, 0, GCStats.sizeof);
+
+    for (n = 0; n < gc.pools.length; n++)
     {
-        size_t psize = 0;
-        size_t usize = 0;
-        size_t flsize = 0;
-
-        size_t n;
-        size_t bsize = 0;
-
-        memset(&stats, 0, GCStats.sizeof);
-
-        for (n = 0; n < pools.length; n++)
-        {
-            Pool* pool = pools[n];
-            psize += pool.npages * PAGESIZE;
-            for (size_t j = 0; j < pool.npages; j++)
-            {
-                Bins bin = cast(Bins)pool.pagetable[j];
-                if (bin == B_FREE)
-                    stats.freeblocks++;
-                else if (bin == B_PAGE)
-                    stats.pageblocks++;
-                else if (bin < B_PAGE)
-                    bsize += PAGESIZE;
-            }
-        }
-
-        for (n = 0; n < B_PAGE; n++)
+        Pool* pool = gc.pools[n];
+        psize += pool.npages * PAGESIZE;
+        for (size_t j = 0; j < pool.npages; j++)
         {
-            for (List *list = this.bucket[n]; list; list = list.next)
-                flsize += binsize[n];
+            Bins bin = cast(Bins)pool.pagetable[j];
+            if (bin == B_FREE)
+                stats.freeblocks++;
+            else if (bin == B_PAGE)
+                stats.pageblocks++;
+            else if (bin < B_PAGE)
+                bsize += PAGESIZE;
         }
-
-        usize = bsize - flsize;
-
-        stats.poolsize = psize;
-        stats.usedsize = bsize - flsize;
-        stats.freelistsize = flsize;
     }

-    /******************* weak-reference support *********************/
-
-    // call locked if necessary
-    private T locked(T)(in T delegate() code)
+    for (n = 0; n < B_PAGE; n++)
     {
-        if (thread_needLock)
-            synchronized(lock) return code();
-        else
-            return code();
+        for (List *list = gc.free_list[n]; list; list = list.next)
+            flsize += binsize[n];
     }

-    private struct WeakPointer
-    {
-        Object reference;
+    usize = bsize - flsize;

-        void ondestroy(Object r)
-        {
-            assert(r is reference);
-            // lock for memory consistency (parallel readers)
-            // also ensures that weakpointerDestroy can be called while another
-            // thread is freeing the reference with "delete"
-            locked!(void)({ reference = null; });
-        }
-    }
+    stats.poolsize = psize;
+    stats.usedsize = usize;
+    stats.freelistsize = flsize;
+}

+/******************* weak-reference support *********************/
+
+// call locked if necessary
+private T locked(T)(in T delegate() code)
+{
+    if (thread_needLock)
+        synchronized(gc.lock) return code();
+    else
+        return code();
+}
+
+private struct WeakPointer
+{
+    Object reference;
+
+    void ondestroy(Object r)
+    {
+        assert(r is reference);
+        // lock for memory consistency (parallel readers)
+        // also ensures that weakpointerDestroy can be called while another
+        // thread is freeing the reference with "delete"
+        locked!(void)({ reference = null; });
+    }
+}
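
The three functions below are the whole weak-reference API: create returns a
handle allocated with the C heap, get dereferences it under the lock, and
destroy detaches the dispose event and frees the handle. A lifecycle sketch
(usage example, illustrative only, not part of this patch):

    void weakExample()
    {
        auto obj = new Object;
        void* wp = weakpointerCreate(obj);  // does not keep obj alive
        assert (weakpointerGet(wp) is obj); // obj still strongly reachable
        obj = null;                         // drop the strong reference
        fullCollect();                      // may finalize the object...
        Object maybe = weakpointerGet(wp);  // ...so this is obj or null
        weakpointerDestroy(wp);             // always release the handle
    }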

-    /**
-     * Create a weak pointer to the given object.
-     * Returns a pointer to an opaque struct allocated in C memory.
-     */
-    void* weakpointerCreate( Object r )
-    {
-        if (r)
-        {
-            // must be allocated in C memory
-            // 1. to hide the reference from the GC
-            // 2. the GC doesn't scan delegates added by rt_attachDisposeEvent
-            //    for references
-            auto wp = cast(WeakPointer*)(cstdlib.malloc(WeakPointer.sizeof));
-            if (!wp)
-                onOutOfMemoryError();
-            wp.reference = r;
-            rt_attachDisposeEvent(r, &wp.ondestroy);
-            return wp;
-        }
-        return null;
+/**
+ * Create a weak pointer to the given object.
+ * Returns a pointer to an opaque struct allocated in C memory.
+ */
+void* weakpointerCreate( Object r )
+{
+    if (r)
+    {
+        // must be allocated in C memory
+        // 1. to hide the reference from the GC
+        // 2. the GC doesn't scan delegates added by rt_attachDisposeEvent
+        //    for references
+        auto wp = cast(WeakPointer*)(cstdlib.malloc(WeakPointer.sizeof));
+        if (!wp)
+            onOutOfMemoryError();
+        wp.reference = r;
+        rt_attachDisposeEvent(r, &wp.ondestroy);
+        return wp;
     }
+    return null;
+}

-    /**
-     * Destroy a weak pointer returned by weakpointerCreate().
-     * If null is passed, nothing happens.
-     */
-    void weakpointerDestroy( void* p )
+/**
+ * Destroy a weak pointer returned by weakpointerCreate().
+ * If null is passed, nothing happens.
+ */
+void weakpointerDestroy( void* p )
+{
+    if (p)
     {
-        if (p)
-        {
-            auto wp = cast(WeakPointer*)p;
-            // must be extra careful about the GC or parallel threads
-            // finalizing the reference at the same time
-            locked!(void)({
-                if (wp.reference)
-                    rt_detachDisposeEvent(wp.reference, &wp.ondestroy);
-            });
-            cstdlib.free(wp);
-        }
+        auto wp = cast(WeakPointer*)p;
+        // must be extra careful about the GC or parallel threads
+        // finalizing the reference at the same time
+        locked!(void)({
+            if (wp.reference)
+                rt_detachDisposeEvent(wp.reference, &wp.ondestroy);
+        });
+        cstdlib.free(wp);
     }
+}

-    /**
-     * Query a weak pointer and return either the object passed to
-     * weakpointerCreate, or null if it was free'd in the meantime.
-     * If null is passed, null is returned.
-     */
-    Object weakpointerGet( void* p )
+/**
+ * Query a weak pointer and return either the object passed to
+ * weakpointerCreate, or null if it was freed in the meantime.
+ * If null is passed, null is returned.
+ */
+Object weakpointerGet( void* p )
+{
+    if (p)
     {
-        if (p)
-        {
-            // NOTE: could avoid the lock by using Fawzi style GC counters but
-            // that'd require core.sync.Atomic and lots of care about memory
-            // consistency it's an optional optimization see
-            // http://dsource.org/projects/tango/browser/trunk/user/tango/core/Lifetime.d?rev=5100#L158
-            return locked!(Object)({
-                return (cast(WeakPointer*)p).reference;
-            });
-        }
-    }
+        // NOTE: could avoid the lock by using Fawzi style GC counters but
+        // that'd require core.sync.Atomic and lots of care about memory
+        // consistency it's an optional optimization see
+        // http://dsource.org/projects/tango/browser/trunk/user/tango/core/Lifetime.d?rev=5100#L158
+        return locked!(Object)({
+            return (cast(WeakPointer*)p).reference;
+        });
+    }
+    return null;
 }

@@ -2538,7 +2542,10 @@ struct Pool
     }


-    void Invariant() { }
+    bool Invariant()
+    {
+        return true;
+    }


     invariant
@@ -2655,7 +2662,6 @@ void *sentinel_sub(void *p)

 private int _termCleanupLevel=1;

-private GC* _gc;

 /// sets the cleanup level done by gc
 /// (0: none, 1: fullCollect, 2: fullCollectNoStack (might crash daemonThreads))
@@ -2679,8 +2685,10 @@ extern (C) void thread_init();

 extern (C) void gc_init()
 {
-    _gc = cast(GC*) cstdlib.calloc(1, GC.sizeof);
-    _gc.initialize();
+    scope (exit) assert (Invariant());
+    gc = cast(GC*) cstdlib.calloc(1, GC.sizeof);
+    *gc = GC.init;
+    initialize();
     version (DigitalMars) version(OSX) {
         _d_osx_image_init();
     }
@@ -2691,6 +2699,7 @@ extern (C) void gc_init()

 extern (C) void gc_term()
 {
+    assert (Invariant());
     if (_termCleanupLevel<1) {
         // no cleanup
     } else if (_termCleanupLevel==2){
@@ -2705,140 +2714,164 @@ extern (C) void gc_term()
         // I'm disabling cleanup for now until I can think about it some
         // more.
         //
-        _gc.fullCollectNoStack(); // not really a 'collect all' -- still scans
+        fullCollectNoStack(); // not really a 'collect all' -- still scans
         // static data area, roots, and ranges.
} else { // default (safe) clenup - _gc.fullCollect(); + fullCollect(); } } extern (C) void gc_enable() { - _gc.enable(); + assert (Invariant()); scope (exit) assert (Invariant()); + enable(); } extern (C) void gc_disable() { - _gc.disable(); + assert (Invariant()); scope (exit) assert (Invariant()); + disable(); } extern (C) void gc_collect() { - _gc.fullCollect(); + assert (Invariant()); scope (exit) assert (Invariant()); + fullCollect(); } extern (C) void gc_minimize() { - _gc.minimize(); + assert (Invariant()); scope (exit) assert (Invariant()); + minimize(); } extern (C) uint gc_getAttr( void* p ) { - return _gc.getAttr( p ); + assert (Invariant()); scope (exit) assert (Invariant()); + return getAttr(p); } extern (C) uint gc_setAttr( void* p, uint a ) { - return _gc.setAttr( p, a ); + assert (Invariant()); scope (exit) assert (Invariant()); + return setAttr(p, a); } extern (C) uint gc_clrAttr( void* p, uint a ) { - return _gc.clrAttr( p, a ); + assert (Invariant()); scope (exit) assert (Invariant()); + return clrAttr(p, a); } extern (C) void* gc_malloc(size_t sz, uint attrs = 0, PointerMap ptrmap = PointerMap.init) { - return _gc.malloc(sz, attrs, ptrmap); + assert (Invariant()); scope (exit) assert (Invariant()); + return malloc(sz, attrs, ptrmap); } extern (C) void* gc_calloc(size_t sz, uint attrs = 0, PointerMap ptrmap = PointerMap.init) { - return _gc.calloc(sz, attrs, ptrmap); + assert (Invariant()); scope (exit) assert (Invariant()); + return calloc(sz, attrs, ptrmap); } extern (C) void* gc_realloc(void* p, size_t sz, uint attrs = 0, PointerMap ptrmap = PointerMap.init) { - return _gc.realloc(p, sz, attrs, ptrmap); + assert (Invariant()); scope (exit) assert (Invariant()); + return realloc(p, sz, attrs, ptrmap); } extern (C) size_t gc_extend( void* p, size_t mx, size_t sz ) { - return _gc.extend( p, mx, sz ); + assert (Invariant()); scope (exit) assert (Invariant()); + return extend(p, mx, sz); } extern (C) size_t gc_reserve( size_t sz ) { - return _gc.reserve( sz ); + assert (Invariant()); scope (exit) assert (Invariant()); + return reserve(sz); } extern (C) void gc_free( void* p ) { - _gc.free( p ); + assert (Invariant()); scope (exit) assert (Invariant()); + free(p); } extern (C) void* gc_addrOf( void* p ) { - return _gc.addrOf( p ); + assert (Invariant()); scope (exit) assert (Invariant()); + return addrOf(p); } extern (C) size_t gc_sizeOf( void* p ) { - return _gc.sizeOf( p ); + assert (Invariant()); scope (exit) assert (Invariant()); + return sizeOf(p); } extern (C) BlkInfo gc_query( void* p ) { - return _gc.query( p ); + assert (Invariant()); scope (exit) assert (Invariant()); + return query(p); } // NOTE: This routine is experimental. The stats or function name may change // before it is made officially available. 
extern (C) GCStats gc_stats() { + assert (Invariant()); scope (exit) assert (Invariant()); GCStats stats = void; - _gc.getStats( stats ); + getStats(stats); return stats; } extern (C) void gc_addRoot( void* p ) { - _gc.addRoot( p ); + assert (Invariant()); scope (exit) assert (Invariant()); + addRoot(p); } extern (C) void gc_addRange( void* p, size_t sz ) { - _gc.addRange( p, sz ); + assert (Invariant()); scope (exit) assert (Invariant()); + addRange(p, sz); } extern (C) void gc_removeRoot( void *p ) { - _gc.removeRoot( p ); + assert (Invariant()); scope (exit) assert (Invariant()); + removeRoot(p); } extern (C) void gc_removeRange( void *p ) { - _gc.removeRange( p ); + assert (Invariant()); scope (exit) assert (Invariant()); + removeRange(p); } extern (C) void* gc_weakpointerCreate( Object r ) { - return _gc.weakpointerCreate(r); + assert (Invariant()); scope (exit) assert (Invariant()); + return weakpointerCreate(r); } extern (C) void gc_weakpointerDestroy( void* wp ) { - _gc.weakpointerDestroy(wp); + assert (Invariant()); scope (exit) assert (Invariant()); + weakpointerDestroy(wp); } extern (C) Object gc_weakpointerGet( void* wp ) { - return _gc.weakpointerGet(wp); + assert (Invariant()); scope (exit) assert (Invariant()); + return weakpointerGet(wp); } -- 2.43.0
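
Every extern (C) wrapper above follows the same bracketing discipline that
this patch introduces: validate the global collector state on entry and, via
scope (exit), again on every return path. Condensed to its shape, with a
hypothetical gc_example entry point (illustrative only, not part of this
patch):

    extern (C) void gc_example()
    {
        assert (Invariant());               // gc consistent before the call
        scope (exit) assert (Invariant());  // re-checked on all exit paths
        // ... body operating on the global `gc` instance ...
    }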