/************** Debugging ***************************/
//debug = COLLECT_PRINTF; // turn on printf's
-//debug = LOGGING; // log allocations / frees
-//debug = MEMSTOMP; // stomp on memory
-//debug = SENTINEL; // add underrun/overrun protection
//debug = PTRCHECK; // more pointer checking
//debug = PTRCHECK2; // thorough but slow pointer checking
/***************************************************/
import rt.gc.cdgc.bits: GCBits;
-import rt.gc.cdgc.stats: GCStats;
+import rt.gc.cdgc.stats: GCStats, Stats;
+import rt.gc.cdgc.dynarray: DynArray;
import alloc = rt.gc.cdgc.alloc;
+import opts = rt.gc.cdgc.opts;
import cstdlib = tango.stdc.stdlib;
import cstring = tango.stdc.string;
+/*
+ * This is a small optimization that proved its usefulness. For small chunks
+ * of memory, memset() seems to be slower (probably because of the call
+ * overhead) than a simple loop that sets the memory.
+ */
+void memset(void* dst, int c, size_t n)
+{
+ // This number (32) has been determined empirically
+ if (n > 32) {
+ cstring.memset(dst, c, n);
+ return;
+ }
+ auto p = cast(ubyte*)(dst);
+ while (n-- > 0)
+ *p++ = c;
+}
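+// A quick sketch of the dispatch above: small requests take the loop,
+// larger ones fall through to the libc call.
+//   memset(p, 0, 16);       // n <= 32: simple loop
+//   memset(p, 0, PAGESIZE); // n > 32: cstring.memset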
version (GNU)
{
static import gcc.builtins; // for __builtin_unwind_init
}
-
struct BlkInfo
{
void* base;
uint attr;
}
+package enum BlkAttr : uint
+{
+ FINALIZE = 0b0000_0001,
+ NO_SCAN = 0b0000_0010,
+ NO_MOVE = 0b0000_0100,
+ ALL_BITS = 0b1111_1111
+}
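+// The attributes are OR-combined bit flags; e.g. a block holding an object
+// with a destructor but no interior pointers could be allocated with:
+//   uint attrs = BlkAttr.FINALIZE | BlkAttr.NO_SCAN;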
+
private
{
- enum BlkAttr : uint
- {
- FINALIZE = 0b0000_0001,
- NO_SCAN = 0b0000_0010,
- NO_MOVE = 0b0000_0100,
- ALL_BITS = 0b1111_1111
- }
extern (C) void* rt_stackBottom();
extern (C) void* rt_stackTop();
alias GC gc_t;
-/* ======================= Leak Detector =========================== */
-
-
-debug (LOGGING)
-{
- struct Log
- {
- void* p;
- size_t size;
- size_t line;
- char* file;
- void* parent;
-
- void print()
- {
- printf(" p = %x, size = %d, parent = %x ", p, size, parent);
- if (file)
- {
- printf("%s(%u)", file, line);
- }
- printf("\n");
- }
- }
-
-
- struct LogArray
- {
- size_t dim;
- size_t allocdim;
- Log *data;
-
- void Dtor()
- {
- if (data)
- cstdlib.free(data);
- data = null;
- }
-
- void reserve(size_t nentries)
- {
- assert(dim <= allocdim);
- if (allocdim - dim < nentries)
- {
- allocdim = (dim + nentries) * 2;
- assert(dim + nentries <= allocdim);
- if (!data)
- {
- data = cast(Log*) cstdlib.malloc(allocdim * Log.sizeof);
- if (!data && allocdim)
- onOutOfMemoryError();
- }
- else
- {
- Log *newdata = cast(Log*) cstdlib.malloc(
- allocdim * Log.sizeof);
- if (!newdata && allocdim)
- onOutOfMemoryError();
- cstring.memcpy(newdata, data, dim * Log.sizeof);
- cstdlib.free(data);
- data = newdata;
- }
- }
- }
-
-
- void push(Log log)
- {
- reserve(1);
- data[dim++] = log;
- }
-
- void remove(size_t i)
- {
- cstring.memmove(data + i, data + i + 1, (dim - i) * Log.sizeof);
- dim--;
- }
-
-
- size_t find(void *p)
- {
- for (size_t i = 0; i < dim; i++)
- {
- if (data[i].p == p)
- return i;
- }
- return OPFAIL; // not found
- }
-
-
- void copy(LogArray *from)
- {
- reserve(from.dim - dim);
- assert(from.dim <= allocdim);
- cstring.memcpy(data, from.data, from.dim * Log.sizeof);
- dim = from.dim;
- }
- }
-}
-
-
/* ============================ GC =============================== */
const uint GCVERSION = 1; // increment every time we change interface
// to GC.
+Stats stats;
+
class GC
{
// For passing to debug code
void initialize()
{
+ opts.parse(cstdlib.getenv("D_GC_OPTS"));
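+ // Options come from the environment; the names used below (mem_stomp,
+ // sentinel) are fields of opts.options. A hypothetical invocation, with
+ // the exact syntax defined by rt.gc.cdgc.opts:
+ //   D_GC_OPTS=mem_stomp:sentinel ./program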
gcLock = GCLock.classinfo;
gcx = cast(Gcx*) cstdlib.calloc(1, Gcx.sizeof);
if (!gcx)
onOutOfMemoryError();
gcx.initialize();
setStackBottom(rt_stackBottom());
- }
-
-
- void Dtor()
- {
- if (gcx)
- {
- gcx.Dtor();
- cstdlib.free(gcx);
- gcx = null;
- }
+ stats = Stats(this);
}
uint go()
{
Pool* pool = gcx.findPool(p);
- uint oldb = 0;
+ uint old_attrs = 0;
if (pool)
{
- auto biti = cast(size_t)(p - pool.baseAddr) / 16;
+ auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
- oldb = gcx.getBits(pool, biti);
+ old_attrs = gcx.getAttr(pool, bit_i);
}
- return oldb;
+ return old_attrs;
}
if (!thread_needLock())
uint go()
{
Pool* pool = gcx.findPool(p);
- uint oldb = 0;
+ uint old_attrs = 0;
if (pool)
{
- auto biti = cast(size_t)(p - pool.baseAddr) / 16;
+ auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
- oldb = gcx.getBits(pool, biti);
- gcx.setBits(pool, biti, mask);
+ old_attrs = gcx.getAttr(pool, bit_i);
+ gcx.setAttr(pool, bit_i, mask);
}
- return oldb;
+ return old_attrs;
}
if (!thread_needLock())
uint go()
{
Pool* pool = gcx.findPool(p);
- uint oldb = 0;
+ uint old_attrs = 0;
if (pool)
{
- auto biti = cast(size_t)(p - pool.baseAddr) / 16;
+ auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
- oldb = gcx.getBits(pool, biti);
- gcx.clrBits(pool, biti, mask);
+ old_attrs = gcx.getAttr(pool, bit_i);
+ gcx.clrAttr(pool, bit_i, mask);
}
- return oldb;
+ return old_attrs;
}
if (!thread_needLock())
/**
*
*/
- void *malloc(size_t size, uint bits = 0)
+ void *malloc(size_t size, uint attrs, PointerMap ptrmap)
{
if (!size)
{
if (!thread_needLock())
{
- return mallocNoSync(size, bits);
+ return mallocNoSync(size, attrs, ptrmap.bits.ptr);
}
else synchronized (gcLock)
{
- return mallocNoSync(size, bits);
+ return mallocNoSync(size, attrs, ptrmap.bits.ptr);
}
}
//
//
//
- private void *mallocNoSync(size_t size, uint bits = 0)
+ private void *mallocNoSync(size_t size, uint attrs, size_t* pm_bitmask)
{
assert(size != 0);
void *p = null;
+ stats.malloc_started(size, attrs, pm_bitmask);
+ scope (exit)
+ stats.malloc_finished(p);
+
Bins bin;
assert(gcx);
- size += SENTINEL_EXTRA;
+ if (opts.options.sentinel)
+ size += SENTINEL_EXTRA;
+
+ bool has_pm = !(attrs & BlkAttr.NO_SCAN);
+ size_t pm_bitmask_size;
+ if (has_pm)
+ pm_bitmask_size = (size_t*).sizeof;
+ size += pm_bitmask_size;
// Compute size bin
// Cache previous binsize lookup - Dave Fladebo.
lastbin = bin;
}
+ size_t capacity; // to figure out where to store the bitmask
if (bin < B_PAGE)
{
p = gcx.bucket[bin];
}
p = gcx.bucket[bin];
}
+ capacity = binsize[bin];
// Return next item from free list
gcx.bucket[bin] = (cast(List*)p).next;
- if( !(bits & BlkAttr.NO_SCAN) )
- cstring.memset(p + size, 0, binsize[bin] - size);
- debug (MEMSTOMP) cstring.memset(p, 0xF0, size);
+ if( !(attrs & BlkAttr.NO_SCAN) )
+ memset(p + size, 0, capacity - size);
+ if (opts.options.mem_stomp)
+ memset(p, 0xF0, size);
}
else
{
p = gcx.bigAlloc(size);
if (!p)
onOutOfMemoryError();
+ // Round the size up to the number of pages needed to store it
+ size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
+ capacity = npages * PAGESIZE;
}
- size -= SENTINEL_EXTRA;
- p = sentinel_add(p);
- sentinel_init(p, size);
- gcx.log_malloc(p, size);
- if (bits)
+ // Store the bit mask AFTER SENTINEL_POST
+ // TODO: store it BEFORE, so the bitmask is protected too
+ if (has_pm) {
+ auto end_of_blk = cast(size_t**)(p + capacity - pm_bitmask_size);
+ *end_of_blk = pm_bitmask;
+ size -= pm_bitmask_size;
+ }
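+ // Resulting layout of a scanned block (sketch; sentinel words omitted):
+ //   [ user data ... | size_t* pm_bitmask ]   <- ends at p + capacity
+ // i.e. the bitmask pointer occupies the last (size_t*).sizeof bytes.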
+
+ if (opts.options.sentinel) {
+ size -= SENTINEL_EXTRA;
+ p = sentinel_add(p);
+ sentinel_init(p, size);
+ }
+
+ if (attrs)
{
Pool *pool = gcx.findPool(p);
assert(pool);
- gcx.setBits(pool, cast(size_t)(p - pool.baseAddr) / 16, bits);
+ gcx.setAttr(pool, cast(size_t)(p - pool.baseAddr) / 16, attrs);
}
return p;
}
/**
*
*/
- void *calloc(size_t size, uint bits = 0)
+ void *calloc(size_t size, uint attrs, PointerMap ptrmap)
{
if (!size)
{
if (!thread_needLock())
{
- return callocNoSync(size, bits);
+ return callocNoSync(size, attrs, ptrmap.bits.ptr);
}
else synchronized (gcLock)
{
- return callocNoSync(size, bits);
+ return callocNoSync(size, attrs, ptrmap.bits.ptr);
}
}
//
//
//
- private void *callocNoSync(size_t size, uint bits = 0)
+ private void *callocNoSync(size_t size, uint attrs, size_t* pm_bitmask)
{
assert(size != 0);
- void *p = mallocNoSync(size, bits);
- cstring.memset(p, 0, size);
+ void *p = mallocNoSync(size, attrs, pm_bitmask);
+ memset(p, 0, size);
return p;
}
/**
*
*/
- void *realloc(void *p, size_t size, uint bits = 0)
+ void *realloc(void *p, size_t size, uint attrs, PointerMap ptrmap)
{
if (!thread_needLock())
{
- return reallocNoSync(p, size, bits);
+ return reallocNoSync(p, size, attrs, ptrmap.bits.ptr);
}
else synchronized (gcLock)
{
- return reallocNoSync(p, size, bits);
+ return reallocNoSync(p, size, attrs, ptrmap.bits.ptr);
}
}
//
//
//
- private void *reallocNoSync(void *p, size_t size, uint bits = 0)
+ private void *reallocNoSync(void *p, size_t size, uint attrs,
+ size_t* pm_bitmask)
{
if (!size)
{
}
else if (!p)
{
- p = mallocNoSync(size, bits);
+ p = mallocNoSync(size, attrs, pm_bitmask);
}
else
{
- void *p2;
- size_t psize;
+ Pool* pool = gcx.findPool(p);
+ if (pool is null)
+ return null;
- version (SENTINEL)
+ // Set or retrieve attributes as appropriate
+ auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
+ if (attrs) {
+ gcx.clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
+ gcx.setAttr(pool, bit_i, attrs);
+ }
+ else
+ attrs = gcx.getAttr(pool, bit_i);
+
+ void* blk_base_addr = gcx.findBase(p);
+ size_t blk_size = gcx.findSize(p);
+ bool has_pm = !(attrs & BlkAttr.NO_SCAN);
+ size_t pm_bitmask_size = 0;
+ if (has_pm) {
+ pm_bitmask_size = (size_t*).sizeof;
+ // Retrieve pointer map bit mask if appropriate
+ if (pm_bitmask is null) {
+ auto end_of_blk = cast(size_t**)(blk_base_addr +
+ blk_size - pm_bitmask_size);
+ pm_bitmask = *end_of_blk;
+ }
+ }
+
+ if (opts.options.sentinel)
{
sentinel_Invariant(p);
- psize = *sentinel_size(p);
- if (psize != size)
+ size_t sentinel_stored_size = *sentinel_size(p);
+ if (sentinel_stored_size != size)
{
- if (psize)
- {
- Pool *pool = gcx.findPool(p);
-
- if (pool)
- {
- auto biti = cast(size_t)(p - pool.baseAddr) / 16;
-
- if (bits)
- {
- gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
- gcx.setBits(pool, biti, bits);
- }
- else
- {
- bits = gcx.getBits(pool, biti);
- }
- }
- }
- p2 = mallocNoSync(size, bits);
- if (psize < size)
- size = psize;
+ void* p2 = mallocNoSync(size, attrs, pm_bitmask);
+ if (sentinel_stored_size < size)
+ size = sentinel_stored_size;
cstring.memcpy(p2, p, size);
p = p2;
}
}
else
{
- psize = gcx.findSize(p); // find allocated size
- if (psize >= PAGESIZE && size >= PAGESIZE)
+ size += pm_bitmask_size;
+ if (blk_size >= PAGESIZE && size >= PAGESIZE)
{
- auto psz = psize / PAGESIZE;
+ auto psz = blk_size / PAGESIZE;
auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
if (newsz == psz)
return p;
- auto pool = gcx.findPool(p);
auto pagenum = (p - pool.baseAddr) / PAGESIZE;
if (newsz < psz)
// Shrink in place
synchronized (gcLock)
{
- debug (MEMSTOMP)
- cstring.memset(p + size, 0xF2, psize - size);
+ if (opts.options.mem_stomp)
+ memset(p + size - pm_bitmask_size, 0xF2,
+ blk_size - size - pm_bitmask_size);
pool.freePages(pagenum + newsz, psz - newsz);
}
+ if (has_pm) {
+ auto end_of_blk = cast(size_t**)(
+ blk_base_addr + (PAGESIZE * newsz) -
+ pm_bitmask_size);
+ *end_of_blk = pm_bitmask;
+ }
return p;
}
else if (pagenum + newsz <= pool.npages)
{
if (i == pagenum + newsz)
{
- debug (MEMSTOMP)
- cstring.memset(p + psize, 0xF0,
- size - psize);
- cstring.memset(pool.pagetable + pagenum +
+ if (opts.options.mem_stomp)
+ memset(p + blk_size - pm_bitmask_size,
+ 0xF0, size - blk_size
+ - pm_bitmask_size);
+ memset(pool.pagetable + pagenum +
psz, B_PAGEPLUS, newsz - psz);
+ if (has_pm) {
+ auto end_of_blk = cast(size_t**)(
+ blk_base_addr +
+ (PAGESIZE * newsz) -
+ pm_bitmask_size);
+ *end_of_blk = pm_bitmask;
+ }
return p;
}
if (i == pool.npages)
}
}
}
- if (psize < size || // if new size is bigger
- psize > size * 2) // or less than half
+ // if new size is bigger or less than half
+ if (blk_size < size || blk_size > size * 2)
{
- if (psize)
- {
- Pool *pool = gcx.findPool(p);
-
- if (pool)
- {
- auto biti = cast(size_t)(p - pool.baseAddr) / 16;
-
- if (bits)
- {
- gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
- gcx.setBits(pool, biti, bits);
- }
- else
- {
- bits = gcx.getBits(pool, biti);
- }
- }
- }
- p2 = mallocNoSync(size, bits);
- if (psize < size)
- size = psize;
+ size -= pm_bitmask_size;
+ blk_size -= pm_bitmask_size;
+ void* p2 = mallocNoSync(size, attrs, pm_bitmask);
+ if (blk_size < size)
+ size = blk_size;
cstring.memcpy(p2, p, size);
p = p2;
}
}
body
{
- version (SENTINEL)
- {
+ if (opts.options.sentinel)
return 0;
+
+ Pool* pool = gcx.findPool(p);
+ if (pool is null)
+ return 0;
+
+ // Retrieve attributes
+ auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
+ uint attrs = gcx.getAttr(pool, bit_i);
+
+ void* blk_base_addr = gcx.findBase(p);
+ size_t blk_size = gcx.findSize(p);
+ bool has_pm = !(attrs & BlkAttr.NO_SCAN);
+ size_t* pm_bitmask = null;
+ size_t pm_bitmask_size = 0;
+ if (has_pm) {
+ pm_bitmask_size = (size_t*).sizeof;
+ // Retrieve pointer map bit mask
+ auto end_of_blk = cast(size_t**)(blk_base_addr +
+ blk_size - pm_bitmask_size);
+ pm_bitmask = *end_of_blk;
}
- auto psize = gcx.findSize(p); // find allocated size
- if (psize < PAGESIZE)
- return 0; // cannot extend buckets
- auto psz = psize / PAGESIZE;
+ if (blk_size < PAGESIZE)
+ return 0; // cannot extend buckets
+
+ minsize += pm_bitmask_size;
+ maxsize += pm_bitmask_size;
+
+ auto psz = blk_size / PAGESIZE;
auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
- auto pool = gcx.findPool(p);
auto pagenum = (p - pool.baseAddr) / PAGESIZE;
size_t sz;
}
if (sz < minsz)
return 0;
- debug (MEMSTOMP)
- cstring.memset(p + psize, 0xF0, (psz + sz) * PAGESIZE - psize);
- cstring.memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
+
+ size_t new_size = (psz + sz) * PAGESIZE;
+
+ if (opts.options.mem_stomp)
+ memset(p + blk_size - pm_bitmask_size, 0xF0,
+ new_size - blk_size - pm_bitmask_size);
+ memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
gcx.p_cache = null;
gcx.size_cache = 0;
- return (psz + sz) * PAGESIZE;
+
+ if (has_pm) {
+ new_size -= pm_bitmask_size;
+ auto end_of_blk = cast(size_t**)(blk_base_addr + new_size);
+ *end_of_blk = pm_bitmask;
+ }
+ return new_size;
}
Pool* pool;
size_t pagenum;
Bins bin;
- size_t biti;
+ size_t bit_i;
// Find which page it is in
pool = gcx.findPool(p);
if (!pool) // if not one of ours
return; // ignore
- sentinel_Invariant(p);
- p = sentinel_sub(p);
+ if (opts.options.sentinel) {
+ sentinel_Invariant(p);
+ p = sentinel_sub(p);
+ }
pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
- biti = cast(size_t)(p - pool.baseAddr) / 16;
- gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
+ bit_i = cast(size_t)(p - pool.baseAddr) / 16;
+ gcx.clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
bin = cast(Bins)pool.pagetable[pagenum];
if (bin == B_PAGE) // if large alloc
size_t n = pagenum;
while (++n < pool.npages && pool.pagetable[n] == B_PAGEPLUS)
npages++;
- debug (MEMSTOMP) cstring.memset(p, 0xF2, npages * PAGESIZE);
+ if (opts.options.mem_stomp)
+ memset(p, 0xF2, npages * PAGESIZE);
pool.freePages(pagenum, npages);
}
else
// Add to free list
List *list = cast(List*)p;
- debug (MEMSTOMP) cstring.memset(p, 0xF2, binsize[bin]);
+ if (opts.options.mem_stomp)
+ memset(p, 0xF2, binsize[bin]);
list.next = gcx.bucket[bin];
gcx.bucket[bin] = list;
}
- gcx.log_free(sentinel_add(p));
}
{
assert (p);
- version (SENTINEL)
- {
+ if (opts.options.sentinel)
p = sentinel_sub(p);
- size_t size = gcx.findSize(p);
+ Pool* pool = gcx.findPool(p);
+ if (pool is null)
+ return 0;
+
+ auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
+ uint attrs = gcx.getAttr(pool, bit_i);
+
+ size_t size = gcx.findSize(p);
+ bool has_pm = !(attrs & BlkAttr.NO_SCAN);
+ size_t pm_bitmask_size = 0;
+ if (has_pm)
+ pm_bitmask_size = (size_t*).sizeof;
+
+ if (opts.options.sentinel) {
// Check for interior pointer
// This depends on:
// 1) size is a power of 2 for less than PAGESIZE values
// 2) base of memory pool is aligned on PAGESIZE boundary
if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
- size = 0;
- return size ? size - SENTINEL_EXTRA : 0;
+ return 0;
+ return size - SENTINEL_EXTRA - pm_bitmask_size;
}
- else
- {
+ else {
if (p == gcx.p_cache)
return gcx.size_cache;
- size_t size = gcx.findSize(p);
-
// Check for interior pointer
// This depends on:
// 1) size is a power of 2 for less than PAGESIZE values
// 2) base of memory pool is aligned on PAGESIZE boundary
if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
- size = 0;
- else
- {
- gcx.p_cache = p;
- gcx.size_cache = size;
- }
+ return 0;
+
+ gcx.p_cache = p;
+ gcx.size_cache = size - pm_bitmask_size;
- return size;
+ return gcx.size_cache;
}
}
{
assert(p);
- sentinel_Invariant(p);
+ if (opts.options.sentinel)
+ sentinel_Invariant(p);
debug (PTRCHECK)
{
Pool* pool;
Bins bin;
size_t size;
- p = sentinel_sub(p);
+ if (opts.options.sentinel)
+ p = sentinel_sub(p);
pool = gcx.findPool(p);
assert(pool);
pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
if (!thread_needLock())
{
- gcx.addRoot(p);
+ if (roots.append(p) is null)
+ onOutOfMemoryError();
}
else synchronized (gcLock)
{
- gcx.addRoot(p);
+ if (roots.append(p) is null)
+ onOutOfMemoryError();
}
}
return;
}
+ bool r;
if (!thread_needLock())
{
- gcx.removeRoot(p);
+ r = roots.remove(p);
}
else synchronized (gcLock)
{
- gcx.removeRoot(p);
+ r = roots.remove(p);
}
+ assert (r);
}
if (!thread_needLock())
{
- gcx.addRange(p, p + sz);
+ if (ranges.append(Range(p, p+sz)) is null)
+ onOutOfMemoryError();
}
else synchronized (gcLock)
{
- gcx.addRange(p, p + sz);
+ if (ranges.append(Range(p, p+sz)) is null)
+ onOutOfMemoryError();
}
}
return;
}
+ bool r;
if (!thread_needLock())
{
- gcx.removeRange(p);
+ r = ranges.remove(Range(p, null));
}
else synchronized (gcLock)
{
- gcx.removeRange(p);
+ r = ranges.remove(Range(p, null));
}
+ assert (r);
}
getStats(stats);
}
- gcx.log_collect();
}
size_t n;
size_t bsize = 0;
- cstring.memset(&stats, 0, GCStats.sizeof);
+ memset(&stats, 0, GCStats.sizeof);
- for (n = 0; n < gcx.npools; n++)
+ for (n = 0; n < pools.length; n++)
{
- Pool *pool = gcx.pooltable[n];
+ Pool* pool = pools[n];
psize += pool.npages * PAGESIZE;
for (size_t j = 0; j < pool.npages; j++)
{
{
void *pbot;
void *ptop;
+ int opCmp(in Range other)
+ {
+ if (pbot < other.pbot)
+ return -1;
+ else
+ return cast(int)(pbot > other.pbot);
+ }
}
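+// Note: ranges order and compare by pbot alone, so a search key such as
+// Range(p, null) (as used by removeRange above) matches regardless of ptop.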
const uint notbinsize[B_MAX] = [ ~(16u-1),~(32u-1),~(64u-1),~(128u-1),~(256u-1),
~(512u-1),~(1024u-1),~(2048u-1),~(4096u-1) ];
+DynArray!(void*) roots;
+
+DynArray!(Range) ranges;
+
+DynArray!(Pool) pools;
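+// These DynArray globals replace the hand-rolled roots/ranges/pooltable
+// buffers; the operations used in this module (append, remove, remove_at,
+// insert_sorted) are provided by rt.gc.cdgc.dynarray.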
+
+
/* ============================ Gcx =============================== */
void *p_cache;
size_t size_cache;
- size_t nroots;
- size_t rootdim;
- void **roots;
-
- size_t nranges;
- size_t rangedim;
- Range *ranges;
-
uint noStack; // !=0 means don't scan stack
uint log; // turn on logging
uint anychanges;
byte *minAddr; // min(baseAddr)
byte *maxAddr; // max(topAddr)
- size_t npools;
- Pool **pooltable;
-
List *bucket[B_MAX]; // free list for each size
int dummy;
(cast(byte*)this)[0 .. Gcx.sizeof] = 0;
stackBottom = cast(char*)&dummy;
- log_init();
//printf("gcx = %p, self = %x\n", this, self);
inited = 1;
}
- void Dtor()
- {
- inited = 0;
-
- for (size_t i = 0; i < npools; i++)
- {
- Pool *pool = pooltable[i];
- pool.Dtor();
- cstdlib.free(pool);
- }
- if (pooltable)
- cstdlib.free(pooltable);
-
- if (roots)
- cstdlib.free(roots);
-
- if (ranges)
- cstdlib.free(ranges);
- }
-
-
void Invariant() { }
//printf("Gcx.invariant(): this = %p\n", this);
size_t i;
- for (i = 0; i < npools; i++)
+ for (i = 0; i < pools.length; i++)
{
- Pool *pool = pooltable[i];
+ Pool* pool = pools[i];
pool.Invariant();
if (i == 0)
{
assert(minAddr == pool.baseAddr);
}
- if (i + 1 < npools)
+ if (i + 1 < pools.length)
{
- assert(pool.opCmp(pooltable[i + 1]) < 0);
+ assert(*pool < pools[i + 1]);
}
- else if (i + 1 == npools)
+ else if (i + 1 == pools.length)
{
assert(maxAddr == pool.topAddr);
}
}
- if (roots)
- {
- assert(rootdim != 0);
- assert(nroots <= rootdim);
- }
+ roots.Invariant();
+ ranges.Invariant();
- if (ranges)
+ for (i = 0; i < ranges.length; i++)
{
- assert(rangedim != 0);
- assert(nranges <= rangedim);
-
- for (i = 0; i < nranges; i++)
- {
- assert(ranges[i].pbot);
- assert(ranges[i].ptop);
- assert(ranges[i].pbot <= ranges[i].ptop);
- }
+ assert(ranges[i].pbot);
+ assert(ranges[i].ptop);
+ assert(ranges[i].pbot <= ranges[i].ptop);
}
for (i = 0; i < B_PAGE; i++)
}
- /**
- *
- */
- void addRoot(void *p)
- {
- if (nroots == rootdim)
- {
- size_t newdim = rootdim * 2 + 16;
- void** newroots;
-
- newroots = cast(void**) cstdlib.malloc(newdim * newroots[0].sizeof);
- if (!newroots)
- onOutOfMemoryError();
- if (roots)
- {
- cstring.memcpy(newroots, roots, nroots * newroots[0].sizeof);
- cstdlib.free(roots);
- }
- roots = newroots;
- rootdim = newdim;
- }
- roots[nroots] = p;
- nroots++;
- }
-
-
- /**
- *
- */
- void removeRoot(void *p)
- {
- for (size_t i = nroots; i--;)
- {
- if (roots[i] == p)
- {
- nroots--;
- cstring.memmove(roots + i, roots + i + 1,
- (nroots - i) * roots[0].sizeof);
- return;
- }
- }
- assert(0);
- }
-
-
- /**
- *
- */
- void addRange(void *pbot, void *ptop)
- {
- if (nranges == rangedim)
- {
- size_t newdim = rangedim * 2 + 16;
- Range *newranges;
-
- newranges = cast(Range*) cstdlib.malloc(newdim * Range.sizeof);
- if (!newranges)
- onOutOfMemoryError();
- if (ranges)
- {
- cstring.memcpy(newranges, ranges, nranges * Range.sizeof);
- cstdlib.free(ranges);
- }
- ranges = newranges;
- rangedim = newdim;
- }
- ranges[nranges].pbot = pbot;
- ranges[nranges].ptop = ptop;
- nranges++;
- }
-
-
- /**
- *
- */
- void removeRange(void *pbot)
- {
- for (size_t i = nranges; i--;)
- {
- if (ranges[i].pbot == pbot)
- {
- nranges--;
- cstring.memmove(ranges + i, ranges + i + 1,
- (nranges - i) * ranges[0].sizeof);
- return;
- }
- }
-
- // This is a fatal error, but ignore it.
- // The problem is that we can get a Close() call on a thread
- // other than the one the range was allocated on.
- //assert(zero);
- }
-
-
/**
* Find Pool that pointer is in.
* Return null if not in a Pool.
- * Assume pooltable[] is sorted.
+ * Assume pools is sorted.
*/
Pool *findPool(void *p)
{
if (p >= minAddr && p < maxAddr)
{
- if (npools == 1)
+ if (pools.length == 1)
{
- return pooltable[0];
+ return pools[0];
}
- for (size_t i = 0; i < npools; i++)
+ for (size_t i = 0; i < pools.length; i++)
{
- Pool *pool;
-
- pool = pooltable[i];
+ Pool* pool = pools[i];
if (p < pool.topAddr)
{
if (pool.baseAddr <= p)
}
////////////////////////////////////////////////////////////////////
- // getBits
+ // getAttr
////////////////////////////////////////////////////////////////////
- info.attr = getBits(pool, cast(size_t)(offset / 16));
+ info.attr = getAttr(pool, cast(size_t)(offset / 16));
+ if (!(info.attr & BlkAttr.NO_SCAN))
+ info.size -= (size_t*).sizeof; // bitmask
}
return info;
}
/**
* Allocate a new pool of at least size bytes.
- * Sort it into pooltable[].
+ * Sort it into pools.
* Mark all memory in the pool as B_FREE.
* Return the actual number of bytes reserved or 0 on error.
*/
size_t pn;
Pool* pool;
- for (n = 0; n < npools; n++)
+ for (n = 0; n < pools.length; n++)
{
- pool = pooltable[n];
+ pool = pools[n];
for (pn = 0; pn < pool.npages; pn++)
{
if (cast(Bins)pool.pagetable[pn] != B_FREE)
break;
}
if (pn < pool.npages)
- {
- n++;
continue;
- }
pool.Dtor();
- cstdlib.free(pool);
- cstring.memmove(pooltable + n,
- pooltable + n + 1,
- (--npools - n) * (Pool*).sizeof);
- minAddr = pooltable[0].baseAddr;
- maxAddr = pooltable[npools - 1].topAddr;
+ pools.remove_at(n);
+ n--;
}
+ minAddr = pools[0].baseAddr;
+ maxAddr = pools[pools.length - 1].topAddr;
}
// This code could use some refinement when repeatedly
// allocating very large arrays.
- for (n = 0; n < npools; n++)
+ for (n = 0; n < pools.length; n++)
{
- pool = pooltable[n];
+ pool = pools[n];
pn = pool.allocPages(npages);
if (pn != OPFAIL)
goto L1;
}
// Try collecting
freedpages = fullcollectshell();
- if (freedpages >= npools * ((POOLSIZE / PAGESIZE) / 4))
+ if (freedpages >= pools.length * ((POOLSIZE / PAGESIZE) / 4))
{
state = 1;
continue;
L1:
pool.pagetable[pn] = B_PAGE;
if (npages > 1)
- cstring.memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
+ memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
p = pool.baseAddr + pn * PAGESIZE;
- cstring.memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
- debug (MEMSTOMP) cstring.memset(p, 0xF1, size);
+ memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
+ if (opts.options.mem_stomp)
+ memset(p, 0xF1, size);
return p;
Lnomemory:
/**
* Allocate a new pool with at least npages in it.
- * Sort it into pooltable[].
+ * Sort it into pools.
* Return null if failed.
*/
Pool *newPool(size_t npages)
{
- Pool* pool;
- Pool** newpooltable;
- size_t newnpools;
- size_t i;
-
// Minimum of POOLSIZE
if (npages < POOLSIZE/PAGESIZE)
npages = POOLSIZE/PAGESIZE;
}
// Allocate successively larger pools up to 8 megs
- if (npools)
+ if (pools.length)
{
- size_t n = npools;
+ size_t n = pools.length;
if (n > 8)
n = 8; // cap pool size at 8 megs
n *= (POOLSIZE / PAGESIZE);
npages = n;
}
- pool = cast(Pool *) cstdlib.calloc(1, Pool.sizeof);
- if (pool)
+ Pool p;
+ p.initialize(npages);
+ if (!p.baseAddr)
{
- pool.initialize(npages);
- if (!pool.baseAddr)
- goto Lerr;
-
- newnpools = npools + 1;
- newpooltable = cast(Pool **) cstdlib.realloc(pooltable,
- newnpools * (Pool *).sizeof);
- if (!newpooltable)
- goto Lerr;
-
- // Sort pool into newpooltable[]
- for (i = 0; i < npools; i++)
- {
- if (pool.opCmp(newpooltable[i]) < 0)
- break;
- }
- cstring.memmove(newpooltable + i + 1, newpooltable + i,
- (npools - i) * (Pool *).sizeof);
- newpooltable[i] = pool;
-
- pooltable = newpooltable;
- npools = newnpools;
+ p.Dtor();
+ return null;
+ }
- minAddr = pooltable[0].baseAddr;
- maxAddr = pooltable[npools - 1].topAddr;
+ Pool* pool = pools.insert_sorted(p);
+ if (pool)
+ {
+ minAddr = pools[0].baseAddr;
+ maxAddr = pools[pools.length - 1].topAddr;
}
return pool;
-
- Lerr:
- pool.Dtor();
- cstdlib.free(pool);
- return null;
}
byte* p;
byte* ptop;
- for (n = 0; n < npools; n++)
+ for (n = 0; n < pools.length; n++)
{
- pool = pooltable[n];
+ pool = pools[n];
pn = pool.allocPages(1);
if (pn != OPFAIL)
goto L1;
}
+ /**
+ * Marks a range of memory using the conservative bit mask. Used for
+ * the stack, for the data segment, and additional memory ranges.
+ */
+ void mark_conservative(void* pbot, void* ptop)
+ {
+ mark(pbot, ptop, PointerMap.init.bits.ptr);
+ }
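+ // Assuming PointerMap.init describes a one-word type whose single scan
+ // bit is set, the mark() loop below then steps one word at a time and
+ // inspects every word in [pbot, ptop) -- the old conservative behaviour.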
+
+
/**
* Search a range of memory values and mark any pointers into the GC pool.
*/
- void mark(void *pbot, void *ptop)
+ void mark(void *pbot, void *ptop, size_t* pm_bitmask)
{
+ const BITS_PER_WORD = size_t.sizeof * 8;
+
void **p1 = cast(void **)pbot;
void **p2 = cast(void **)ptop;
size_t pcache = 0;
uint changes = 0;
+ // TODO: add option to be conservative
+ // force conservative scanning
+ //pm_bitmask = PointerMap.init.bits.ptr;
+
+ size_t type_size = pm_bitmask[0];
+ size_t* pm_bits = pm_bitmask + 1;
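+ // Format consumed here: pm_bitmask[0] is the type size in words
+ // (type_size); the following words form a bitmask where bit n set means
+ // word n of each object may hold a pointer and must be scanned.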
+
//printf("marking range: %p -> %p\n", pbot, ptop);
- for (; p1 < p2; p1++)
- {
- Pool *pool;
- byte *p = cast(byte *)(*p1);
+ for (; p1 + type_size <= p2; p1 += type_size) {
+ for (size_t n = 0; n < type_size; n++) {
+ // scan bit set for this word
+ if (!(pm_bits[n / BITS_PER_WORD] & (1 << (n % BITS_PER_WORD))))
+ continue;
+
+ void* p = *(p1 + n);
+
+ if (p < minAddr || p >= maxAddr)
+ continue;
- if (p >= minAddr && p < maxAddr)
- {
if ((cast(size_t)p & ~(PAGESIZE-1)) == pcache)
continue;
- pool = findPool(p);
+ Pool* pool = findPool(p);
if (pool)
{
size_t offset = cast(size_t)(p - pool.baseAddr);
- size_t biti;
+ size_t bit_i;
size_t pn = offset / PAGESIZE;
Bins bin = cast(Bins)pool.pagetable[pn];
// Adjust bit to be at start of allocated memory block
if (bin <= B_PAGE)
- biti = (offset & notbinsize[bin]) >> 4;
+ bit_i = (offset & notbinsize[bin]) >> 4;
else if (bin == B_PAGEPLUS)
{
do
--pn;
}
while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
- biti = pn * (PAGESIZE / 16);
+ bit_i = pn * (PAGESIZE / 16);
}
else
{
if (bin >= B_PAGE) // Cache B_PAGE and B_PAGEPLUS lookups
pcache = cast(size_t)p & ~(PAGESIZE-1);
- if (!pool.mark.test(biti))
+ if (!pool.mark.test(bit_i))
{
- pool.mark.set(biti);
- if (!pool.noscan.test(biti))
+ pool.mark.set(bit_i);
+ if (!pool.noscan.test(bit_i))
{
- pool.scan.set(biti);
+ pool.scan.set(bit_i);
changes = 1;
}
- log_parent(sentinel_add(pool.baseAddr + biti * 16), sentinel_add(pbot));
}
}
}
anychanges |= changes;
}
-
/**
* Return number of full pages free'd.
*/
size_t fullcollectshell()
{
+ stats.collection_started();
+ scope (exit)
+ stats.collection_finished();
+
// The purpose of the 'shell' is to ensure all the registers
// get put on the stack so they'll be scanned
void *sp;
debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
thread_suspendAll();
+ stats.world_stopped();
p_cache = null;
size_cache = 0;
anychanges = 0;
- for (n = 0; n < npools; n++)
+ for (n = 0; n < pools.length; n++)
{
- pool = pooltable[n];
+ pool = pools[n];
pool.mark.zero();
pool.scan.zero();
pool.freebits.zero();
}
}
- for (n = 0; n < npools; n++)
+ for (n = 0; n < pools.length; n++)
{
- pool = pooltable[n];
+ pool = pools[n];
pool.mark.copy(&pool.freebits);
}
- rt_scanStaticData( &mark );
+ rt_scanStaticData( &mark_conservative );
if (!noStack)
{
// Scan stacks and registers for each paused thread
- thread_scanAll( &mark, stackTop );
+ thread_scanAll( &mark_conservative, stackTop );
}
- // Scan roots[]
+ // Scan roots
debug(COLLECT_PRINTF) printf("scan roots[]\n");
- mark(roots, roots + nroots);
+ mark_conservative(roots.ptr, roots.ptr + roots.length);
- // Scan ranges[]
+ // Scan ranges
debug(COLLECT_PRINTF) printf("scan ranges[]\n");
//log++;
- for (n = 0; n < nranges; n++)
+ for (n = 0; n < ranges.length; n++)
{
debug(COLLECT_PRINTF) printf("\t%x .. %x\n", ranges[n].pbot, ranges[n].ptop);
- mark(ranges[n].pbot, ranges[n].ptop);
+ mark_conservative(ranges[n].pbot, ranges[n].ptop);
}
//log--;
while (anychanges)
{
anychanges = 0;
- for (n = 0; n < npools; n++)
+ for (n = 0; n < pools.length; n++)
{
uint *bbase;
uint *b;
uint *btop;
- pool = pooltable[n];
+ pool = pools[n];
bbase = pool.scan.base();
btop = bbase + pool.scan.nwords;
pn = cast(size_t)(o - pool.baseAddr) / PAGESIZE;
bin = cast(Bins)pool.pagetable[pn];
- if (bin < B_PAGE)
- {
- mark(o, o + binsize[bin]);
+ if (bin < B_PAGE) {
+ auto end_of_blk = cast(size_t**)(o + binsize[bin] -
+ (size_t*).sizeof);
+ size_t* pm_bitmask = *end_of_blk;
+ mark(o, end_of_blk, pm_bitmask);
}
else if (bin == B_PAGE || bin == B_PAGEPLUS)
{
pn--;
}
u = 1;
- while (pn + u < pool.npages && pool.pagetable[pn + u] == B_PAGEPLUS)
+ while (pn + u < pool.npages &&
+ pool.pagetable[pn + u] == B_PAGEPLUS)
u++;
- mark(o, o + u * PAGESIZE);
+
+ size_t blk_size = u * PAGESIZE;
+ auto end_of_blk = cast(size_t**)(o + blk_size -
+ (size_t*).sizeof);
+ size_t* pm_bitmask = *end_of_blk;
+ mark(o, end_of_blk, pm_bitmask);
}
}
}
}
thread_resumeAll();
+ stats.world_started();
// Free up everything not marked
debug(COLLECT_PRINTF) printf("\tfree'ing\n");
size_t freedpages = 0;
size_t freed = 0;
- for (n = 0; n < npools; n++)
+ for (n = 0; n < pools.length; n++)
{
- pool = pooltable[n];
+ pool = pools[n];
uint* bbase = pool.mark.base();
size_t pn;
for (pn = 0; pn < pool.npages; pn++, bbase += PAGESIZE / (32 * 16))
auto size = binsize[bin];
byte* p = pool.baseAddr + pn * PAGESIZE;
byte* ptop = p + PAGESIZE;
- size_t biti = pn * (PAGESIZE/16);
- size_t bitstride = size / 16;
+ size_t bit_i = pn * (PAGESIZE/16);
+ size_t bit_stride = size / 16;
version(none) // BUG: doesn't work because freebits() must also be cleared
{
// If free'd entire page
- if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 && bbase[3] == 0 &&
- bbase[4] == 0 && bbase[5] == 0 && bbase[6] == 0 && bbase[7] == 0)
+ if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 &&
+ bbase[3] == 0 && bbase[4] == 0 && bbase[5] == 0 &&
+ bbase[6] == 0 && bbase[7] == 0)
{
- for (; p < ptop; p += size, biti += bitstride)
+ for (; p < ptop; p += size, bit_i += bit_stride)
{
- if (pool.finals.nbits && pool.finals.testClear(biti))
- rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
- gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
+ if (pool.finals.nbits && pool.finals.testClear(bit_i)) {
+ if (opts.options.sentinel)
+ rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
+ else
+ rt_finalize(cast(List *)p, false/*noStack > 0*/);
+ }
+ gcx.clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
List *list = cast(List *)p;
- log_free(sentinel_add(list));
- debug (MEMSTOMP) cstring.memset(p, 0xF3, size);
+ if (opts.options.mem_stomp)
+ memset(p, 0xF3, size);
}
pool.pagetable[pn] = B_FREE;
freed += PAGESIZE;
continue;
}
}
- for (; p < ptop; p += size, biti += bitstride)
+ for (; p < ptop; p += size, bit_i += bit_stride)
{
- if (!pool.mark.test(biti))
+ if (!pool.mark.test(bit_i))
{
- sentinel_Invariant(sentinel_add(p));
-
- pool.freebits.set(biti);
- if (pool.finals.nbits && pool.finals.testClear(biti))
- rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
- clrBits(pool, biti, BlkAttr.ALL_BITS);
+ if (opts.options.sentinel)
+ sentinel_Invariant(sentinel_add(p));
+
+ pool.freebits.set(bit_i);
+ if (pool.finals.nbits && pool.finals.testClear(bit_i)) {
+ if (opts.options.sentinel)
+ rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
+ else
+ rt_finalize(cast(List *)p, false/*noStack > 0*/);
+ }
+ clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
List *list = cast(List *)p;
- log_free(sentinel_add(list));
- debug (MEMSTOMP) cstring.memset(p, 0xF3, size);
+ if (opts.options.mem_stomp)
+ memset(p, 0xF3, size);
freed += size;
}
}
else if (bin == B_PAGE)
{
- size_t biti = pn * (PAGESIZE / 16);
- if (!pool.mark.test(biti))
+ size_t bit_i = pn * (PAGESIZE / 16);
+ if (!pool.mark.test(bit_i))
{
byte *p = pool.baseAddr + pn * PAGESIZE;
- sentinel_Invariant(sentinel_add(p));
- if (pool.finals.nbits && pool.finals.testClear(biti))
- rt_finalize(sentinel_add(p), false/*noStack > 0*/);
- clrBits(pool, biti, BlkAttr.ALL_BITS);
+ if (opts.options.sentinel)
+ sentinel_Invariant(sentinel_add(p));
+ if (pool.finals.nbits && pool.finals.testClear(bit_i)) {
+ if (opts.options.sentinel)
+ rt_finalize(sentinel_add(p), false/*noStack > 0*/);
+ else
+ rt_finalize(p, false/*noStack > 0*/);
+ }
+ clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
debug(COLLECT_PRINTF) printf("\tcollecting big %x\n", p);
- log_free(sentinel_add(p));
pool.pagetable[pn] = B_FREE;
freedpages++;
- debug (MEMSTOMP) cstring.memset(p, 0xF3, PAGESIZE);
+ if (opts.options.mem_stomp)
+ memset(p, 0xF3, PAGESIZE);
while (pn + 1 < pool.npages && pool.pagetable[pn + 1] == B_PAGEPLUS)
{
pn++;
pool.pagetable[pn] = B_FREE;
freedpages++;
- debug (MEMSTOMP)
+ if (opts.options.mem_stomp)
{
p += PAGESIZE;
- cstring.memset(p, 0xF3, PAGESIZE);
+ memset(p, 0xF3, PAGESIZE);
}
}
}
// Free complete pages, rebuild free list
debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
size_t recoveredpages = 0;
- for (n = 0; n < npools; n++)
+ for (n = 0; n < pools.length; n++)
{
- pool = pooltable[n];
+ pool = pools[n];
for (size_t pn = 0; pn < pool.npages; pn++)
{
Bins bin = cast(Bins)pool.pagetable[pn];
- size_t biti;
+ size_t bit_i;
size_t u;
if (bin < B_PAGE)
{
size_t size = binsize[bin];
- size_t bitstride = size / 16;
- size_t bitbase = pn * (PAGESIZE / 16);
- size_t bittop = bitbase + (PAGESIZE / 16);
+ size_t bit_stride = size / 16;
+ size_t bit_base = pn * (PAGESIZE / 16);
+ size_t bit_top = bit_base + (PAGESIZE / 16);
byte* p;
- biti = bitbase;
- for (biti = bitbase; biti < bittop; biti += bitstride)
+ bit_i = bit_base;
+ for (; bit_i < bit_top; bit_i += bit_stride)
{
- if (!pool.freebits.test(biti))
+ if (!pool.freebits.test(bit_i))
goto Lnotfree;
}
pool.pagetable[pn] = B_FREE;
p = pool.baseAddr + pn * PAGESIZE;
for (u = 0; u < PAGESIZE; u += size)
{
- biti = bitbase + u / 16;
- if (pool.freebits.test(biti))
+ bit_i = bit_base + u / 16;
+ if (pool.freebits.test(bit_i))
{
List *list = cast(List *)(p + u);
- if (list.next != bucket[bin]) // avoid unnecessary writes
+ // avoid unnecessary writes
+ if (list.next != bucket[bin])
list.next = bucket[bin];
bucket[bin] = list;
}
}
debug(COLLECT_PRINTF) printf("recovered pages = %d\n", recoveredpages);
- debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, npools);
+ debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, pools.length);
return freedpages + recoveredpages;
}
/**
*
*/
- uint getBits(Pool* pool, size_t biti)
+ uint getAttr(Pool* pool, size_t bit_i)
in
{
assert( pool );
}
body
{
- uint bits;
+ uint attrs;
if (pool.finals.nbits &&
- pool.finals.test(biti))
- bits |= BlkAttr.FINALIZE;
- if (pool.noscan.test(biti))
- bits |= BlkAttr.NO_SCAN;
+ pool.finals.test(bit_i))
+ attrs |= BlkAttr.FINALIZE;
+ if (pool.noscan.test(bit_i))
+ attrs |= BlkAttr.NO_SCAN;
// if (pool.nomove.nbits &&
-// pool.nomove.test(biti))
-// bits |= BlkAttr.NO_MOVE;
- return bits;
+// pool.nomove.test(bit_i))
+// attrs |= BlkAttr.NO_MOVE;
+ return attrs;
}
/**
*
*/
- void setBits(Pool* pool, size_t biti, uint mask)
+ void setAttr(Pool* pool, size_t bit_i, uint mask)
in
{
assert( pool );
{
if (!pool.finals.nbits)
pool.finals.alloc(pool.mark.nbits);
- pool.finals.set(biti);
+ pool.finals.set(bit_i);
}
if (mask & BlkAttr.NO_SCAN)
{
- pool.noscan.set(biti);
+ pool.noscan.set(bit_i);
}
// if (mask & BlkAttr.NO_MOVE)
// {
// if (!pool.nomove.nbits)
// pool.nomove.alloc(pool.mark.nbits);
-// pool.nomove.set(biti);
+// pool.nomove.set(bit_i);
// }
}
/**
*
*/
- void clrBits(Pool* pool, size_t biti, uint mask)
+ void clrAttr(Pool* pool, size_t bit_i, uint mask)
in
{
assert( pool );
body
{
if (mask & BlkAttr.FINALIZE && pool.finals.nbits)
- pool.finals.clear(biti);
+ pool.finals.clear(bit_i);
if (mask & BlkAttr.NO_SCAN)
- pool.noscan.clear(biti);
+ pool.noscan.clear(bit_i);
// if (mask & BlkAttr.NO_MOVE && pool.nomove.nbits)
-// pool.nomove.clear(biti);
+// pool.nomove.clear(bit_i);
}
-
- /***** Leak Detector ******/
-
-
- debug (LOGGING)
- {
- LogArray current;
- LogArray prev;
-
-
- void log_init()
- {
- current.reserve(1000);
- prev.reserve(1000);
- }
-
-
- void log_malloc(void *p, size_t size)
- {
- Log log;
-
- log.p = p;
- log.size = size;
- log.line = GC.line;
- log.file = GC.file;
- log.parent = null;
-
- GC.line = 0;
- GC.file = null;
-
- current.push(log);
- }
-
-
- void log_free(void *p)
- {
- size_t i;
-
- i = current.find(p);
- if (i != OPFAIL)
- current.remove(i);
- }
-
-
- void log_collect()
- {
- // Print everything in current that is not in prev
- size_t used = 0;
- for (size_t i = 0; i < current.dim; i++)
- {
- size_t j;
-
- j = prev.find(current.data[i].p);
- if (j == OPFAIL)
- current.data[i].print();
- else
- used++;
- }
-
- for (size_t i = 0; i < current.dim; i++)
- {
- void *p;
- size_t j;
-
- p = current.data[i].p;
- if (!findPool(current.data[i].parent))
- {
- j = prev.find(current.data[i].p);
- current.data[i].print();
- }
- }
-
- prev.copy(&current);
- }
-
-
- void log_parent(void *p, void *parent)
- {
- size_t i;
-
- i = current.find(p);
- if (i == OPFAIL)
- {
- Pool *pool;
- pool = findPool(p);
- assert(pool);
- size_t offset = cast(size_t)(p - pool.baseAddr);
- size_t biti;
- size_t pn = offset / PAGESIZE;
- Bins bin = cast(Bins)pool.pagetable[pn];
- biti = (offset & notbinsize[bin]);
- }
- else
- current.data[i].parent = parent;
- }
-
- }
- else
- {
- void log_init() { }
- void log_malloc(void *p, size_t size) { }
- void log_free(void *p) { }
- void log_collect() { }
- void log_parent(void *p, void *parent) { }
- }
}
pagetable = cast(ubyte*) cstdlib.malloc(npages);
if (!pagetable)
onOutOfMemoryError();
- cstring.memset(pagetable, B_FREE, npages);
+ memset(pagetable, B_FREE, npages);
this.npages = npages;
}
baseAddr = null;
topAddr = null;
}
+ // See Gcx.Dtor() for the rationale of the null check.
if (pagetable)
cstdlib.free(pagetable);
*/
void freePages(size_t pagenum, size_t npages)
{
- cstring.memset(&pagetable[pagenum], B_FREE, npages);
+ memset(&pagetable[pagenum], B_FREE, npages);
}
/**
- * Used for sorting pooltable[]
+ * Used for sorting pools
*/
- int opCmp(Pool *p2)
+ int opCmp(in Pool other)
{
- if (baseAddr < p2.baseAddr)
+ if (baseAddr < other.baseAddr)
return -1;
else
- return cast(int)(baseAddr > p2.baseAddr);
+ return cast(int)(baseAddr > other.baseAddr);
}
}
/* ============================ SENTINEL =============================== */
-version (SENTINEL)
-{
- const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
- const ubyte SENTINEL_POST = 0xF5; // 8 bits
- const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;
-
-
- size_t* sentinel_size(void *p) { return &(cast(size_t *)p)[-2]; }
- size_t* sentinel_pre(void *p) { return &(cast(size_t *)p)[-1]; }
- ubyte* sentinel_post(void *p) { return &(cast(ubyte *)p)[*sentinel_size(p)]; }
+const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
+const ubyte SENTINEL_POST = 0xF5; // 8 bits
+const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;
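+
+// Layout implied by the accessors below (sketch):
+//   [ size_t size | size_t SENTINEL_PRE | user data ... | ubyte SENTINEL_POST ]
+// sentinel_add()/sentinel_sub() step over the two leading words.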
- void sentinel_init(void *p, size_t size)
- {
- *sentinel_size(p) = size;
- *sentinel_pre(p) = SENTINEL_PRE;
- *sentinel_post(p) = SENTINEL_POST;
- }
-
-
- void sentinel_Invariant(void *p)
- {
- assert(*sentinel_pre(p) == SENTINEL_PRE);
- assert(*sentinel_post(p) == SENTINEL_POST);
- }
-
-
- void *sentinel_add(void *p)
- {
- return p + 2 * size_t.sizeof;
- }
+size_t* sentinel_size(void *p) { return &(cast(size_t *)p)[-2]; }
+size_t* sentinel_pre(void *p) { return &(cast(size_t *)p)[-1]; }
+ubyte* sentinel_post(void *p) { return &(cast(ubyte *)p)[*sentinel_size(p)]; }
- void *sentinel_sub(void *p)
- {
- return p - 2 * size_t.sizeof;
- }
-}
-else
+void sentinel_init(void *p, size_t size)
{
- const uint SENTINEL_EXTRA = 0;
-
-
- void sentinel_init(void *p, size_t size)
- {
- }
+ *sentinel_size(p) = size;
+ *sentinel_pre(p) = SENTINEL_PRE;
+ *sentinel_post(p) = SENTINEL_POST;
+}
- void sentinel_Invariant(void *p)
- {
- }
+void sentinel_Invariant(void *p)
+{
+ assert(*sentinel_pre(p) == SENTINEL_PRE);
+ assert(*sentinel_post(p) == SENTINEL_POST);
+}
- void *sentinel_add(void *p)
- {
- return p;
- }
+void *sentinel_add(void *p)
+{
+ return p + 2 * size_t.sizeof;
+}
- void *sentinel_sub(void *p)
- {
- return p;
- }
+void *sentinel_sub(void *p)
+{
+ return p - 2 * size_t.sizeof;
}