struct List
{
- List *next;
+ List* next;
+ Pool* pool;
}
dynarray.DynArray!(void*) roots;
dynarray.DynArray!(Range) ranges;
- dynarray.DynArray!(Pool) pools;
+ dynarray.DynArray!(Pool*) pools;
Stats stats;
}
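
The new `pool` field makes each free-list node self-describing. A sketch of what the back-pointer buys (illustrative, not part of the patch): resolving a free-list node to its owning pool used to require `findPool()`'s search over `gc.pools`; now it is a single load.

Pool* ownerOf(List* node)
{
    // before: return findPool(cast(void*) node);
    return node.pool; // set when the page is converted to a free list
}
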
if (i == 0)
assert(gc.min_addr == pool.baseAddr);
if (i + 1 < gc.pools.length)
- assert(*pool < gc.pools[i + 1]);
+ assert(*pool < *gc.pools[i + 1]);
else if (i + 1 == gc.pools.length)
assert(gc.max_addr == pool.topAddr);
}
assert(gc.ranges[i].pbot <= gc.ranges[i].ptop);
}
- for (size_t i = 0; i < B_PAGE; i++)
- for (List *list = gc.free_list[i]; list; list = list.next)
- {
+ for (size_t i = 0; i < B_PAGE; i++) {
+ for (List *list = gc.free_list[i]; list; list = list.next) {
+ assert (list.pool !is null);
+ auto p = cast(byte*) list;
+ assert (p >= list.pool.baseAddr);
+ assert (p < list.pool.topAddr);
}
+ }
}
return true;
}
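
Keeping the pools sorted by address is what makes the `min_addr`/`max_addr` early-out valid and allows a pointer lookup to binary-search the pools array. A self-contained sketch of such a lookup, assuming only `baseAddr`/`topAddr` (stub type, not the real Pool):

struct PoolStub { byte* baseAddr, topAddr; } // stand-in for the real Pool

PoolStub* findPoolSorted(PoolStub*[] pools, void* p)
{
    size_t lo = 0, hi = pools.length;
    while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;
        if (cast(byte*) p < pools[mid].baseAddr)
            hi = mid;
        else if (cast(byte*) p >= pools[mid].topAddr)
            lo = mid + 1;
        else
            return pools[mid]; // p points into this pool
    }
    return null; // not GC memory
}
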
{
size_t n;
size_t pn;
- Pool *pool;
+ Pool* pool;
for (n = 0; n < gc.pools.length; n++)
{
if (pn < pool.npages)
continue;
pool.Dtor();
+ cstdlib.free(pool);
gc.pools.remove_at(n);
n--;
}
* Allocate a chunk of memory that is larger than a page.
* Return null if out of memory.
*/
-void *bigAlloc(size_t size)
+void* bigAlloc(size_t size, out Pool* pool)
{
- Pool* pool;
size_t npages;
size_t n;
size_t pn;
npages = n;
}
- Pool p;
- p.initialize(npages);
- if (!p.baseAddr)
+ auto pool = cast(Pool*) cstdlib.calloc(1, Pool.sizeof);
+ if (pool is null)
+ return null;
+ pool.initialize(npages);
+ if (!pool.baseAddr)
{
- p.Dtor();
+ pool.Dtor();
return null;
}
- Pool* pool = gc.pools.insert_sorted(p);
- if (pool)
- {
- gc.min_addr = gc.pools[0].baseAddr;
- gc.max_addr = gc.pools[gc.pools.length - 1].topAddr;
+ // insert_sorted() returns a pointer to the inserted slot (null if the
+ // array could not grow), so check for null before dereferencing
+ auto inserted_pool = gc.pools.insert_sorted!("*a < *b")(pool);
+ if (inserted_pool is null) {
+ pool.Dtor();
+ return null;
}
+ assert (*inserted_pool is pool);
+ gc.min_addr = gc.pools[0].baseAddr;
+ gc.max_addr = gc.pools[gc.pools.length - 1].topAddr;
return pool;
}
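
The `"*a < *b"` predicate keeps the array of pointers ordered by pool address: the mixed-in expression dereferences the two `Pool*` elements and falls back on `Pool`'s `opCmp`, which (as the invariant above suggests) orders pools by `baseAddr`. A sketch of the equivalent comparison, under that assumption:

// Equivalent boolean form of the "*a < *b" string predicate for Pool*:
bool poolLess(Pool* a, Pool* b)
{
    return *a < *b; // Pool.opCmp, assumed to compare baseAddr
}
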
// Convert page to free list
size_t size = binsize[bin];
- List **b = &gc.free_list[bin];
+ auto list_head = &gc.free_list[bin];
p = pool.baseAddr + pn * PAGESIZE;
ptop = p + PAGESIZE;
for (; p < ptop; p += size)
{
- (cast(List *)p).next = *b;
- *b = cast(List *)p;
+ List* l = cast(List*) p;
+ l.next = *list_head;
+ l.pool = pool;
+ *list_head = l;
}
return 1;
}
/**
- * Marks a range of memory using the conservative bit mask. Used for
- * the stack, for the data segment, and additional memory ranges.
+ * Search a range of memory values and mark any pointers into the GC pool using
+ * type information (bitmask of pointer locations).
*/
-void mark_conservative(void* pbot, void* ptop)
-{
- mark(pbot, ptop, PointerMap.init.bits.ptr);
-}
-
-
-/**
- * Search a range of memory values and mark any pointers into the GC pool.
- */
-void mark(void *pbot, void *ptop, size_t* pm_bitmask)
+void mark_range(void *pbot, void *ptop, size_t* pm_bitmask)
{
// TODO: make our own assert because assert uses the GC
assert (pbot <= ptop);
void **p1 = cast(void **)pbot;
void **p2 = cast(void **)ptop;
size_t pcache = 0;
- uint changes = 0;
+ bool changes = false;
size_t type_size = pm_bitmask[0];
size_t* pm_bits = pm_bitmask + 1;
if (pool)
{
size_t offset = cast(size_t)(p - pool.baseAddr);
- size_t bit_i;
+ size_t bit_i = void;
size_t pn = offset / PAGESIZE;
Bins bin = cast(Bins)pool.pagetable[pn];
+ // Cache B_PAGE, B_PAGEPLUS and B_FREE lookups
+ if (bin >= B_PAGE)
+ pcache = cast(size_t)p & ~(PAGESIZE-1);
+
// Adjust bit to be at start of allocated memory block
if (bin <= B_PAGE)
- bit_i = (offset & notbinsize[bin]) >> 4;
+ bit_i = (offset & notbinsize[bin]) / 16;
else if (bin == B_PAGEPLUS)
{
do
while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
bit_i = pn * (PAGESIZE / 16);
}
- else
- {
- // Don't mark bits in B_FREE pages
+ else // Don't mark bits in B_FREE pages
continue;
- }
-
- if (bin >= B_PAGE) // Cache B_PAGE and B_PAGEPLUS lookups
- pcache = cast(size_t)p & ~(PAGESIZE-1);
if (!pool.mark.test(bit_i))
{
if (!pool.noscan.test(bit_i))
{
pool.scan.set(bit_i);
- changes = 1;
+ changes = true;
}
}
}
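
All the bit sets (mark, scan, freebits, noscan) are indexed at a 16-byte granularity, the minimum allocation size, which is why offsets are divided by 16 throughout. A self-contained sketch of the addressing:

// One bit per 16-byte granule: a pointer's bit index within its pool is
// its byte offset from the pool base divided by the granule size.
enum GRANULE = 16;

size_t bitIndex(byte* baseAddr, void* p)
{
    return cast(size_t)(cast(byte*) p - baseAddr) / GRANULE;
}
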
*/
size_t fullcollect(void *stackTop)
{
- size_t n;
- Pool* pool;
-
debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
+ // we always need to stop the world so the threads save their CPU
+ // registers on the stack and prepare themselves for thread_scanAll()
thread_suspendAll();
gc.stats.world_stopped();
+ if (opts.options.fork) {
+ os.pid_t child_pid = os.fork();
+ assert (child_pid != -1); // don't accept errors in non-release mode
+ switch (child_pid) {
+ case -1: // if fork() fails, fall back to stop-the-world collection
+ opts.options.fork = false;
+ break;
+ case 0: // child process (i.e. the collector's mark phase)
+ mark(stackTop);
+ cstdlib.exit(0);
+ break; // unreachable, exit() never returns
+ default: // parent process (i.e. the mutator)
+ // start the world again and wait for the mark phase to finish
+ thread_resumeAll();
+ gc.stats.world_started();
+ int status = void;
+ os.pid_t wait_pid = os.waitpid(child_pid, &status, 0);
+ assert (wait_pid == child_pid);
+ return sweep();
+ }
+
+ }
+
+ // if we reach here, we are using the standard stop-the-world collection
+ mark(stackTop);
+ thread_resumeAll();
+ gc.stats.world_started();
+
+ return sweep();
+}
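
The concurrent mode relies on fork()'s copy-on-write semantics: the child marks an immutable snapshot of the heap while the resumed mutator threads keep running, and only the shared mark/freebits are visible to both sides. A minimal, self-contained sketch of the pattern (POSIX-only, error handling trimmed; not cdgc's os module):

import core.sys.posix.unistd : fork, _exit;
import core.sys.posix.sys.wait : waitpid;

void runOnSnapshot(void delegate() work)
{
    auto pid = fork();
    if (pid == 0) {     // child: sees a copy-on-write snapshot of the heap
        work();
        _exit(0);       // skip atexit handlers/destructors in the child
    }
    int status;
    waitpid(pid, &status, 0); // parent: reap the snapshot worker
}
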
+
+
+/**
+ * Run the mark phase: scan the static data, thread stacks and registers,
+ * roots and ranges, and then iterate over the heap until every reachable
+ * block is marked.
+ */
+void mark(void *stackTop)
+{
+ debug(COLLECT_PRINTF) printf("\tmark()\n");
+
gc.p_cache = null;
gc.size_cache = 0;
gc.any_changes = false;
- for (n = 0; n < gc.pools.length; n++)
+ for (size_t n = 0; n < gc.pools.length; n++)
{
- pool = gc.pools[n];
+ Pool* pool = gc.pools[n];
pool.mark.zero();
pool.scan.zero();
pool.freebits.zero();
}
// Mark each free entry, so it doesn't get scanned
- for (n = 0; n < B_PAGE; n++)
+ for (size_t n = 0; n < B_PAGE; n++)
{
for (List *list = gc.free_list[n]; list; list = list.next)
{
- pool = findPool(list);
- assert(pool);
- pool.freebits.set(cast(size_t)(cast(byte*)list - pool.baseAddr) / 16);
+ Pool* pool = list.pool;
+ auto ptr = cast(byte*) list;
+ assert (pool);
+ assert (pool.baseAddr <= ptr);
+ assert (ptr < pool.topAddr);
+ size_t bit_i = cast(size_t)(ptr - pool.baseAddr) / 16;
+ pool.freebits.set(bit_i);
}
}
- for (n = 0; n < gc.pools.length; n++)
+ for (size_t n = 0; n < gc.pools.length; n++)
{
- pool = gc.pools[n];
+ Pool* pool = gc.pools[n];
pool.mark.copy(&pool.freebits);
}
- void mark_conservative_dg(void* pbot, void* ptop)
+ /// Marks a range of memory in conservative mode.
+ void mark_conservative_range(void* pbot, void* ptop)
{
- mark_conservative(pbot, ptop);
+ mark_range(pbot, ptop, PointerMap.init.bits.ptr);
}
- rt_scanStaticData(&mark_conservative_dg);
+ rt_scanStaticData(&mark_conservative_range);
if (!gc.no_stack)
{
// Scan stacks and registers for each paused thread
- thread_scanAll(&mark_conservative_dg, stackTop);
+ thread_scanAll(&mark_conservative_range, stackTop);
}
// Scan roots
debug(COLLECT_PRINTF) printf("scan roots[]\n");
- mark_conservative(gc.roots.ptr, gc.roots.ptr + gc.roots.length);
+ mark_conservative_range(gc.roots.ptr, gc.roots.ptr + gc.roots.length);
// Scan ranges
debug(COLLECT_PRINTF) printf("scan ranges[]\n");
- for (n = 0; n < gc.ranges.length; n++)
+ for (size_t n = 0; n < gc.ranges.length; n++)
{
debug(COLLECT_PRINTF) printf("\t%x .. %x\n", gc.ranges[n].pbot, gc.ranges[n].ptop);
- mark_conservative(gc.ranges[n].pbot, gc.ranges[n].ptop);
+ mark_conservative_range(gc.ranges[n].pbot, gc.ranges[n].ptop);
}
debug(COLLECT_PRINTF) printf("\tscan heap\n");
while (gc.any_changes)
{
gc.any_changes = false;
- for (n = 0; n < gc.pools.length; n++)
+ for (size_t n = 0; n < gc.pools.length; n++)
{
uint *bbase;
uint *b;
uint *btop;
- pool = gc.pools[n];
+ Pool* pool = gc.pools[n];
bbase = pool.scan.base();
btop = bbase + pool.scan.nwords;
bin = cast(Bins)pool.pagetable[pn];
if (bin < B_PAGE) {
if (opts.options.conservative)
- mark_conservative(o, o + binsize[bin]);
+ mark_conservative_range(o, o + binsize[bin]);
else {
auto end_of_blk = cast(size_t**)(o +
binsize[bin] - size_t.sizeof);
size_t* pm_bitmask = *end_of_blk;
- mark(o, end_of_blk, pm_bitmask);
+ mark_range(o, end_of_blk, pm_bitmask);
}
}
else if (bin == B_PAGE || bin == B_PAGEPLUS)
size_t blk_size = u * PAGESIZE;
if (opts.options.conservative)
- mark_conservative(o, o + blk_size);
+ mark_conservative_range(o, o + blk_size);
else {
auto end_of_blk = cast(size_t**)(o + blk_size -
size_t.sizeof);
size_t* pm_bitmask = *end_of_blk;
- mark(o, end_of_blk, pm_bitmask);
+ mark_range(o, end_of_blk, pm_bitmask);
}
}
}
}
}
}
-
- thread_resumeAll();
- gc.stats.world_started();
-
- return sweep();
}
{
if (pool.finals.nbits && pool.finals.testClear(bit_i)) {
if (opts.options.sentinel)
- rt_finalize(cast(List *)sentinel_add(p), false/*gc.no_stack > 0*/);
+ rt_finalize(sentinel_add(p), false/*gc.no_stack > 0*/);
else
- rt_finalize(cast(List *)p, false/*gc.no_stack > 0*/);
+ rt_finalize(p, false/*gc.no_stack > 0*/);
}
clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
- List *list = cast(List *)p;
-
if (opts.options.mem_stomp)
memset(p, 0xF3, size);
}
pool.freebits.set(bit_i);
if (pool.finals.nbits && pool.finals.testClear(bit_i)) {
if (opts.options.sentinel)
- rt_finalize(cast(List *)sentinel_add(p), false/*gc.no_stack > 0*/);
+ rt_finalize(sentinel_add(p), false/*gc.no_stack > 0*/);
else
- rt_finalize(cast(List *)p, false/*gc.no_stack > 0*/);
+ rt_finalize(p, false/*gc.no_stack > 0*/);
}
clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
- List *list = cast(List *)p;
-
if (opts.options.mem_stomp)
memset(p, 0xF3, size);
bit_i = bit_base + u / 16;
if (pool.freebits.test(bit_i))
{
- List *list = cast(List *)(p + u);
- // avoid unnecessary writes
+ assert ((p+u) >= pool.baseAddr);
+ assert ((p+u) < pool.topAddr);
+ List* list = cast(List*) (p + u);
+ // avoid unnecessary writes (it really saves time)
if (list.next != gc.free_list[bin])
list.next = gc.free_list[bin];
+ if (list.pool != pool)
+ list.pool = pool;
gc.free_list[bin] = list;
}
}
int dummy;
gc.stack_bottom = cast(char*)&dummy;
opts.parse(cstdlib.getenv("D_GC_OPTS"));
+ // If we are going to fork, make sure we have the needed OS support
+ if (opts.options.fork)
+ opts.options.fork = os.HAVE_SHARED && os.HAVE_FORK;
gc.lock = GCLock.classinfo;
gc.inited = 1;
setStackBottom(rt_stackBottom());
lastbin = bin;
}
- size_t capacity; // to figure out where to store the bitmask
+ Pool* pool = void;
+ size_t capacity = void; // to figure out where to store the bitmask
if (bin < B_PAGE)
{
p = gc.free_list[bin];
if (!gc.free_list[bin] && !allocPage(bin))
{
newPool(1); // allocate new pool to find a new page
+ // TODO: hint allocPage() to use the pool we just created
int result = allocPage(bin);
if (!result)
onOutOfMemoryError();
capacity = binsize[bin];
// Return next item from free list
- gc.free_list[bin] = (cast(List*)p).next;
+ List* list = cast(List*) p;
+ assert ((cast(byte*)list) >= list.pool.baseAddr);
+ assert ((cast(byte*)list) < list.pool.topAddr);
+ gc.free_list[bin] = list.next;
+ pool = list.pool;
if (!(attrs & BlkAttr.NO_SCAN))
memset(p + size, 0, capacity - size);
if (opts.options.mem_stomp)
}
else
{
- p = bigAlloc(size);
+ p = bigAlloc(size, pool);
if (!p)
onOutOfMemoryError();
+ assert (pool !is null);
// Round the size up to the number of pages needed to store it
size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
capacity = npages * PAGESIZE;
}
if (attrs)
- {
- Pool *pool = findPool(p);
- assert(pool);
-
setAttr(pool, cast(size_t)(p - pool.baseAddr) / 16, attrs);
- }
+
return p;
}
memset(p + size - pm_bitmask_size, 0xF2,
blk_size - size - pm_bitmask_size);
pool.freePages(pagenum + newsz, psz - newsz);
+ auto new_blk_size = (PAGESIZE * newsz);
+ // update the size cache, assuming it is very likely that the
+ // size of this block will be queried in the near future
+ pool.update_cache(p, new_blk_size);
if (has_pm) {
- auto end_of_blk = cast(size_t**)(
- blk_base_addr + (PAGESIZE * newsz) -
- pm_bitmask_size);
+ auto end_of_blk = cast(size_t**)(blk_base_addr +
+ new_blk_size - pm_bitmask_size);
*end_of_blk = pm_bitmask;
}
return p;
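
The realloc/extend paths keep the precise-scanning bitmask pointer in the last word of the block, so it must be re-stored whenever the block's size changes. A sketch of that layout (illustrative helper, assuming the same convention):

// When precise heap scanning is enabled, the last size_t of every block
// holds a pointer to the type's pointer-location bitmask:
//
//   [ user data ........................... ][ size_t* pm_bitmask ]
//    ^ blk_base_addr                           ^ last word of the block
size_t* readPmBitmask(void* blk_base_addr, size_t blk_size)
{
    auto end_of_blk = cast(size_t**)(
            cast(byte*) blk_base_addr + blk_size - size_t.sizeof);
    return *end_of_blk;
}
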
- pm_bitmask_size);
memset(pool.pagetable + pagenum +
psz, B_PAGEPLUS, newsz - psz);
+ auto new_blk_size = (PAGESIZE * newsz);
+ // update the size cache, assuming it is very
+ // likely that the size of this block will be
+ // queried in the near future
+ pool.update_cache(p, new_blk_size);
if (has_pm) {
auto end_of_blk = cast(size_t**)(
- blk_base_addr +
- (PAGESIZE * newsz) -
+ blk_base_addr + new_blk_size -
pm_bitmask_size);
*end_of_blk = pm_bitmask;
}
memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
gc.p_cache = null;
gc.size_cache = 0;
+ // update the size cache, assuming it is very likely that the size of
+ // this block will be queried in the near future
+ pool.update_cache(p, new_size);
if (has_pm) {
new_size -= size_t.sizeof;
if (opts.options.mem_stomp)
memset(p, 0xF2, npages * PAGESIZE);
pool.freePages(pagenum, npages);
+ // just in case we were caching this pointer
+ pool.clear_cache(p);
}
else
{
// Add to free list
- List *list = cast(List*)p;
+ List* list = cast(List*) p;
if (opts.options.mem_stomp)
memset(p, 0xF2, binsize[bin]);
list.next = gc.free_list[bin];
+ list.pool = pool;
gc.free_list[bin] = list;
}
}
if (bin < B_PAGE)
{
// Check that p is not on a free list
- List *list;
-
- for (list = gc.free_list[bin]; list; list = list.next)
+ for (List* list = gc.free_list[bin]; list; list = list.next)
{
assert(cast(void*)list != p);
}
for (n = 0; n < B_PAGE; n++)
{
- for (List *list = gc.free_list[n]; list; list = list.next)
+ for (List* list = gc.free_list[n]; list; list = list.next)
flsize += binsize[n];
}
size_t cached_size;
void* cached_ptr;
- void clear_cache()
+ void clear_cache(void* ptr = null)
{
- this.cached_ptr = null;
- this.cached_size = 0;
+ if (ptr is null || ptr is this.cached_ptr) {
+ this.cached_ptr = null;
+ this.cached_size = 0;
+ }
+ }
+
+ void update_cache(void* ptr, size_t size)
+ {
+ this.cached_ptr = ptr;
+ this.cached_size = size;
}
void initialize(size_t npages)
npages = 0;
poolsize = 0;
}
- //assert(baseAddr);
topAddr = baseAddr + poolsize;
- mark.alloc(cast(size_t)poolsize / 16);
- scan.alloc(cast(size_t)poolsize / 16);
- freebits.alloc(cast(size_t)poolsize / 16);
- noscan.alloc(cast(size_t)poolsize / 16);
+ size_t nbits = cast(size_t)poolsize / 16;
+
+ // if the GC will run in parallel in a fork()ed process, we need to
+ // share the mark bits
+ os.Vis vis = os.Vis.PRIV;
+ if (opts.options.fork)
+ vis = os.Vis.SHARED;
+ mark.alloc(nbits, vis); // shared between mark and sweep
+ freebits.alloc(nbits, vis); // ditto
+ scan.alloc(nbits); // only used in the mark phase
+ finals.alloc(nbits); // mark phase *MUST* have a snapshot
+ noscan.alloc(nbits); // ditto
pagetable = cast(ubyte*) cstdlib.malloc(npages);
if (!pagetable)
if (pagetable)
cstdlib.free(pagetable);
- mark.Dtor();
+ os.Vis vis = os.Vis.PRIV;
+ if (opts.options.fork)
+ vis = os.Vis.SHARED;
+ mark.Dtor(vis);
+ freebits.Dtor(vis);
scan.Dtor();
- freebits.Dtor();
finals.Dtor();
noscan.Dtor();
}
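
`os.Vis.SHARED` is assumed here to map to anonymous shared memory, so mark-bit updates made by the forked child are visible when the parent sweeps; privately-mapped sets instead give the child a copy-on-write snapshot. A sketch of what such an allocation looks like on POSIX (illustrative, not cdgc's os module):

import core.sys.posix.sys.mman;

// Allocate nbytes visible to both parent and child across fork() when
// shared_ is true; otherwise the child only sees a copy-on-write snapshot.
void* allocVis(size_t nbytes, bool shared_)
{
    int flags = (shared_ ? MAP_SHARED : MAP_PRIVATE) | MAP_ANON;
    void* p = mmap(null, nbytes, PROT_READ | PROT_WRITE, flags, -1, 0);
    return p == MAP_FAILED ? null : p;
}
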
void sentinel_Invariant(void *p)
{
- assert(*sentinel_pre(p) == SENTINEL_PRE);
- assert(*sentinel_post(p) == SENTINEL_POST);
+ // avoid assert() here: it would end up using the GC itself (see the
+ // TODO about making our own assert in mark_range())
+ if (*sentinel_pre(p) != SENTINEL_PRE ||
+ *sentinel_post(p) != SENTINEL_POST)
+ cstdlib.abort();
}