struct List
{
- List *next;
+ List* next;
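+ // pool this free node belongs to (lets the GC skip findPool() lookups)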
+ Pool* pool;
}
dynarray.DynArray!(void*) roots;
dynarray.DynArray!(Range) ranges;
- dynarray.DynArray!(Pool) pools;
+ dynarray.DynArray!(Pool*) pools;
Stats stats;
}
if (i == 0)
assert(gc.min_addr == pool.baseAddr);
if (i + 1 < gc.pools.length)
- assert(*pool < gc.pools[i + 1]);
+ assert(*pool < *gc.pools[i + 1]);
else if (i + 1 == gc.pools.length)
assert(gc.max_addr == pool.topAddr);
}
assert(gc.ranges[i].pbot <= gc.ranges[i].ptop);
}
- for (size_t i = 0; i < B_PAGE; i++)
- for (List *list = gc.free_list[i]; list; list = list.next)
- {
+ for (size_t i = 0; i < B_PAGE; i++) {
+ for (List *list = gc.free_list[i]; list; list = list.next) {
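+ // every free-list node must point back into its owning pool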
+ assert (list.pool !is null);
+ auto p = cast(byte*) list;
+ assert (p >= list.pool.baseAddr);
+ assert (p < list.pool.topAddr);
}
+ }
}
return true;
}
if (pn < pool.npages)
continue;
pool.Dtor();
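+ // pools are now allocated with cstdlib.calloc(), so free the Pool struct too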
+ cstdlib.free(pool);
gc.pools.remove_at(n);
n--;
}
* Allocate a chunk of memory that is larger than a page.
* Return null if out of memory.
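+ * The pool the memory was allocated from is returned through the 'pool'
+ * out parameter.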
*/
-void *bigAlloc(size_t size)
+void* bigAlloc(size_t size, out Pool* pool)
{
- Pool* pool;
size_t npages;
size_t n;
size_t pn;
npages = n;
}
- Pool p;
- p.initialize(npages);
- if (!p.baseAddr)
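+ // allocate the Pool on the C heap; gc.pools now stores Pool*, so a pool
+ // keeps a stable address even when the array grows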
+ auto pool = cast(Pool*) cstdlib.calloc(1, Pool.sizeof);
+ if (pool is null)
+ return null;
+ pool.initialize(npages);
+ if (!pool.baseAddr)
{
- p.Dtor();
+ pool.Dtor();
return null;
}
- Pool* pool = gc.pools.insert_sorted(p);
- if (pool)
- {
- gc.min_addr = gc.pools[0].baseAddr;
- gc.max_addr = gc.pools[gc.pools.length - 1].topAddr;
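+ // gc.pools holds pointers now, so sort by comparing the pointed-to pools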
+ auto inserted_pool = *gc.pools.insert_sorted!("*a < *b")(pool);
+ if (inserted_pool is null) {
+ pool.Dtor();
+ return null;
}
+ assert (inserted_pool is pool);
+ gc.min_addr = gc.pools[0].baseAddr;
+ gc.max_addr = gc.pools[gc.pools.length - 1].topAddr;
return pool;
}
// Convert page to free list
size_t size = binsize[bin];
- List **b = &gc.free_list[bin];
+ auto list_head = &gc.free_list[bin];
p = pool.baseAddr + pn * PAGESIZE;
ptop = p + PAGESIZE;
for (; p < ptop; p += size)
{
- (cast(List *)p).next = *b;
- *b = cast(List *)p;
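+ // link the chunk into the free list and record its owning pool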
+ List* l = cast(List *) p;
+ l.next = *list_head;
+ l.pool = pool;
+ *list_head = l;
}
return 1;
}
size_t fullcollect(void *stackTop)
{
debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
+
+ // we always need to stop the world so threads save their CPU registers
+ // on the stack and prepare themselves for thread_scanAll()
+ thread_suspendAll();
+ gc.stats.world_stopped();
+
+ if (opts.options.fork) {
+ os.pid_t child_pid = os.fork();
+ assert (child_pid != -1); // don't accept errors in non-release mode
+ switch (child_pid) {
+ case -1: // if fork() fails, fall back to stop-the-world
+ opts.options.fork = false;
+ break;
+ case 0: // child process (i.e. the collector's mark phase)
+ mark(stackTop);
+ cstdlib.exit(0);
+ break; // bogus, will never reach here
+ default: // parent process (i.e. the mutator)
+ // start the world again and wait for the mark phase to finish
+ thread_resumeAll();
+ gc.stats.world_started();
+ int status = void;
+ os.pid_t wait_pid = os.waitpid(child_pid, &status, 0);
+ assert (wait_pid == child_pid);
+ return sweep();
+ }
+
+ }
+
+ // if we reach here, we are using the standard stop-the-world collection
mark(stackTop);
+ thread_resumeAll();
+ gc.stats.world_started();
+
return sweep();
}
{
debug(COLLECT_PRINTF) printf("\tmark()\n");
- thread_suspendAll();
- gc.stats.world_stopped();
-
gc.p_cache = null;
gc.size_cache = 0;
{
for (List *list = gc.free_list[n]; list; list = list.next)
{
- Pool* pool = findPool(list);
- assert(pool);
- pool.freebits.set(cast(size_t)(cast(byte*)list - pool.baseAddr) / 16);
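+ // the free-list node already knows its pool, no findPool() scan needed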
+ Pool* pool = list.pool;
+ auto ptr = cast(byte*) list;
+ assert (pool);
+ assert (pool.baseAddr <= ptr);
+ assert (ptr < pool.topAddr);
+ size_t bit_i = cast(size_t)(ptr - pool.baseAddr) / 16;
+ pool.freebits.set(bit_i);
}
}
}
}
}
-
- thread_resumeAll();
- gc.stats.world_started();
}
bit_i = bit_base + u / 16;
if (pool.freebits.test(bit_i))
{
- List *list = cast(List *)(p + u);
- // avoid unnecessary writes
+ assert ((p+u) >= pool.baseAddr);
+ assert ((p+u) < pool.topAddr);
+ List* list = cast(List*) (p + u);
+ // avoid unnecessary writes (it really saves time)
if (list.next != gc.free_list[bin])
list.next = gc.free_list[bin];
+ if (list.pool != pool)
+ list.pool = pool;
gc.free_list[bin] = list;
}
}
int dummy;
gc.stack_bottom = cast(char*)&dummy;
opts.parse(cstdlib.getenv("D_GC_OPTS"));
+ // If we are going to fork, make sure we have the needed OS support
+ if (opts.options.fork)
+ opts.options.fork = os.HAVE_SHARED && os.HAVE_FORK;
gc.lock = GCLock.classinfo;
gc.inited = 1;
setStackBottom(rt_stackBottom());
lastbin = bin;
}
- size_t capacity; // to figure out where to store the bitmask
+ Pool* pool = void;
+ size_t capacity = void; // to figure out where to store the bitmask
if (bin < B_PAGE)
{
p = gc.free_list[bin];
capacity = binsize[bin];
// Return next item from free list
- gc.free_list[bin] = (cast(List*)p).next;
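+ // pop the head of the free list and remember which pool it belongs to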
+ List* list = cast(List*) p;
+ assert ((cast(byte*)list) >= list.pool.baseAddr);
+ assert ((cast(byte*)list) < list.pool.topAddr);
+ gc.free_list[bin] = list.next;
+ pool = list.pool;
if (!(attrs & BlkAttr.NO_SCAN))
memset(p + size, 0, capacity - size);
if (opts.options.mem_stomp)
}
else
{
- p = bigAlloc(size);
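+ // bigAlloc() also reports the pool the block was allocated from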
+ p = bigAlloc(size, pool);
if (!p)
onOutOfMemoryError();
+ assert (pool !is null);
// Round the size up to the number of pages needed to store it
size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
capacity = npages * PAGESIZE;
}
if (attrs)
- {
- Pool *pool = findPool(p);
- assert(pool);
-
setAttr(pool, cast(size_t)(p - pool.baseAddr) / 16, attrs);
- }
+
return p;
}
memset(p, 0xF2, binsize[bin]);
list.next = gc.free_list[bin];
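+ // record the owning pool when the block goes back to the free list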
+ list.pool = pool;
gc.free_list[bin] = list;
}
}
//assert(baseAddr);
topAddr = baseAddr + poolsize;
- mark.alloc(cast(size_t)poolsize / 16);
- scan.alloc(cast(size_t)poolsize / 16);
- freebits.alloc(cast(size_t)poolsize / 16);
- noscan.alloc(cast(size_t)poolsize / 16);
+ size_t nbits = cast(size_t)poolsize / 16;
+
+ // if the GC will run in parallel in a fork()ed process, we need to
+ // share the mark bits
+ os.Vis vis = os.Vis.PRIV;
+ if (opts.options.fork)
+ vis = os.Vis.SHARED;
+ mark.alloc(nbits, vis); // shared between mark and sweep
+ freebits.alloc(nbits, vis); // ditto
+ scan.alloc(nbits); // only used in the mark phase
+ finals.alloc(nbits); // mark phase *MUST* have a snapshot
+ noscan.alloc(nbits); // ditto
pagetable = cast(ubyte*) cstdlib.malloc(npages);
if (!pagetable)
if (pagetable)
cstdlib.free(pagetable);
- mark.Dtor();
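+ // mark and freebits may have been allocated as shared memory (when the
+ // fork option is enabled), so free them with the matching visibility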
+ os.Vis vis = os.Vis.PRIV;
+ if (opts.options.fork)
+ vis = os.Vis.SHARED;
+ mark.Dtor(vis);
+ freebits.Dtor(vis);
scan.Dtor();
- freebits.Dtor();
finals.Dtor();
noscan.Dtor();
}
void sentinel_Invariant(void *p)
{
- assert(*sentinel_pre(p) == SENTINEL_PRE);
- assert(*sentinel_post(p) == SENTINEL_POST);
+ if (*sentinel_pre(p) != SENTINEL_PRE ||
+ *sentinel_post(p) != SENTINEL_POST)
+ cstdlib.abort();
}