import rt.gc.cdgc.bits: GCBits;
import rt.gc.cdgc.stats: GCStats, Stats;
import dynarray = rt.gc.cdgc.dynarray;
-import alloc = rt.gc.cdgc.alloc;
+import os = rt.gc.cdgc.os;
import opts = rt.gc.cdgc.opts;
import cstdlib = tango.stdc.stdlib;
import cstring = tango.stdc.string;
+import cstdio = tango.stdc.stdio;
+debug(COLLECT_PRINTF) alias cstdio.printf printf;
/*
* This is a small optimization that proved it's usefulness. For small chunks
return !opts.options.conservative && !(attrs & BlkAttr.NO_SCAN);
}
-private
+private size_t round_up(size_t n, size_t to)
{
+// Returns n / to rounded up (ceiling division): the number of "to"-sized
+// chunks needed to hold "n" bytes. NOTE(review): despite the name, this
+// returns a chunk count, not n rounded up to a multiple of "to" -- all
+// callers use it to compute page counts, where this is the intended value.
+ return (n + to - 1) / to;
+}
- extern (C) void* rt_stackBottom();
- extern (C) void* rt_stackTop();
-
- extern (C) void rt_finalize( void* p, bool det = true );
-
+private
+{
 alias void delegate(Object) DEvent;
- extern (C) void rt_attachDisposeEvent(Object h, DEvent e);
- extern (C) bool rt_detachDisposeEvent(Object h, DEvent e);
-
-
 alias void delegate( void*, void* ) scanFn;
+ enum { OPFAIL = ~cast(size_t)0 }
- extern (C) void rt_scanStaticData( scanFn scan );
-
- extern (C) bool thread_needLock();
- extern (C) void thread_suspendAll();
- extern (C) void thread_resumeAll();
+ extern (C)
+ {
+ version (DigitalMars) version(OSX)
+ // NOTE(review): was "oid", a typo that would not compile; druntime
+ // declares _d_osx_image_init() as returning void.
+ void _d_osx_image_init();
- extern (C) void thread_scanAll( scanFn fn, void* curStackTop = null );
+ void* rt_stackBottom();
+ void* rt_stackTop();
+ void rt_finalize( void* p, bool det = true );
+ void rt_attachDisposeEvent(Object h, DEvent e);
+ bool rt_detachDisposeEvent(Object h, DEvent e);
+ void rt_scanStaticData( scanFn scan );
- extern (C) void onOutOfMemoryError();
+ void thread_init();
+ bool thread_needLock();
+ void thread_suspendAll();
+ void thread_resumeAll();
+ void thread_scanAll( scanFn fn, void* curStackTop = null );
- enum
- {
- OPFAIL = ~cast(size_t)0
+ void onOutOfMemoryError();
 }
}
struct List
{
- List *next;
+ List* next;
+ Pool* pool;
}
/// Turn off collections if > 0
int disabled;
+ // PID of the fork()ed process doing the mark() (0 if is not running)
+ int mark_proc_pid;
+
/// min(pool.baseAddr)
byte *min_addr;
/// max(pool.topAddr)
byte *max_addr;
+ /// Total heap memory
+ size_t total_mem;
+ /// Free heap memory
+ size_t free_mem;
+
/// Free list for each size
List*[B_MAX] free_list;
dynarray.DynArray!(void*) roots;
dynarray.DynArray!(Range) ranges;
- dynarray.DynArray!(Pool) pools;
+ dynarray.DynArray!(Pool*) pools;
Stats stats;
}
+// Runs Code() while holding the global GC lock when the runtime reports the
+// application is multi-threaded; otherwise calls Code() directly, skipping
+// the synchronization overhead.
+private T locked(T, alias Code)()
+{
+ if (thread_needLock())
+ synchronized (gc.lock) return Code();
+ else
+ return Code();
+}
+
private GC* gc;
+
+// True while a fork()ed mark process is running (mark_proc_pid is set to the
+// child's PID when eager allocation launches one, and reset to 0 when it is
+// reaped).
+bool collect_in_progress()
+{
+ return gc.mark_proc_pid != 0;
+}
+
+
bool Invariant()
{
assert (gc !is null);
if (gc.inited) {
+ size_t total_mem = 0;
+ size_t free_mem = 0;
for (size_t i = 0; i < gc.pools.length; i++) {
Pool* pool = gc.pools[i];
pool.Invariant();
if (i == 0)
assert(gc.min_addr == pool.baseAddr);
if (i + 1 < gc.pools.length)
- assert(*pool < gc.pools[i + 1]);
+ assert(*pool < *gc.pools[i + 1]);
else if (i + 1 == gc.pools.length)
assert(gc.max_addr == pool.topAddr);
+ total_mem += pool.npages * PAGESIZE;
+ for (size_t pn = 0; pn < pool.npages; ++pn)
+ if (pool.pagetable[pn] == B_FREE)
+ free_mem += PAGESIZE;
}
gc.roots.Invariant();
assert(gc.ranges[i].pbot <= gc.ranges[i].ptop);
}
- for (size_t i = 0; i < B_PAGE; i++)
- for (List *list = gc.free_list[i]; list; list = list.next)
- {
+ for (size_t i = 0; i < B_PAGE; i++) {
+ for (List *list = gc.free_list[i]; list; list = list.next) {
+ auto pool = list.pool;
+ assert (pool !is null);
+ auto p = cast(byte*) list;
+ assert (p >= pool.baseAddr);
+ assert (p < pool.topAddr);
+ assert (pool.freebits.test((p - pool.baseAddr) / 16));
+ free_mem += binsize[i];
}
+ }
+ assert (gc.total_mem == total_mem);
+ assert (gc.free_mem == free_mem);
}
return true;
}
* Return null if not in a Pool.
* Assume pools is sorted.
*/
-Pool *findPool(void *p)
-{
- if (p >= gc.min_addr && p < gc.max_addr)
- {
- if (gc.pools.length == 1)
- {
- return gc.pools[0];
- }
-
- for (size_t i = 0; i < gc.pools.length; i++)
- {
- Pool* pool = gc.pools[i];
- if (p < pool.topAddr)
- {
- if (pool.baseAddr <= p)
- return pool;
- break;
- }
- }
- }
- return null;
-}
-
-
-/**
- * Find base address of block containing pointer p.
- * Returns null if not a gc'd pointer
- */
-void* findBase(void *p)
+Pool* findPool(void* p)
{
- Pool *pool;
-
- pool = findPool(p);
- if (pool)
- {
- size_t offset = cast(size_t)(p - pool.baseAddr);
- size_t pn = offset / PAGESIZE;
- Bins bin = cast(Bins)pool.pagetable[pn];
-
- // Adjust bit to be at start of allocated memory block
- if (bin <= B_PAGE)
- {
- return pool.baseAddr + (offset & notbinsize[bin]);
- }
- else if (bin == B_PAGEPLUS)
- {
- do
- {
- --pn, offset -= PAGESIZE;
- } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
-
- return pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
- }
+ if (p < gc.min_addr || p >= gc.max_addr)
+ return null;
+ if (gc.pools.length == 0)
+ return null;
+ if (gc.pools.length == 1)
+ return gc.pools[0];
+ /// The pooltable[] is sorted by address, so do a binary search
+ size_t low = 0;
+ size_t high = gc.pools.length - 1;
+ while (low <= high) {
+ size_t mid = (low + high) / 2;
+ auto pool = gc.pools[mid];
+ if (p < pool.baseAddr)
+ high = mid - 1;
+ else if (p >= pool.topAddr)
+ low = mid + 1;
else
- {
- // we are in a B_FREE page
- return null;
- }
+ return pool;
}
+ // Not found
return null;
}
/**
- * Find size of pointer p.
- * Returns 0 if not a gc'd pointer
- */
-size_t findSize(void *p)
-{
- Pool* pool;
- size_t size = 0;
-
- pool = findPool(p);
- if (pool)
- {
- size_t pagenum;
- Bins bin;
-
- pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
- bin = cast(Bins)pool.pagetable[pagenum];
- size = binsize[bin];
- if (bin == B_PAGE)
- {
- ubyte* pt;
- size_t i;
-
- pt = &pool.pagetable[0];
- for (i = pagenum + 1; i < pool.npages; i++)
- {
- if (pt[i] != B_PAGEPLUS)
- break;
- }
- size = (i - pagenum) * PAGESIZE;
- }
- }
- return size;
-}
-
-
-/**
- *
+ * Determine the base address of the block containing p. If p is not a gc
+ * allocated pointer, return null.
*/
BlkInfo getInfo(void* p)
{
- Pool* pool;
+ assert (p !is null);
+ Pool* pool = findPool(p);
+ if (pool is null)
+ return BlkInfo.init;
BlkInfo info;
-
- pool = findPool(p);
- if (pool)
- {
- size_t offset = cast(size_t)(p - pool.baseAddr);
- size_t pn = offset / PAGESIZE;
- Bins bin = cast(Bins)pool.pagetable[pn];
-
- ////////////////////////////////////////////////////////////////////
- // findAddr
- ////////////////////////////////////////////////////////////////////
-
- if (bin <= B_PAGE)
- {
- info.base = pool.baseAddr + (offset & notbinsize[bin]);
- }
- else if (bin == B_PAGEPLUS)
- {
- do
- {
- --pn, offset -= PAGESIZE;
- }
- while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
-
- info.base = pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
-
- // fix bin for use by size calc below
- bin = cast(Bins)pool.pagetable[pn];
- }
-
- ////////////////////////////////////////////////////////////////////
- // findSize
- ////////////////////////////////////////////////////////////////////
-
- info.size = binsize[bin];
- if (bin == B_PAGE)
- {
- ubyte* pt;
- size_t i;
-
- pt = &pool.pagetable[0];
- for (i = pn + 1; i < pool.npages; i++)
- {
- if (pt[i] != B_PAGEPLUS)
- break;
- }
- info.size = (i - pn) * PAGESIZE;
+ info.base = pool.findBase(p);
+ if (info.base is null)
+ return BlkInfo.init;
+ info.size = pool.findSize(info.base);
+ size_t bit_i = (info.base - pool.baseAddr) / 16;
+ info.attr = getAttr(pool, bit_i);
+ if (has_pointermap(info.attr)) {
+ info.size -= size_t.sizeof; // PointerMap bitmask
+ // Points to the PointerMap bitmask pointer, not user data
+ if (p >= (info.base + info.size)) {
+ return BlkInfo.init;
}
-
- ////////////////////////////////////////////////////////////////////
- // getAttr
- ////////////////////////////////////////////////////////////////////
-
- info.attr = getAttr(pool, cast(size_t)(offset / 16));
- if (!(info.attr & BlkAttr.NO_SCAN))
- info.size -= (size_t*).sizeof; // bitmask
+ }
+ if (opts.options.sentinel) {
+ info.base = sentinel_add(info.base);
+ // points to sentinel data, not user data
+ if (p < info.base || p >= sentinel_post(info.base))
+ return BlkInfo.init;
+ info.size -= SENTINEL_EXTRA;
}
return info;
}
/**
* Compute bin for size.
*/
-static Bins findBin(size_t size)
+Bins findBin(size_t size)
{
Bins bin;
if (size <= 256)
* Mark all memory in the pool as B_FREE.
* Return the actual number of bytes reserved or 0 on error.
*/
-size_t reserveNoSync(size_t size)
+size_t reserve(size_t size)
{
assert(size != 0);
- size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
+ size_t npages = round_up(size, PAGESIZE);
Pool* pool = newPool(npages);
if (!pool)
/**
* Minimizes physical memory usage by returning free pools to the OS.
+ *
+ * If full is false, keep some pools alive if the resulting free memory would
+ * be too small.
*/
-void minimizeNoSync()
+void minimize(bool full = true)
{
- size_t n;
- size_t pn;
- Pool* pool;
+ // The shared mark bits of the freed pool might be used by the mark process
+ if (collect_in_progress())
+ return;
- for (n = 0; n < gc.pools.length; n++)
+ if (gc.pools.length == 0)
+ return;
+
+ for (size_t n = 0; n < gc.pools.length; n++)
{
- pool = gc.pools[n];
+ Pool* pool = gc.pools[n];
+ size_t pn;
for (pn = 0; pn < pool.npages; pn++)
{
if (cast(Bins)pool.pagetable[pn] != B_FREE)
}
if (pn < pool.npages)
continue;
+ // Free pool
+ size_t pool_size = pool.npages * PAGESIZE;
+ if (!full) {
+ double percent_free = (gc.free_mem - pool_size) * 100.0 /
+ (gc.total_mem - pool_size);
+ if (percent_free < opts.options.min_free)
+ continue; // not enough free, don't remove this pool
+ }
+ gc.total_mem -= pool_size;
+ gc.free_mem -= pool_size;
pool.Dtor();
+ cstdlib.free(pool);
gc.pools.remove_at(n);
n--;
}
* Allocate a chunk of memory that is larger than a page.
* Return null if out of memory.
*/
-void *bigAlloc(size_t size)
+void* bigAlloc(size_t npages, out Pool* pool, size_t* pn, bool* collected)
{
- Pool* pool;
- size_t npages;
- size_t n;
- size_t pn;
- size_t freedpages;
- void* p;
- int state;
+ *collected = false;
+ // This code could use some refinement when repeatedly
+ // allocating very large arrays.
- npages = (size + PAGESIZE - 1) / PAGESIZE;
-
- for (state = 0; ; )
+ void* find_block()
{
- // This code could use some refinement when repeatedly
- // allocating very large arrays.
-
- for (n = 0; n < gc.pools.length; n++)
+ for (size_t n = 0; n < gc.pools.length; n++)
{
pool = gc.pools[n];
- pn = pool.allocPages(npages);
- if (pn != OPFAIL)
- goto L1;
+ *pn = pool.allocPages(npages);
+ if (*pn != OPFAIL)
+ return pool.baseAddr + *pn * PAGESIZE;
}
+ return null;
+ }
- // Failed
- switch (state)
- {
- case 0:
- if (gc.disabled)
- {
- state = 1;
- continue;
- }
- // Try collecting
- freedpages = fullcollectshell();
- if (freedpages >= gc.pools.length * ((POOLSIZE / PAGESIZE) / 4))
- {
- state = 1;
- continue;
- }
- // Release empty pools to prevent bloat
- minimize();
- // Allocate new pool
- pool = newPool(npages);
- if (!pool)
- {
- state = 2;
- continue;
- }
- pn = pool.allocPages(npages);
- assert(pn != OPFAIL);
- goto L1;
- case 1:
- // Release empty pools to prevent bloat
- minimize();
- // Allocate new pool
- pool = newPool(npages);
- if (!pool)
- goto Lnomemory;
- pn = pool.allocPages(npages);
- assert(pn != OPFAIL);
- goto L1;
- case 2:
- goto Lnomemory;
- default:
- assert(false);
- }
+ void* alloc_more()
+ {
+ // Allocate new pool
+ pool = newPool(npages);
+ if (!pool)
+ return null; // let malloc handle the error
+ *pn = pool.allocPages(npages);
+ assert(*pn != OPFAIL);
+ return pool.baseAddr + *pn * PAGESIZE;
}
- L1:
- pool.pagetable[pn] = B_PAGE;
- if (npages > 1)
- memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
- p = pool.baseAddr + pn * PAGESIZE;
- memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
- if (opts.options.mem_stomp)
- memset(p, 0xF1, size);
- return p;
+ if (void* p = find_block())
+ return p;
+
+ if (gc.disabled)
+ return alloc_more();
- Lnomemory:
- return null; // let mallocNoSync handle the error
+ // Try collecting
+ size_t freedpages = fullcollectshell();
+ *collected = true;
+ if (freedpages >= npages) {
+ if (void* p = find_block())
+ return p;
+ }
+
+ return alloc_more();
}
npages = n;
}
- Pool p;
- p.initialize(npages);
- if (!p.baseAddr)
+ auto pool = cast(Pool*) cstdlib.calloc(1, Pool.sizeof);
+ if (pool is null)
+ return null;
+ pool.initialize(npages);
+ if (!pool.baseAddr)
{
- p.Dtor();
+ pool.Dtor();
return null;
}
- Pool* pool = gc.pools.insert_sorted(p);
- if (pool)
- {
- gc.min_addr = gc.pools[0].baseAddr;
- gc.max_addr = gc.pools[gc.pools.length - 1].topAddr;
+ auto inserted_pool = *gc.pools.insert_sorted!("*a < *b")(pool);
+ if (inserted_pool is null) {
+ pool.Dtor();
+ return null;
}
+ assert (inserted_pool is pool);
+ gc.min_addr = gc.pools[0].baseAddr;
+ gc.max_addr = gc.pools[gc.pools.length - 1].topAddr;
+ size_t pool_size = pool.topAddr - pool.baseAddr;
+ gc.total_mem += pool_size;
+ gc.free_mem += pool_size;
return pool;
}
int allocPage(Bins bin)
{
Pool* pool;
- size_t n;
size_t pn;
- byte* p;
- byte* ptop;
- for (n = 0; n < gc.pools.length; n++)
+ for (size_t n = 0; n < gc.pools.length; n++)
{
pool = gc.pools[n];
pn = pool.allocPages(1);
// Convert page to free list
size_t size = binsize[bin];
- List **b = &gc.free_list[bin];
+ auto list_head = &gc.free_list[bin];
- p = pool.baseAddr + pn * PAGESIZE;
- ptop = p + PAGESIZE;
+ byte* p = pool.baseAddr + pn * PAGESIZE;
+ byte* ptop = p + PAGESIZE;
+ size_t bit_i = pn * (PAGESIZE / 16);
+ pool.freebits.set_group(bit_i, PAGESIZE / 16);
for (; p < ptop; p += size)
{
- (cast(List *)p).next = *b;
- *b = cast(List *)p;
+ List* l = cast(List *) p;
+ l.next = *list_head;
+ l.pool = pool;
+ *list_head = l;
}
return 1;
}
/**
- * Marks a range of memory using the conservative bit mask. Used for
- * the stack, for the data segment, and additional memory ranges.
- */
-void mark_conservative(void* pbot, void* ptop)
-{
- mark(pbot, ptop, PointerMap.init.bits.ptr);
-}
-
-
-/**
- * Search a range of memory values and mark any pointers into the GC pool.
+ * Search a range of memory values and mark any pointers into the GC pool using
+ * type information (bitmask of pointer locations).
*/
-void mark(void *pbot, void *ptop, size_t* pm_bitmask)
+void mark_range(void *pbot, void *ptop, size_t* pm_bitmask)
{
// TODO: make our own assert because assert uses the GC
assert (pbot <= ptop);
void **p1 = cast(void **)pbot;
void **p2 = cast(void **)ptop;
size_t pcache = 0;
- uint changes = 0;
+ bool changes = false;
size_t type_size = pm_bitmask[0];
size_t* pm_bits = pm_bitmask + 1;
+ bool has_type_info = type_size != 1 || pm_bits[0] != 1 || pm_bits[1] != 0;
//printf("marking range: %p -> %p\n", pbot, ptop);
for (; p1 + type_size <= p2; p1 += type_size) {
for (size_t n = 0; n < type_size; n++) {
// scan bit set for this word
- if (!(pm_bits[n / BITS_PER_WORD] & (1 << (n % BITS_PER_WORD))))
+ if (has_type_info &&
+ !(pm_bits[n / BITS_PER_WORD] & (1 << (n % BITS_PER_WORD))))
continue;
void* p = *(p1 + n);
if (pool)
{
size_t offset = cast(size_t)(p - pool.baseAddr);
- size_t bit_i;
+ size_t bit_i = void;
size_t pn = offset / PAGESIZE;
Bins bin = cast(Bins)pool.pagetable[pn];
+ // Cache B_PAGE, B_PAGEPLUS and B_FREE lookups
+ if (bin >= B_PAGE)
+ pcache = cast(size_t)p & ~(PAGESIZE-1);
+
// Adjust bit to be at start of allocated memory block
if (bin <= B_PAGE)
- bit_i = (offset & notbinsize[bin]) >> 4;
+ bit_i = (offset & notbinsize[bin]) / 16;
else if (bin == B_PAGEPLUS)
{
do
while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
bit_i = pn * (PAGESIZE / 16);
}
- else
- {
- // Don't mark bits in B_FREE pages
+ else // Don't mark bits in B_FREE pages
continue;
- }
-
- if (bin >= B_PAGE) // Cache B_PAGE and B_PAGEPLUS lookups
- pcache = cast(size_t)p & ~(PAGESIZE-1);
if (!pool.mark.test(bit_i))
{
if (!pool.noscan.test(bit_i))
{
pool.scan.set(bit_i);
- changes = 1;
+ changes = true;
}
}
}
*/
size_t fullcollect(void *stackTop)
{
- size_t n;
- Pool* pool;
-
debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
+ // If eager allocation is used, we need to check first if there is a mark
+ // process running. If there isn't, we start a new one (see the next code
+ // block). If there is, we check if it's still running or already finished.
+ // If it's still running, we tell the caller process no memory has been
+ // recovered (it will allocated more to fulfill the current request). If
+ // the mark process is done, we lunch the sweep phase and hope enough
+ // memory is freed (if that not the case, the caller will allocate more
+ // memory and the next time it's exhausted it will run a new collection).
+ if (opts.options.eager_alloc) {
+ if (collect_in_progress()) {
+ os.WRes r = os.wait_pid(gc.mark_proc_pid, false); // don't block
+ assert (r != os.WRes.ERROR);
+ switch (r) {
+ case os.WRes.DONE:
+ debug(COLLECT_PRINTF) printf("\t\tmark proc DONE\n");
+ gc.mark_proc_pid = 0;
+ return sweep();
+ case os.WRes.RUNNING:
+ debug(COLLECT_PRINTF) printf("\t\tmark proc RUNNING\n");
+ return 0;
+ case os.WRes.ERROR:
+ debug(COLLECT_PRINTF) printf("\t\tmark proc ERROR\n");
+ disable_fork(); // Try to keep going without forking
+ break;
+ }
+ }
+ }
+
+ // We always need to stop the world to make threads save the CPU registers
+ // in the stack and prepare themselves for thread_scanAll()
thread_suspendAll();
gc.stats.world_stopped();
- gc.p_cache = null;
- gc.size_cache = 0;
+ // If forking is enabled, we fork() and start a new mark phase in the
+ // child. The parent process will tell the caller that no memory could be
+ // recycled if eager allocation is used, allowing the mutator to keep going
+ // almost instantly (at the expense of more memory consumption because
+ // a new allocation will be triggered to fulfill the current request). If
+ // no eager allocation is used, the parent will wait for the mark phase to
+ // finish before returning control to the mutator, but other threads are
+ // restarted and may run in parallel with the mark phase (unless they
+ // allocate or use the GC themselves, in which case the global GC lock will
+ // stop them).
+ if (opts.options.fork) {
+ cstdio.fflush(null); // avoid duplicated FILE* output
+ os.pid_t child_pid = os.fork();
+ assert (child_pid != -1); // don't accept errors in non-release mode
+ switch (child_pid) {
+ case -1: // if fork() fails, fall-back to stop-the-world
+ disable_fork();
+ break;
+ case 0: // child process (i.e. the collector's mark phase)
+ mark(stackTop);
+ cstdlib.exit(0);
+ break; // bogus, will never reach here
+ default: // parent process (i.e. the mutator)
+ // start the world again and wait for the mark phase to finish
+ thread_resumeAll();
+ gc.stats.world_started();
+ if (opts.options.eager_alloc) {
+ gc.mark_proc_pid = child_pid;
+ return 0;
+ }
+ os.WRes r = os.wait_pid(child_pid); // block until it finishes
+ assert (r == os.WRes.DONE);
+ debug(COLLECT_PRINTF) printf("\t\tmark proc DONE (block)\n");
+ if (r == os.WRes.DONE)
+ return sweep();
+ debug(COLLECT_PRINTF) printf("\tmark() proc ERROR\n");
+ // If there was some error, try to keep going without forking
+ disable_fork();
+ // Re-suspend the threads to do the marking in this process
+ thread_suspendAll();
+ gc.stats.world_stopped();
+ }
- gc.any_changes = false;
- for (n = 0; n < gc.pools.length; n++)
- {
- pool = gc.pools[n];
- pool.mark.zero();
- pool.scan.zero();
- pool.freebits.zero();
}
- // Mark each free entry, so it doesn't get scanned
- for (n = 0; n < B_PAGE; n++)
- {
- for (List *list = gc.free_list[n]; list; list = list.next)
- {
- pool = findPool(list);
- assert(pool);
- pool.freebits.set(cast(size_t)(cast(byte*)list - pool.baseAddr) / 16);
- }
- }
+ // If we reach here, we are using the standard stop-the-world collection,
+ // either because fork was disabled in the first place, or because it was
+ // disabled because of some error.
+ mark(stackTop);
+ thread_resumeAll();
+ gc.stats.world_started();
- for (n = 0; n < gc.pools.length; n++)
+ return sweep();
+}
+
+
+/**
+ *
+ */
+void mark(void *stackTop)
+{
+ debug(COLLECT_PRINTF) printf("\tmark()\n");
+
+ gc.any_changes = false;
+
+ for (size_t n = 0; n < gc.pools.length; n++)
{
- pool = gc.pools[n];
+ Pool* pool = gc.pools[n];
pool.mark.copy(&pool.freebits);
+ pool.scan.zero();
}
- void mark_conservative_dg(void* pbot, void* ptop)
+ /// Marks a range of memory in conservative mode.
+ void mark_conservative_range(void* pbot, void* ptop)
{
- mark_conservative(pbot, ptop);
+ mark_range(pbot, ptop, PointerMap.init.bits.ptr);
}
- rt_scanStaticData(&mark_conservative_dg);
+ rt_scanStaticData(&mark_conservative_range);
if (!gc.no_stack)
{
// Scan stacks and registers for each paused thread
- thread_scanAll(&mark_conservative_dg, stackTop);
+ thread_scanAll(&mark_conservative_range, stackTop);
}
// Scan roots
debug(COLLECT_PRINTF) printf("scan roots[]\n");
- mark_conservative(gc.roots.ptr, gc.roots.ptr + gc.roots.length);
+ mark_conservative_range(gc.roots.ptr, gc.roots.ptr + gc.roots.length);
// Scan ranges
debug(COLLECT_PRINTF) printf("scan ranges[]\n");
- for (n = 0; n < gc.ranges.length; n++)
+ for (size_t n = 0; n < gc.ranges.length; n++)
{
debug(COLLECT_PRINTF) printf("\t%x .. %x\n", gc.ranges[n].pbot, gc.ranges[n].ptop);
- mark_conservative(gc.ranges[n].pbot, gc.ranges[n].ptop);
+ mark_conservative_range(gc.ranges[n].pbot, gc.ranges[n].ptop);
}
debug(COLLECT_PRINTF) printf("\tscan heap\n");
while (gc.any_changes)
{
gc.any_changes = false;
- for (n = 0; n < gc.pools.length; n++)
+ for (size_t n = 0; n < gc.pools.length; n++)
{
uint *bbase;
uint *b;
uint *btop;
- pool = gc.pools[n];
+ Pool* pool = gc.pools[n];
bbase = pool.scan.base();
btop = bbase + pool.scan.nwords;
bin = cast(Bins)pool.pagetable[pn];
if (bin < B_PAGE) {
if (opts.options.conservative)
- mark_conservative(o, o + binsize[bin]);
+ mark_conservative_range(o, o + binsize[bin]);
else {
auto end_of_blk = cast(size_t**)(o +
binsize[bin] - size_t.sizeof);
size_t* pm_bitmask = *end_of_blk;
- mark(o, end_of_blk, pm_bitmask);
+ mark_range(o, end_of_blk, pm_bitmask);
}
}
else if (bin == B_PAGE || bin == B_PAGEPLUS)
size_t blk_size = u * PAGESIZE;
if (opts.options.conservative)
- mark_conservative(o, o + blk_size);
+ mark_conservative_range(o, o + blk_size);
else {
auto end_of_blk = cast(size_t**)(o + blk_size -
size_t.sizeof);
size_t* pm_bitmask = *end_of_blk;
- mark(o, end_of_blk, pm_bitmask);
+ mark_range(o, end_of_blk, pm_bitmask);
}
}
}
}
}
}
+}
- thread_resumeAll();
- gc.stats.world_started();
+/**
+ *
+ */
+size_t sweep()
+{
// Free up everything not marked
- debug(COLLECT_PRINTF) printf("\tfree'ing\n");
+ debug(COLLECT_PRINTF) printf("\tsweep\n");
+ gc.p_cache = null;
+ gc.size_cache = 0;
+ gc.free_mem = 0; // will be recalculated
size_t freedpages = 0;
size_t freed = 0;
- for (n = 0; n < gc.pools.length; n++)
+ for (size_t n = 0; n < gc.pools.length; n++)
{
- pool = gc.pools[n];
+ Pool* pool = gc.pools[n];
+ pool.clear_cache();
uint* bbase = pool.mark.base();
size_t pn;
for (pn = 0; pn < pool.npages; pn++, bbase += PAGESIZE / (32 * 16))
{
for (; p < ptop; p += size, bit_i += bit_stride)
{
- if (pool.finals.nbits && pool.finals.testClear(bit_i)) {
+ if (pool.finals.testClear(bit_i)) {
if (opts.options.sentinel)
- rt_finalize(cast(List *)sentinel_add(p), false/*gc.no_stack > 0*/);
+ rt_finalize(sentinel_add(p), false/*gc.no_stack > 0*/);
else
- rt_finalize(cast(List *)p, false/*gc.no_stack > 0*/);
+ rt_finalize(p, false/*gc.no_stack > 0*/);
}
clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
- List *list = cast(List *)p;
-
if (opts.options.mem_stomp)
memset(p, 0xF3, size);
}
sentinel_Invariant(sentinel_add(p));
pool.freebits.set(bit_i);
- if (pool.finals.nbits && pool.finals.testClear(bit_i)) {
+ if (pool.finals.testClear(bit_i)) {
if (opts.options.sentinel)
- rt_finalize(cast(List *)sentinel_add(p), false/*gc.no_stack > 0*/);
+ rt_finalize(sentinel_add(p), false/*gc.no_stack > 0*/);
else
- rt_finalize(cast(List *)p, false/*gc.no_stack > 0*/);
+ rt_finalize(p, false/*gc.no_stack > 0*/);
}
clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
- List *list = cast(List *)p;
-
if (opts.options.mem_stomp)
memset(p, 0xF3, size);
}
else if (bin == B_PAGE)
{
- size_t bit_i = pn * (PAGESIZE / 16);
+ size_t bit_stride = PAGESIZE / 16;
+ size_t bit_i = pn * bit_stride;
if (!pool.mark.test(bit_i))
{
byte *p = pool.baseAddr + pn * PAGESIZE;
if (opts.options.sentinel)
sentinel_Invariant(sentinel_add(p));
- if (pool.finals.nbits && pool.finals.testClear(bit_i)) {
+ if (pool.finals.testClear(bit_i)) {
if (opts.options.sentinel)
rt_finalize(sentinel_add(p), false/*gc.no_stack > 0*/);
else
}
clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
- debug(COLLECT_PRINTF) printf("\tcollecting big %x\n", p);
+ debug(COLLECT_PRINTF) printf("\tcollecting big %p\n", p);
pool.pagetable[pn] = B_FREE;
+ pool.freebits.set_group(bit_i, PAGESIZE / 16);
freedpages++;
+ gc.free_mem += PAGESIZE;
if (opts.options.mem_stomp)
memset(p, 0xF3, PAGESIZE);
while (pn + 1 < pool.npages && pool.pagetable[pn + 1] == B_PAGEPLUS)
{
pn++;
pool.pagetable[pn] = B_FREE;
+ bit_i += bit_stride;
+ pool.freebits.set_group(bit_i, PAGESIZE / 16);
freedpages++;
+ gc.free_mem += PAGESIZE;
if (opts.options.mem_stomp)
{
}
}
}
+ else if (bin == B_FREE) {
+ gc.free_mem += PAGESIZE;
+ }
}
}
// Free complete pages, rebuild free list
debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
size_t recoveredpages = 0;
- for (n = 0; n < gc.pools.length; n++)
+ for (size_t n = 0; n < gc.pools.length; n++)
{
- pool = gc.pools[n];
+ Pool* pool = gc.pools[n];
for (size_t pn = 0; pn < pool.npages; pn++)
{
Bins bin = cast(Bins)pool.pagetable[pn];
goto Lnotfree;
}
pool.pagetable[pn] = B_FREE;
+ pool.freebits.set_group(bit_base, PAGESIZE / 16);
recoveredpages++;
+ gc.free_mem += PAGESIZE;
continue;
Lnotfree:
bit_i = bit_base + u / 16;
if (pool.freebits.test(bit_i))
{
- List *list = cast(List *)(p + u);
- // avoid unnecessary writes
+ assert ((p+u) >= pool.baseAddr);
+ assert ((p+u) < pool.topAddr);
+ List* list = cast(List*) (p + u);
+ // avoid unnecessary writes (it really saves time)
if (list.next != gc.free_list[bin])
list.next = gc.free_list[bin];
+ if (list.pool != pool)
+ list.pool = pool;
gc.free_list[bin] = list;
+ gc.free_mem += binsize[bin];
}
}
}
body
{
uint attrs;
-
- if (pool.finals.nbits &&
- pool.finals.test(bit_i))
+ if (pool.finals.test(bit_i))
attrs |= BlkAttr.FINALIZE;
if (pool.noscan.test(bit_i))
attrs |= BlkAttr.NO_SCAN;
-// if (pool.nomove.nbits &&
-// pool.nomove.test(bit_i))
+// if (pool.nomove.test(bit_i))
// attrs |= BlkAttr.NO_MOVE;
return attrs;
}
{
if (mask & BlkAttr.FINALIZE)
{
- if (!pool.finals.nbits)
- pool.finals.alloc(pool.mark.nbits);
pool.finals.set(bit_i);
}
if (mask & BlkAttr.NO_SCAN)
}
body
{
- if (mask & BlkAttr.FINALIZE && pool.finals.nbits)
+ if (mask & BlkAttr.FINALIZE)
pool.finals.clear(bit_i);
if (mask & BlkAttr.NO_SCAN)
pool.noscan.clear(bit_i);
}
+// Permanently falls back to the non-forking, stop-the-world collector after
+// a fork()/wait error (or missing OS support).
+void disable_fork()
+{
+ // we have to disable both options, as eager_alloc assumes fork is enabled
+ opts.options.fork = false;
+ opts.options.eager_alloc = false;
+}
+
void initialize()
{
int dummy;
gc.stack_bottom = cast(char*)&dummy;
opts.parse(cstdlib.getenv("D_GC_OPTS"));
+ // If we are going to fork, make sure we have the needed OS support
+ if (opts.options.fork)
+ opts.options.fork = os.HAVE_SHARED && os.HAVE_FORK;
+ // Eager allocation is only possible when forking
+ if (!opts.options.fork)
+ opts.options.eager_alloc = false;
gc.lock = GCLock.classinfo;
gc.inited = 1;
setStackBottom(rt_stackBottom());
gc.stats = Stats(gc);
+ if (opts.options.prealloc_npools) {
+ size_t pages = round_up(opts.options.prealloc_psize, PAGESIZE);
+ for (size_t i = 0; i < opts.options.prealloc_npools; ++i)
+ newPool(pages);
+ }
}
-/**
- *
- */
-void enable()
+//
+//
+//
+private void *malloc(size_t size, uint attrs, size_t* pm_bitmask)
{
- if (!thread_needLock())
- {
- assert(gc.disabled > 0);
- gc.disabled--;
- }
- else synchronized (gc.lock)
- {
- assert(gc.disabled > 0);
- gc.disabled--;
- }
-}
-
-
-/**
- *
- */
-void disable()
-{
- if (!thread_needLock())
- {
- gc.disabled++;
- }
- else synchronized (gc.lock)
- {
- gc.disabled++;
- }
-}
-
-
-/**
- *
- */
-uint getAttr(void* p)
-{
- if (!p)
- {
- return 0;
- }
-
- uint go()
- {
- Pool* pool = findPool(p);
- uint old_attrs = 0;
-
- if (pool)
- {
- auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
-
- old_attrs = getAttr(pool, bit_i);
- }
- return old_attrs;
- }
-
- if (!thread_needLock())
- {
- return go();
- }
- else synchronized (gc.lock)
- {
- return go();
- }
-}
-
-
-/**
- *
- */
-uint setAttr(void* p, uint mask)
-{
- if (!p)
- {
- return 0;
- }
-
- uint go()
- {
- Pool* pool = findPool(p);
- uint old_attrs = 0;
-
- if (pool)
- {
- auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
-
- old_attrs = getAttr(pool, bit_i);
- setAttr(pool, bit_i, mask);
- }
- return old_attrs;
- }
-
- if (!thread_needLock())
- {
- return go();
- }
- else synchronized (gc.lock)
- {
- return go();
- }
-}
-
-
-/**
- *
- */
-uint clrAttr(void* p, uint mask)
-{
- if (!p)
- {
- return 0;
- }
-
- uint go()
- {
- Pool* pool = findPool(p);
- uint old_attrs = 0;
-
- if (pool)
- {
- auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
-
- old_attrs = getAttr(pool, bit_i);
- clrAttr(pool, bit_i, mask);
- }
- return old_attrs;
- }
-
- if (!thread_needLock())
- {
- return go();
- }
- else synchronized (gc.lock)
- {
- return go();
- }
-}
-
-
-/**
- *
- */
-void *malloc(size_t size, uint attrs, PointerMap ptrmap)
-{
- if (!size)
- {
- return null;
- }
-
- if (!thread_needLock())
- {
- return mallocNoSync(size, attrs, ptrmap.bits.ptr);
- }
- else synchronized (gc.lock)
- {
- return mallocNoSync(size, attrs, ptrmap.bits.ptr);
- }
-}
-
-
-//
-//
-//
-private void *mallocNoSync(size_t size, uint attrs, size_t* pm_bitmask)
-{
- assert(size != 0);
+ assert(size != 0);
gc.stats.malloc_started(size, attrs, pm_bitmask);
scope (exit)
lastbin = bin;
}
- size_t capacity; // to figure out where to store the bitmask
+ Pool* pool = void;
+ size_t bit_i = void;
+ size_t capacity = void; // to figure out where to store the bitmask
+ bool collected = false;
if (bin < B_PAGE)
{
p = gc.free_list[bin];
{
//newPool(1);
}
+ collected = true;
}
if (!gc.free_list[bin] && !allocPage(bin))
{
newPool(1); // allocate new pool to find a new page
+ // TODO: hint allocPage() to use the pool we just created
int result = allocPage(bin);
if (!result)
onOutOfMemoryError();
capacity = binsize[bin];
// Return next item from free list
- gc.free_list[bin] = (cast(List*)p).next;
+ List* list = cast(List*) p;
+ assert ((cast(byte*)list) >= list.pool.baseAddr);
+ assert ((cast(byte*)list) < list.pool.topAddr);
+ gc.free_list[bin] = list.next;
+ pool = list.pool;
+ bit_i = (p - pool.baseAddr) / 16;
+ assert (pool.freebits.test(bit_i));
+ pool.freebits.clear(bit_i);
if (!(attrs & BlkAttr.NO_SCAN))
memset(p + size, 0, capacity - size);
if (opts.options.mem_stomp)
}
else
{
- p = bigAlloc(size);
+ size_t pn;
+ size_t npages = round_up(size, PAGESIZE);
+ p = bigAlloc(npages, pool, &pn, &collected);
if (!p)
onOutOfMemoryError();
- // Round the size up to the number of pages needed to store it
- size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
+ assert (pool !is null);
+
capacity = npages * PAGESIZE;
+ bit_i = pn * (PAGESIZE / 16);
+ pool.freebits.clear(bit_i);
+ pool.pagetable[pn] = B_PAGE;
+ if (npages > 1)
+ memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
+ p = pool.baseAddr + pn * PAGESIZE;
+ memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
+ if (opts.options.mem_stomp)
+ memset(p, 0xF1, size);
+
}
// Store the bit mask AFTER SENTINEL_POST
sentinel_init(p, size);
}
- if (attrs)
- {
- Pool *pool = findPool(p);
- assert(pool);
-
- setAttr(pool, cast(size_t)(p - pool.baseAddr) / 16, attrs);
+ if (attrs) {
+ setAttr(pool, bit_i, attrs);
+ assert (bin >= B_PAGE || !pool.freebits.test(bit_i));
}
- return p;
-}
-
-/**
- *
- */
-void *calloc(size_t size, uint attrs, PointerMap ptrmap)
-{
- if (!size)
- {
- return null;
+ gc.free_mem -= capacity;
+ if (collected) {
+ // If there is not enough free memory, allocate a new pool big enough
+ // to have at least the min_free% of the total heap free. If there is
+ // too much free memory, try to free some empty pools.
+ double percent_free = gc.free_mem * 100.0 / gc.total_mem;
+ if (percent_free < opts.options.min_free) {
+ auto pool_size = gc.total_mem * 1.0 / opts.options.min_free
+ - gc.free_mem;
+ newPool(round_up(cast(size_t)pool_size, PAGESIZE));
+ }
+ else
+ minimize(false);
}
- if (!thread_needLock())
- {
- return callocNoSync(size, attrs, ptrmap.bits.ptr);
- }
- else synchronized (gc.lock)
- {
- return callocNoSync(size, attrs, ptrmap.bits.ptr);
- }
+ return p;
}
//
//
//
-private void *callocNoSync(size_t size, uint attrs, size_t* pm_bitmask)
+private void *calloc(size_t size, uint attrs, size_t* pm_bitmask)
{
assert(size != 0);
- void *p = mallocNoSync(size, attrs, pm_bitmask);
+ void *p = malloc(size, attrs, pm_bitmask);
memset(p, 0, size);
return p;
}
-/**
- *
- */
-void *realloc(void *p, size_t size, uint attrs, PointerMap ptrmap)
-{
- if (!thread_needLock())
- {
- return reallocNoSync(p, size, attrs, ptrmap.bits.ptr);
- }
- else synchronized (gc.lock)
- {
- return reallocNoSync(p, size, attrs, ptrmap.bits.ptr);
- }
-}
-
-
//
//
//
-private void *reallocNoSync(void *p, size_t size, uint attrs,
+private void *realloc(void *p, size_t size, uint attrs,
size_t* pm_bitmask)
{
- if (!size)
- {
+ if (!size) {
if (p)
- {
- freeNoSync(p);
- p = null;
- }
+ free(p);
+ return null;
}
- else if (!p)
- {
- p = mallocNoSync(size, attrs, pm_bitmask);
+
+ if (p is null)
+ return malloc(size, attrs, pm_bitmask);
+
+ Pool* pool = findPool(p);
+ if (pool is null)
+ return null;
+
+ // Set or retrieve attributes as appropriate
+ auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
+ if (attrs) {
+ clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
+ setAttr(pool, bit_i, attrs);
}
else
- {
- Pool* pool = findPool(p);
- if (pool is null)
- return null;
+ attrs = getAttr(pool, bit_i);
- // Set or retrieve attributes as appropriate
- auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
- if (attrs) {
- clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
- setAttr(pool, bit_i, attrs);
- }
- else
- attrs = getAttr(pool, bit_i);
-
- void* blk_base_addr = findBase(p);
- size_t blk_size = findSize(p);
- bool has_pm = has_pointermap(attrs);
- size_t pm_bitmask_size = 0;
- if (has_pm) {
- pm_bitmask_size = size_t.sizeof;
- // Retrieve pointer map bit mask if appropriate
- if (pm_bitmask is null) {
- auto end_of_blk = cast(size_t**)(blk_base_addr +
- blk_size - size_t.sizeof);
- pm_bitmask = *end_of_blk;
- }
+ void* blk_base_addr = pool.findBase(p);
+ size_t blk_size = pool.findSize(p);
+ bool has_pm = has_pointermap(attrs);
+ size_t pm_bitmask_size = 0;
+ if (has_pm) {
+ pm_bitmask_size = size_t.sizeof;
+ // Retrieve pointer map bit mask if appropriate
+ if (pm_bitmask is null) {
+ auto end_of_blk = cast(size_t**)(
+ blk_base_addr + blk_size - size_t.sizeof);
+ pm_bitmask = *end_of_blk;
}
+ }
- if (opts.options.sentinel)
- {
- sentinel_Invariant(p);
- size_t sentinel_stored_size = *sentinel_size(p);
- if (sentinel_stored_size != size)
- {
- void* p2 = mallocNoSync(size, attrs, pm_bitmask);
- if (sentinel_stored_size < size)
- size = sentinel_stored_size;
- cstring.memcpy(p2, p, size);
- p = p2;
+ if (opts.options.sentinel) {
+ sentinel_Invariant(p);
+ size_t sentinel_stored_size = *sentinel_size(p);
+ if (sentinel_stored_size != size) {
+ void* p2 = malloc(size, attrs, pm_bitmask);
+ if (sentinel_stored_size < size)
+ size = sentinel_stored_size;
+ cstring.memcpy(p2, p, size);
+ p = p2;
+ }
+ return p;
+ }
+
+ size += pm_bitmask_size;
+ if (blk_size >= PAGESIZE && size >= PAGESIZE) {
+ auto psz = blk_size / PAGESIZE;
+ auto newsz = round_up(size, PAGESIZE);
+ if (newsz == psz)
+ return p;
+
+ auto pagenum = (p - pool.baseAddr) / PAGESIZE;
+
+ if (newsz < psz) {
+ // Shrink in place
+ if (opts.options.mem_stomp)
+ memset(p + size - pm_bitmask_size, 0xF2,
+ blk_size - size - pm_bitmask_size);
+ pool.freePages(pagenum + newsz, psz - newsz);
+ auto new_blk_size = (PAGESIZE * newsz);
+ gc.free_mem += blk_size - new_blk_size;
+            // update the size cache, assuming it is very likely that the
+            // size of this block will be queried in the near future
+ pool.update_cache(p, new_blk_size);
+ if (has_pm) {
+ auto end_of_blk = cast(size_t**)(blk_base_addr +
+ new_blk_size - pm_bitmask_size);
+ *end_of_blk = pm_bitmask;
}
+ return p;
}
- else
- {
- size += pm_bitmask_size;
- if (blk_size >= PAGESIZE && size >= PAGESIZE)
- {
- auto psz = blk_size / PAGESIZE;
- auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
- if (newsz == psz)
- return p;
- auto pagenum = (p - pool.baseAddr) / PAGESIZE;
-
- if (newsz < psz)
- {
- // Shrink in place
- synchronized (gc.lock)
- {
- if (opts.options.mem_stomp)
- memset(p + size - pm_bitmask_size, 0xF2,
- blk_size - size - pm_bitmask_size);
- pool.freePages(pagenum + newsz, psz - newsz);
- }
+ if (pagenum + newsz <= pool.npages) {
+ // Attempt to expand in place
+ for (size_t i = pagenum + psz; 1;) {
+ if (i == pagenum + newsz) {
+ if (opts.options.mem_stomp)
+ memset(p + blk_size - pm_bitmask_size, 0xF0,
+ size - blk_size - pm_bitmask_size);
+ memset(pool.pagetable + pagenum + psz, B_PAGEPLUS,
+ newsz - psz);
+ auto new_blk_size = (PAGESIZE * newsz);
+ gc.free_mem -= new_blk_size - blk_size;
+                // update the size cache, assuming it is very
+                // likely that the size of this block will be
+                // queried in the near future
+ pool.update_cache(p, new_blk_size);
if (has_pm) {
auto end_of_blk = cast(size_t**)(
- blk_base_addr + (PAGESIZE * newsz) -
- pm_bitmask_size);
+ blk_base_addr + new_blk_size - pm_bitmask_size);
*end_of_blk = pm_bitmask;
}
return p;
}
- else if (pagenum + newsz <= pool.npages)
- {
- // Attempt to expand in place
- synchronized (gc.lock)
- {
- for (size_t i = pagenum + psz; 1;)
- {
- if (i == pagenum + newsz)
- {
- if (opts.options.mem_stomp)
- memset(p + blk_size - pm_bitmask_size,
- 0xF0, size - blk_size
- - pm_bitmask_size);
- memset(pool.pagetable + pagenum +
- psz, B_PAGEPLUS, newsz - psz);
- if (has_pm) {
- auto end_of_blk = cast(size_t**)(
- blk_base_addr +
- (PAGESIZE * newsz) -
- pm_bitmask_size);
- *end_of_blk = pm_bitmask;
- }
- return p;
- }
- if (i == pool.npages)
- {
- break;
- }
- if (pool.pagetable[i] != B_FREE)
- break;
- i++;
- }
- }
- }
- }
- // if new size is bigger or less than half
- if (blk_size < size || blk_size > size * 2)
- {
- size -= pm_bitmask_size;
- blk_size -= pm_bitmask_size;
- void* p2 = mallocNoSync(size, attrs, pm_bitmask);
- if (blk_size < size)
- size = blk_size;
- cstring.memcpy(p2, p, size);
- p = p2;
+ if (i == pool.npages)
+ break;
+ if (pool.pagetable[i] != B_FREE)
+ break;
+ i++;
}
}
}
+
+ // if new size is bigger or less than half
+ if (blk_size < size || blk_size > size * 2) {
+ size -= pm_bitmask_size;
+ blk_size -= pm_bitmask_size;
+ void* p2 = malloc(size, attrs, pm_bitmask);
+ if (blk_size < size)
+ size = blk_size;
+ cstring.memcpy(p2, p, size);
+ p = p2;
+ }
+
return p;
}
/**
* Attempt to in-place enlarge the memory block pointed to by p by at least
- * minbytes beyond its current capacity, up to a maximum of maxsize. This
+ * min_size beyond its current capacity, up to a maximum of max_size. This
* does not attempt to move the memory block (like realloc() does).
*
* Returns:
* 0 if could not extend p,
* total size of entire memory block if successful.
*/
-size_t extend(void* p, size_t minsize, size_t maxsize)
-{
- if (!thread_needLock())
- {
- return extendNoSync(p, minsize, maxsize);
- }
- else synchronized (gc.lock)
- {
- return extendNoSync(p, minsize, maxsize);
- }
-}
-
-
-//
-//
-//
-private size_t extendNoSync(void* p, size_t minsize, size_t maxsize)
+private size_t extend(void* p, size_t minsize, size_t maxsize)
in
{
assert( minsize <= maxsize );
auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
uint attrs = getAttr(pool, bit_i);
- void* blk_base_addr = findBase(p);
- size_t blk_size = findSize(p);
+ void* blk_base_addr = pool.findBase(p);
+ size_t blk_size = pool.findSize(p);
bool has_pm = has_pointermap(attrs);
size_t* pm_bitmask = null;
size_t pm_bitmask_size = 0;
return 0; // cannot extend buckets
auto psz = blk_size / PAGESIZE;
- auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
- auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
+ auto minsz = round_up(minsize, PAGESIZE);
+ auto maxsz = round_up(maxsize, PAGESIZE);
auto pagenum = (p - pool.baseAddr) / PAGESIZE;
memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
gc.p_cache = null;
gc.size_cache = 0;
+ gc.free_mem -= new_size - blk_size;
+    // update the size cache, assuming it is very likely that the size of
+    // this block will be queried in the near future
+ pool.update_cache(p, new_size);
if (has_pm) {
new_size -= size_t.sizeof;
}
-/**
- *
- */
-size_t reserve(size_t size)
-{
- if (!size)
- {
- return 0;
- }
-
- if (!thread_needLock())
- {
- return reserveNoSync(size);
- }
- else synchronized (gc.lock)
- {
- return reserveNoSync(size);
- }
-}
-
-
-/**
- *
- */
-void free(void *p)
-{
- if (!p)
- {
- return;
- }
-
- if (!thread_needLock())
- {
- return freeNoSync(p);
- }
- else synchronized (gc.lock)
- {
- return freeNoSync(p);
- }
-}
-
-
//
//
//
-private void freeNoSync(void *p)
+private void free(void *p)
{
assert (p);
// Free pages
size_t npages = 1;
size_t n = pagenum;
+ pool.freebits.set_group(bit_i, PAGESIZE / 16);
while (++n < pool.npages && pool.pagetable[n] == B_PAGEPLUS)
npages++;
+ size_t size = npages * PAGESIZE;
if (opts.options.mem_stomp)
- memset(p, 0xF2, npages * PAGESIZE);
+ memset(p, 0xF2, size);
pool.freePages(pagenum, npages);
+ gc.free_mem += size;
+ // just in case we were caching this pointer
+ pool.clear_cache(p);
}
else
{
// Add to free list
- List *list = cast(List*)p;
+ List* list = cast(List*) p;
if (opts.options.mem_stomp)
memset(p, 0xF2, binsize[bin]);
list.next = gc.free_list[bin];
+ list.pool = pool;
gc.free_list[bin] = list;
+ pool.freebits.set(bit_i);
+ gc.free_mem += binsize[bin];
}
-}
-
-
-/**
- * Determine the base address of the block containing p. If p is not a gc
- * allocated pointer, return null.
- */
-void* addrOf(void *p)
-{
- if (!p)
- {
- return null;
- }
-
- if (!thread_needLock())
- {
- return addrOfNoSync(p);
- }
- else synchronized (gc.lock)
- {
- return addrOfNoSync(p);
- }
-}
-
-
-//
-//
-//
-void* addrOfNoSync(void *p)
-{
- if (!p)
- {
- return null;
- }
-
- return findBase(p);
+ double percent_free = gc.free_mem * 100.0 / gc.total_mem;
+ if (percent_free > opts.options.min_free)
+ minimize(false);
}
* Determine the allocated size of pointer p. If p is an interior pointer
* or not a gc allocated pointer, return 0.
*/
-size_t sizeOf(void *p)
-{
- if (!p)
- {
- return 0;
- }
-
- if (!thread_needLock())
- {
- return sizeOfNoSync(p);
- }
- else synchronized (gc.lock)
- {
- return sizeOfNoSync(p);
- }
-}
-
-
-//
-//
-//
-private size_t sizeOfNoSync(void *p)
+private size_t sizeOf(void *p)
{
assert (p);
auto biti = cast(size_t)(p - pool.baseAddr) / 16;
uint attrs = getAttr(pool, biti);
- size_t size = findSize(p);
+ size_t size = pool.findSize(p);
size_t pm_bitmask_size = 0;
if (has_pointermap(attrs))
pm_bitmask_size = size_t.sizeof;
}
-/**
- * Determine the base address of the block containing p. If p is not a gc
- * allocated pointer, return null.
- */
-BlkInfo query(void *p)
-{
- if (!p)
- {
- BlkInfo i;
- return i;
- }
-
- if (!thread_needLock())
- {
- return queryNoSync(p);
- }
- else synchronized (gc.lock)
- {
- return queryNoSync(p);
- }
-}
-
-
-//
-//
-//
-BlkInfo queryNoSync(void *p)
-{
- assert(p);
-
- return getInfo(p);
-}
-
-
/**
* Verify that pointer p:
* 1) belongs to this memory pool
* 2) points to the start of an allocated piece of memory
* 3) is not on a free list
*/
-void check(void *p)
-{
- if (!p)
- {
- return;
- }
-
- if (!thread_needLock())
- {
- checkNoSync(p);
- }
- else synchronized (gc.lock)
- {
- checkNoSync(p);
- }
-}
-
-
-//
-//
-//
private void checkNoSync(void *p)
{
assert(p);
if (bin < B_PAGE)
{
// Check that p is not on a free list
- List *list;
-
- for (list = gc.free_list[bin]; list; list = list.next)
+ for (List* list = gc.free_list[bin]; list; list = list.next)
{
assert(cast(void*)list != p);
}
}
-/**
- * add p to list of roots
- */
-void addRoot(void *p)
-{
- if (!p)
- {
- return;
- }
-
- if (!thread_needLock())
- {
- if (gc.roots.append(p) is null)
- onOutOfMemoryError();
- }
- else synchronized (gc.lock)
- {
- if (gc.roots.append(p) is null)
- onOutOfMemoryError();
- }
-}
-
-
-/**
- * remove p from list of roots
- */
-void removeRoot(void *p)
-{
- if (!p)
- {
- return;
- }
-
- bool r;
- if (!thread_needLock())
- {
- r = gc.roots.remove(p);
- }
- else synchronized (gc.lock)
- {
- r = gc.roots.remove(p);
- }
- assert (r);
-}
-
-
-/**
- * add range to scan for roots
- */
-void addRange(void *p, size_t sz)
-{
- if (!p || !sz)
- {
- return;
- }
-
- if (!thread_needLock())
- {
- if (gc.ranges.append(Range(p, p+sz)) is null)
- onOutOfMemoryError();
- }
- else synchronized (gc.lock)
- {
- if (gc.ranges.append(Range(p, p+sz)) is null)
- onOutOfMemoryError();
- }
-}
-
-
-/**
- * remove range
- */
-void removeRange(void *p)
-{
- if (!p)
- {
- return;
- }
-
- bool r;
- if (!thread_needLock())
- {
- r = gc.ranges.remove(Range(p, null));
- }
- else synchronized (gc.lock)
- {
- r = gc.ranges.remove(Range(p, null));
- }
- assert (r);
-}
-
-
-/**
- * do full garbage collection
- */
-void fullCollect()
-{
-
- if (!thread_needLock())
- {
- fullcollectshell();
- }
- else synchronized (gc.lock)
- {
- fullcollectshell();
- }
-
- version (none)
- {
- GCStats stats;
- getStats(stats);
- }
-
-}
-
-
-/**
- * do full garbage collection ignoring roots
- */
-void fullCollectNoStack()
-{
- if (!thread_needLock())
- {
- gc.no_stack++;
- fullcollectshell();
- gc.no_stack--;
- }
- else synchronized (gc.lock)
- {
- gc.no_stack++;
- fullcollectshell();
- gc.no_stack--;
- }
-}
-
-
-/**
- * minimize free space usage
- */
-void minimize()
-{
- if (!thread_needLock())
- {
- minimizeNoSync();
- }
- else synchronized (gc.lock)
- {
- minimizeNoSync();
- }
-}
-
-
/**
* Retrieve statistics about garbage collection.
* Useful for debugging and tuning.
*/
-void getStats(out GCStats stats)
-{
- if (!thread_needLock())
- {
- getStatsNoSync(stats);
- }
- else synchronized (gc.lock)
- {
- getStatsNoSync(stats);
- }
-}
-
-
-//
-//
-//
-private void getStatsNoSync(out GCStats stats)
+private GCStats getStats()
{
+ GCStats stats;
size_t psize = 0;
size_t usize = 0;
size_t flsize = 0;
size_t n;
size_t bsize = 0;
- memset(&stats, 0, GCStats.sizeof);
-
for (n = 0; n < gc.pools.length; n++)
{
Pool* pool = gc.pools[n];
for (n = 0; n < B_PAGE; n++)
{
- for (List *list = gc.free_list[n]; list; list = list.next)
+ for (List* list = gc.free_list[n]; list; list = list.next)
flsize += binsize[n];
}
stats.poolsize = psize;
stats.usedsize = bsize - flsize;
stats.freelistsize = flsize;
+ return stats;
}
/******************* weak-reference support *********************/
-// call locked if necessary
-private T locked(T)(in T delegate() code)
-{
- if (thread_needLock)
- synchronized(gc.lock) return code();
- else
- return code();
-}
-
private struct WeakPointer
{
Object reference;
// lock for memory consistency (parallel readers)
// also ensures that weakpointerDestroy can be called while another
// thread is freeing the reference with "delete"
- locked!(void)({ reference = null; });
+ return locked!(void, () {
+ reference = null;
+ })();
}
}
auto wp = cast(WeakPointer*)p;
// must be extra careful about the GC or parallel threads
// finalizing the reference at the same time
- locked!(void)({
- if (wp.reference)
- rt_detachDisposeEvent(wp.reference, &wp.ondestroy);
- });
+ return locked!(void, () {
+ if (wp.reference)
+ rt_detachDisposeEvent(wp.reference, &wp.ondestroy);
+ })();
cstdlib.free(wp);
}
}
// that'd require core.sync.Atomic and lots of care about memory
// consistency it's an optional optimization see
// http://dsource.org/projects/tango/browser/trunk/user/tango/core/Lifetime.d?rev=5100#L158
- return locked!(Object)({
- return (cast(WeakPointer*)p).reference;
- });
+ return locked!(Object, () {
+ return (cast(WeakPointer*)p).reference;
+ })();
}
}
size_t npages;
ubyte* pagetable;
+ /// Cache for findSize()
+ size_t cached_size;
+ void* cached_ptr;
+
+ void clear_cache(void* ptr = null)
+ {
+ if (ptr is null || ptr is this.cached_ptr) {
+ this.cached_ptr = null;
+ this.cached_size = 0;
+ }
+ }
+
+ void update_cache(void* ptr, size_t size)
+ {
+ this.cached_ptr = ptr;
+ this.cached_size = size;
+ }
void initialize(size_t npages)
{
size_t poolsize = npages * PAGESIZE;
assert(poolsize >= POOLSIZE);
- baseAddr = cast(byte *) alloc.os_mem_map(poolsize);
+ baseAddr = cast(byte *) os.alloc(poolsize);
// Some of the code depends on page alignment of memory pools
assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);
npages = 0;
poolsize = 0;
}
- //assert(baseAddr);
topAddr = baseAddr + poolsize;
- mark.alloc(cast(size_t)poolsize / 16);
- scan.alloc(cast(size_t)poolsize / 16);
- freebits.alloc(cast(size_t)poolsize / 16);
- noscan.alloc(cast(size_t)poolsize / 16);
+ size_t nbits = cast(size_t)poolsize / 16;
+
+ // if the GC will run in parallel in a fork()ed process, we need to
+ // share the mark bits
+ os.Vis vis = os.Vis.PRIV;
+ if (opts.options.fork)
+ vis = os.Vis.SHARED;
+ mark.alloc(nbits, vis); // shared between mark and sweep
+ freebits.alloc(nbits); // not used by the mark phase
+ scan.alloc(nbits); // only used in the mark phase
+ finals.alloc(nbits); // not used by the mark phase
+ noscan.alloc(nbits); // mark phase *MUST* have a snapshot
+
+ // all is free when we start
+ freebits.set_all();
+
+ // avoid accidental sweeping of new pools while using eager allocation
+ if (collect_in_progress())
+ mark.set_all();
pagetable = cast(ubyte*) cstdlib.malloc(npages);
if (!pagetable)
if (npages)
{
- result = alloc.os_mem_unmap(baseAddr, npages * PAGESIZE);
+ result = os.dealloc(baseAddr, npages * PAGESIZE);
assert(result);
npages = 0;
}
if (pagetable)
cstdlib.free(pagetable);
- mark.Dtor();
- scan.Dtor();
+ os.Vis vis = os.Vis.PRIV;
+ if (opts.options.fork)
+ vis = os.Vis.SHARED;
+ mark.Dtor(vis);
freebits.Dtor();
+ scan.Dtor();
finals.Dtor();
noscan.Dtor();
}
}
+ /**
+ * Find base address of block containing pointer p.
+ * Returns null if the pointer doesn't belong to this pool
+ */
+ void* findBase(void *p)
+ {
+ size_t offset = cast(size_t)(p - this.baseAddr);
+ size_t pagenum = offset / PAGESIZE;
+ Bins bin = cast(Bins)this.pagetable[pagenum];
+ // Adjust bit to be at start of allocated memory block
+ if (bin <= B_PAGE)
+ return this.baseAddr + (offset & notbinsize[bin]);
+ if (bin == B_PAGEPLUS) {
+ do {
+ --pagenum, offset -= PAGESIZE;
+ } while (cast(Bins)this.pagetable[pagenum] == B_PAGEPLUS);
+ return this.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
+ }
+ // we are in a B_FREE page
+ return null;
+ }
+
+
+ /**
+ * Find size of pointer p.
+     * Returns 0 if p doesn't belong to this pool or if its block size is
+     * less than a PAGE.
+ */
+ size_t findSize(void *p)
+ {
+ size_t pagenum = cast(size_t)(p - this.baseAddr) / PAGESIZE;
+ Bins bin = cast(Bins)this.pagetable[pagenum];
+ if (bin != B_PAGE)
+ return binsize[bin];
+ if (this.cached_ptr == p)
+ return this.cached_size;
+ size_t i = pagenum + 1;
+ for (; i < this.npages; i++)
+ if (this.pagetable[i] != B_PAGEPLUS)
+ break;
+ this.cached_ptr = p;
+ this.cached_size = (i - pagenum) * PAGESIZE;
+ return this.cached_size;
+ }
+
+
/**
* Used for sorting pools
*/
void sentinel_Invariant(void *p)
{
- assert(*sentinel_pre(p) == SENTINEL_PRE);
- assert(*sentinel_post(p) == SENTINEL_POST);
+ if (*sentinel_pre(p) != SENTINEL_PRE ||
+ *sentinel_post(p) != SENTINEL_POST)
+ cstdlib.abort();
}
private int _termCleanupLevel=1;
+extern (C):
+
/// sets the cleanup level done by gc
-/// (0: none, 1: fullCollect, 2: fullCollectNoStack (might crash daemonThreads))
+/// 0: none
+/// 1: fullCollect
+/// 2: fullCollect ignoring stack roots (might crash daemonThreads)
/// result !=0 if the value was invalid
-extern (C) int gc_setTermCleanupLevel(int cLevel){
+int gc_setTermCleanupLevel(int cLevel)
+{
if (cLevel<0 || cLevel>2) return cLevel;
_termCleanupLevel=cLevel;
return 0;
}
/// returns the cleanup level done by gc
-extern (C) int gc_getTermCleanupLevel(){
+int gc_getTermCleanupLevel()
+{
return _termCleanupLevel;
}
-version (DigitalMars) version(OSX) {
- extern(C) void _d_osx_image_init();
-}
-
-extern (C) void thread_init();
-
-extern (C) void gc_init()
+void gc_init()
{
scope (exit) assert (Invariant());
gc = cast(GC*) cstdlib.calloc(1, GC.sizeof);
thread_init();
}
-extern (C) void gc_term()
+void gc_term()
{
assert (Invariant());
if (_termCleanupLevel<1) {
// I'm disabling cleanup for now until I can think about it some
// more.
//
- fullCollectNoStack(); // not really a 'collect all' -- still scans
- // static data area, roots, and ranges.
+ // not really a 'collect all' -- still scans static data area, roots,
+ // and ranges.
+ return locked!(void, () {
+ gc.no_stack++;
+ fullcollectshell();
+ gc.no_stack--;
+ })();
} else {
// default (safe) clenup
- fullCollect();
+ return locked!(void, () {
+ fullcollectshell();
+ })();
}
}
-extern (C) void gc_enable()
+void gc_enable()
{
- assert (Invariant()); scope (exit) assert (Invariant());
- enable();
+ return locked!(void, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ assert (gc.disabled > 0);
+ gc.disabled--;
+ })();
}
-extern (C) void gc_disable()
+void gc_disable()
{
- assert (Invariant()); scope (exit) assert (Invariant());
- disable();
+ return locked!(void, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ gc.disabled++;
+ })();
}
-extern (C) void gc_collect()
+void gc_collect()
{
- assert (Invariant()); scope (exit) assert (Invariant());
- fullCollect();
+ return locked!(void, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ fullcollectshell();
+ })();
}
-extern (C) void gc_minimize()
+void gc_minimize()
{
- assert (Invariant()); scope (exit) assert (Invariant());
- minimize();
+ return locked!(void, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ minimize();
+ })();
}
-extern (C) uint gc_getAttr( void* p )
+uint gc_getAttr(void* p)
{
- assert (Invariant()); scope (exit) assert (Invariant());
- return getAttr(p);
+ if (p is null)
+ return 0;
+ return locked!(uint, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ Pool* pool = findPool(p);
+ if (pool is null)
+ return 0u;
+ auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
+ return getAttr(pool, bit_i);
+ })();
}
-extern (C) uint gc_setAttr( void* p, uint a )
+uint gc_setAttr(void* p, uint attrs)
{
- assert (Invariant()); scope (exit) assert (Invariant());
- return setAttr(p, a);
+ if (p is null)
+ return 0;
+ return locked!(uint, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ Pool* pool = findPool(p);
+ if (pool is null)
+ return 0u;
+ auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
+ uint old_attrs = getAttr(pool, bit_i);
+ setAttr(pool, bit_i, attrs);
+ return old_attrs;
+ })();
}
-extern (C) uint gc_clrAttr( void* p, uint a )
+uint gc_clrAttr(void* p, uint attrs)
{
- assert (Invariant()); scope (exit) assert (Invariant());
- return clrAttr(p, a);
+ if (p is null)
+ return 0;
+ return locked!(uint, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ Pool* pool = findPool(p);
+ if (pool is null)
+ return 0u;
+ auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
+ uint old_attrs = getAttr(pool, bit_i);
+ clrAttr(pool, bit_i, attrs);
+ return old_attrs;
+ })();
}
-extern (C) void* gc_malloc(size_t sz, uint attrs = 0,
+void* gc_malloc(size_t size, uint attrs = 0,
PointerMap ptrmap = PointerMap.init)
{
- assert (Invariant()); scope (exit) assert (Invariant());
- return malloc(sz, attrs, ptrmap);
+ if (size == 0)
+ return null;
+ return locked!(void*, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ return malloc(size, attrs, ptrmap.bits.ptr);
+ })();
}
-extern (C) void* gc_calloc(size_t sz, uint attrs = 0,
+void* gc_calloc(size_t size, uint attrs = 0,
PointerMap ptrmap = PointerMap.init)
{
- assert (Invariant()); scope (exit) assert (Invariant());
- return calloc(sz, attrs, ptrmap);
+ if (size == 0)
+ return null;
+ return locked!(void*, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ return calloc(size, attrs, ptrmap.bits.ptr);
+ })();
}
-extern (C) void* gc_realloc(void* p, size_t sz, uint attrs = 0,
+void* gc_realloc(void* p, size_t size, uint attrs = 0,
PointerMap ptrmap = PointerMap.init)
{
- assert (Invariant()); scope (exit) assert (Invariant());
- return realloc(p, sz, attrs, ptrmap);
+ return locked!(void*, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ return realloc(p, size, attrs, ptrmap.bits.ptr);
+ })();
}
-extern (C) size_t gc_extend( void* p, size_t mx, size_t sz )
+size_t gc_extend(void* p, size_t min_size, size_t max_size)
{
- assert (Invariant()); scope (exit) assert (Invariant());
- return extend(p, mx, sz);
+ return locked!(size_t, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ return extend(p, min_size, max_size);
+ })();
}
-extern (C) size_t gc_reserve( size_t sz )
+size_t gc_reserve(size_t size)
{
- assert (Invariant()); scope (exit) assert (Invariant());
- return reserve(sz);
+ if (size == 0)
+ return 0;
+ return locked!(size_t, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ return reserve(size);
+ })();
}
-extern (C) void gc_free( void* p )
+void gc_free(void* p)
{
- assert (Invariant()); scope (exit) assert (Invariant());
- free(p);
+ if (p is null)
+ return;
+ return locked!(void, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ free(p);
+ })();
}
-extern (C) void* gc_addrOf( void* p )
+void* gc_addrOf(void* p)
{
- assert (Invariant()); scope (exit) assert (Invariant());
- return addrOf(p);
+ if (p is null)
+ return null;
+ return locked!(void*, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ Pool* pool = findPool(p);
+ if (pool is null)
+ return null;
+ return pool.findBase(p);
+ })();
}
-extern (C) size_t gc_sizeOf( void* p )
+size_t gc_sizeOf(void* p)
{
- assert (Invariant()); scope (exit) assert (Invariant());
- return sizeOf(p);
+ if (p is null)
+ return 0;
+ return locked!(size_t, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ return sizeOf(p);
+ })();
}
-extern (C) BlkInfo gc_query( void* p )
+BlkInfo gc_query(void* p)
{
- assert (Invariant()); scope (exit) assert (Invariant());
- return query(p);
+ if (p is null)
+ return BlkInfo.init;
+ return locked!(BlkInfo, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ return getInfo(p);
+ })();
}
// NOTE: This routine is experimental. The stats or function name may change
// before it is made officially available.
-extern (C) GCStats gc_stats()
+GCStats gc_stats()
{
- assert (Invariant()); scope (exit) assert (Invariant());
- GCStats stats = void;
- getStats(stats);
- return stats;
+ return locked!(GCStats, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ return getStats();
+ })();
}
-extern (C) void gc_addRoot( void* p )
+void gc_addRoot(void* p)
{
- assert (Invariant()); scope (exit) assert (Invariant());
- addRoot(p);
+ if (p is null)
+ return;
+ return locked!(void, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ if (gc.roots.append(p) is null)
+ onOutOfMemoryError();
+ })();
}
-extern (C) void gc_addRange( void* p, size_t sz )
+void gc_addRange(void* p, size_t size)
{
- assert (Invariant()); scope (exit) assert (Invariant());
- addRange(p, sz);
+ if (p is null || size == 0)
+ return;
+ return locked!(void, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ if (gc.ranges.append(Range(p, p + size)) is null)
+ onOutOfMemoryError();
+ })();
}
-extern (C) void gc_removeRoot( void *p )
+void gc_removeRoot(void* p)
{
- assert (Invariant()); scope (exit) assert (Invariant());
- removeRoot(p);
+ if (p is null)
+ return;
+ return locked!(void, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ bool r = gc.roots.remove(p);
+ assert (r);
+ })();
}
-extern (C) void gc_removeRange( void *p )
+void gc_removeRange(void* p)
{
- assert (Invariant()); scope (exit) assert (Invariant());
- removeRange(p);
+ if (p is null)
+ return;
+ return locked!(void, () {
+ assert (Invariant()); scope (exit) assert (Invariant());
+ bool r = gc.ranges.remove(Range(p, null));
+ assert (r);
+ })();
}
-extern (C) void* gc_weakpointerCreate( Object r )
+void* gc_weakpointerCreate(Object r)
{
- assert (Invariant()); scope (exit) assert (Invariant());
+ // weakpointers do their own locking
return weakpointerCreate(r);
}
-extern (C) void gc_weakpointerDestroy( void* wp )
+void gc_weakpointerDestroy(void* wp)
{
- assert (Invariant()); scope (exit) assert (Invariant());
+ // weakpointers do their own locking
weakpointerDestroy(wp);
}
-extern (C) Object gc_weakpointerGet( void* wp )
+Object gc_weakpointerGet(void* wp)
{
- assert (Invariant()); scope (exit) assert (Invariant());
+ // weakpointers do their own locking
return weakpointerGet(wp);
}