return !opts.options.conservative && !(attrs & BlkAttr.NO_SCAN);
}
+/// Number of `to`-sized units needed to hold `n` bytes (ceiling division).
+private size_t round_up(size_t n, size_t to)
+{
+ return (n + to - 1) / to;
+}
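+// For example (illustrative values, assuming PAGESIZE == 4096):
+// round_up(1, PAGESIZE) == 1, round_up(4096, PAGESIZE) == 1 and
+// round_up(4097, PAGESIZE) == 2 pages.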
+
private
{
alias void delegate(Object) DEvent;
/// max(pool.topAddr)
byte *max_addr;
+ /// Total heap memory
+ size_t total_mem;
+ /// Free heap memory
+ size_t free_mem;
+
/// Free list for each size
List*[B_MAX] free_list;
private GC* gc;
+
+/// Whether a (parallel) mark process is currently running.
+bool collect_in_progress()
+{
+ return gc.mark_proc_pid != 0;
+}
+
+
bool Invariant()
{
assert (gc !is null);
if (gc.inited) {
+ size_t total_mem = 0;
+ size_t free_mem = 0;
for (size_t i = 0; i < gc.pools.length; i++) {
Pool* pool = gc.pools[i];
pool.Invariant();
            if (i + 1 < gc.pools.length)
                assert(*pool < *gc.pools[i + 1]);
else if (i + 1 == gc.pools.length)
assert(gc.max_addr == pool.topAddr);
+ total_mem += pool.npages * PAGESIZE;
+ for (size_t pn = 0; pn < pool.npages; ++pn)
+ if (pool.pagetable[pn] == B_FREE)
+ free_mem += PAGESIZE;
}
gc.roots.Invariant();
assert (p >= pool.baseAddr);
assert (p < pool.topAddr);
assert (pool.freebits.test((p - pool.baseAddr) / 16));
+ free_mem += binsize[i];
}
}
+ assert (gc.total_mem == total_mem);
+ assert (gc.free_mem == free_mem);
}
return true;
}
size_t reserve(size_t size)
{
assert(size != 0);
- size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
+ size_t npages = round_up(size, PAGESIZE);
Pool* pool = newPool(npages);
if (!pool)
/**
* Minimizes physical memory usage by returning free pools to the OS.
+ *
+ * If full is false, a completely free pool is still kept alive if removing
+ * it would leave less than min_free% of the total heap memory free.
*/
-void minimize()
+void minimize(bool full = true)
{
- // Disabled if a parallel collection is in progress because the shared mark
- // bits of the freed pool might be used by the mark process
- if (gc.mark_proc_pid != 0)
+ // The shared mark bits of the freed pool might be used by the mark process
+ if (collect_in_progress())
return;
- size_t n;
- size_t pn;
- Pool* pool;
+ if (gc.pools.length == 0)
+ return;
- for (n = 0; n < gc.pools.length; n++)
+ for (size_t n = 0; n < gc.pools.length; n++)
{
- pool = gc.pools[n];
+ Pool* pool = gc.pools[n];
+ size_t pn;
for (pn = 0; pn < pool.npages; pn++)
{
            if (cast(Bins)pool.pagetable[pn] != B_FREE)
                break;
        }
if (pn < pool.npages)
continue;
+ // Free pool
+ size_t pool_size = pool.npages * PAGESIZE;
+ if (!full) {
+ double percent_free = (gc.free_mem - pool_size) * 100.0 /
+ (gc.total_mem - pool_size);
+ if (percent_free < opts.options.min_free)
+ continue; // not enough free, don't remove this pool
+ }
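+        // A quick sanity check with made-up numbers: if total_mem is 100 MiB,
+        // free_mem is 10 MiB and this fully free pool is 8 MiB, removing it
+        // would leave 2 MiB of 92 MiB free (~2.2%), so with min_free == 5 the
+        // pool is kept.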
+ gc.total_mem -= pool_size;
+ gc.free_mem -= pool_size;
pool.Dtor();
cstdlib.free(pool);
gc.pools.remove_at(n);
* Allocate a chunk of memory that is larger than a page.
* Return null if out of memory.
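+ *
+ * The index of the first allocated page inside `pool` is returned through
+ * `pn`, and `collected` is set to true if a garbage collection was run while
+ * looking for free pages.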
*/
-void* bigAlloc(size_t size, out Pool* pool)
+void* bigAlloc(size_t npages, out Pool* pool, size_t* pn, bool* collected)
{
- size_t npages;
- size_t n;
- size_t pn;
- size_t freedpages;
- void* p;
- int state;
-
- npages = (size + PAGESIZE - 1) / PAGESIZE;
+ *collected = false;
+ // This code could use some refinement when repeatedly
+ // allocating very large arrays.
- for (state = 0; ; )
+ void* find_block()
{
- // This code could use some refinement when repeatedly
- // allocating very large arrays.
-
- for (n = 0; n < gc.pools.length; n++)
+ for (size_t n = 0; n < gc.pools.length; n++)
{
pool = gc.pools[n];
- pn = pool.allocPages(npages);
- if (pn != OPFAIL)
- goto L1;
+ *pn = pool.allocPages(npages);
+ if (*pn != OPFAIL)
+ return pool.baseAddr + *pn * PAGESIZE;
}
+ return null;
+ }
- // Failed
- switch (state)
- {
- case 0:
- if (gc.disabled)
- {
- state = 1;
- continue;
- }
- // Try collecting
- freedpages = fullcollectshell();
- if (freedpages >= gc.pools.length * ((POOLSIZE / PAGESIZE) / 4))
- {
- state = 1;
- continue;
- }
- // Release empty pools to prevent bloat
- minimize();
- // Allocate new pool
- pool = newPool(npages);
- if (!pool)
- {
- state = 2;
- continue;
- }
- pn = pool.allocPages(npages);
- assert(pn != OPFAIL);
- goto L1;
- case 1:
- // Release empty pools to prevent bloat
- minimize();
- // Allocate new pool
- pool = newPool(npages);
- if (!pool)
- goto Lnomemory;
- pn = pool.allocPages(npages);
- assert(pn != OPFAIL);
- goto L1;
- case 2:
- goto Lnomemory;
- default:
- assert(false);
- }
+ void* alloc_more()
+ {
+ // Allocate new pool
+ pool = newPool(npages);
+ if (!pool)
+ return null; // let malloc handle the error
+ *pn = pool.allocPages(npages);
+ assert(*pn != OPFAIL);
+ return pool.baseAddr + *pn * PAGESIZE;
}
- L1:
- size_t bit_i = pn * (PAGESIZE / 16);
- pool.freebits.clear(bit_i);
- pool.pagetable[pn] = B_PAGE;
- if (npages > 1)
- memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
- p = pool.baseAddr + pn * PAGESIZE;
- memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
- if (opts.options.mem_stomp)
- memset(p, 0xF1, size);
- return p;
+ if (void* p = find_block())
+ return p;
- Lnomemory:
- return null; // let mallocNoSync handle the error
+ if (gc.disabled)
+ return alloc_more();
+
+ // Try collecting
+ size_t freedpages = fullcollectshell();
+ *collected = true;
+ if (freedpages >= npages) {
+ if (void* p = find_block())
+ return p;
+ }
+
+ return alloc_more();
}
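+// In short: the rewritten bigAlloc() first tries the existing pools
+// (find_block), then, if the GC is enabled, runs a collection and retries,
+// and finally falls back to allocating a new pool (alloc_more), leaving a
+// null return for the caller to turn into an out-of-memory error.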
assert (inserted_pool is pool);
gc.min_addr = gc.pools[0].baseAddr;
gc.max_addr = gc.pools[gc.pools.length - 1].topAddr;
+ size_t pool_size = pool.topAddr - pool.baseAddr;
+ gc.total_mem += pool_size;
+ gc.free_mem += pool_size;
return pool;
}
    // memory is freed (if that is not the case, the caller will allocate more
// memory and the next time it's exhausted it will run a new collection).
if (opts.options.eager_alloc) {
- if (gc.mark_proc_pid != 0) { // there is a mark process in progress
+ if (collect_in_progress()) {
os.WRes r = os.wait_pid(gc.mark_proc_pid, false); // don't block
assert (r != os.WRes.ERROR);
switch (r) {
debug(COLLECT_PRINTF) printf("\tsweep\n");
gc.p_cache = null;
gc.size_cache = 0;
+ gc.free_mem = 0; // will be recalculated
size_t freedpages = 0;
size_t freed = 0;
for (size_t n = 0; n < gc.pools.length; n++)
pool.pagetable[pn] = B_FREE;
pool.freebits.set_group(bit_i, PAGESIZE / 16);
freedpages++;
+ gc.free_mem += PAGESIZE;
if (opts.options.mem_stomp)
memset(p, 0xF3, PAGESIZE);
while (pn + 1 < pool.npages && pool.pagetable[pn + 1] == B_PAGEPLUS)
bit_i += bit_stride;
pool.freebits.set_group(bit_i, PAGESIZE / 16);
freedpages++;
+ gc.free_mem += PAGESIZE;
if (opts.options.mem_stomp)
{
}
}
}
+ else if (bin == B_FREE) {
+ gc.free_mem += PAGESIZE;
+ }
}
}
pool.pagetable[pn] = B_FREE;
pool.freebits.set_group(bit_base, PAGESIZE / 16);
recoveredpages++;
+ gc.free_mem += PAGESIZE;
continue;
Lnotfree:
if (list.pool != pool)
list.pool = pool;
gc.free_list[bin] = list;
+ gc.free_mem += binsize[bin];
}
}
}
setStackBottom(rt_stackBottom());
gc.stats = Stats(gc);
if (opts.options.prealloc_npools) {
- size_t pages = (opts.options.prealloc_psize + PAGESIZE - 1) / PAGESIZE;
+ size_t pages = round_up(opts.options.prealloc_psize, PAGESIZE);
for (size_t i = 0; i < opts.options.prealloc_npools; ++i)
newPool(pages);
}
Pool* pool = void;
size_t bit_i = void;
size_t capacity = void; // to figure out where to store the bitmask
+ bool collected = false;
if (bin < B_PAGE)
{
p = gc.free_list[bin];
{
//newPool(1);
}
+ collected = true;
}
if (!gc.free_list[bin] && !allocPage(bin))
{
}
else
{
- p = bigAlloc(size, pool);
+ size_t pn;
+ size_t npages = round_up(size, PAGESIZE);
+ p = bigAlloc(npages, pool, &pn, &collected);
if (!p)
onOutOfMemoryError();
assert (pool !is null);
- // Round the size up to the number of pages needed to store it
- size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
+
capacity = npages * PAGESIZE;
- bit_i = (p - pool.baseAddr) / 16;
+ bit_i = pn * (PAGESIZE / 16);
+ pool.freebits.clear(bit_i);
+ pool.pagetable[pn] = B_PAGE;
+ if (npages > 1)
+ memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
+ p = pool.baseAddr + pn * PAGESIZE;
+ memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
+ if (opts.options.mem_stomp)
+ memset(p, 0xF1, size);
+
}
// Store the bit mask AFTER SENTINEL_POST
assert (bin >= B_PAGE || !pool.freebits.test(bit_i));
}
+ gc.free_mem -= capacity;
+ if (collected) {
+        // If there is not enough free memory, allocate a new pool big enough
+        // to have at least min_free% of the total heap free. If there is too
+        // much free memory, try to free some empty pools.
+ double percent_free = gc.free_mem * 100.0 / gc.total_mem;
+ if (percent_free < opts.options.min_free) {
+ auto pool_size = gc.total_mem * 1.0 / opts.options.min_free
+ - gc.free_mem;
+ newPool(round_up(cast(size_t)pool_size, PAGESIZE));
+ }
+ else
+ minimize(false);
+ }
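+    // Made-up example for the heuristic above: with total_mem == 100 MiB,
+    // free_mem == 2 MiB and min_free == 5, only 2% of the heap is free, so a
+    // new pool of about 100 / 5 - 2 == 18 MiB is requested; with 20 MiB free
+    // (20%) the GC would instead call minimize(false) to release empty pools.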
+
return p;
}
private void *realloc(void *p, size_t size, uint attrs,
size_t* pm_bitmask)
{
- if (!size)
- {
+ if (!size) {
if (p)
- {
free(p);
- p = null;
- }
+ return null;
}
- else if (!p)
- {
- p = malloc(size, attrs, pm_bitmask);
+
+ if (p is null)
+ return malloc(size, attrs, pm_bitmask);
+
+ Pool* pool = findPool(p);
+ if (pool is null)
+ return null;
+
+ // Set or retrieve attributes as appropriate
+ auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
+ if (attrs) {
+ clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
+ setAttr(pool, bit_i, attrs);
}
else
- {
- Pool* pool = findPool(p);
- if (pool is null)
- return null;
+ attrs = getAttr(pool, bit_i);
- // Set or retrieve attributes as appropriate
- auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
- if (attrs) {
- clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
- setAttr(pool, bit_i, attrs);
- }
- else
- attrs = getAttr(pool, bit_i);
-
- void* blk_base_addr = pool.findBase(p);
- size_t blk_size = pool.findSize(p);
- bool has_pm = has_pointermap(attrs);
- size_t pm_bitmask_size = 0;
- if (has_pm) {
- pm_bitmask_size = size_t.sizeof;
- // Retrieve pointer map bit mask if appropriate
- if (pm_bitmask is null) {
- auto end_of_blk = cast(size_t**)(blk_base_addr +
- blk_size - size_t.sizeof);
- pm_bitmask = *end_of_blk;
- }
+ void* blk_base_addr = pool.findBase(p);
+ size_t blk_size = pool.findSize(p);
+ bool has_pm = has_pointermap(attrs);
+ size_t pm_bitmask_size = 0;
+ if (has_pm) {
+ pm_bitmask_size = size_t.sizeof;
+ // Retrieve pointer map bit mask if appropriate
+ if (pm_bitmask is null) {
+ auto end_of_blk = cast(size_t**)(
+ blk_base_addr + blk_size - size_t.sizeof);
+ pm_bitmask = *end_of_blk;
}
+ }
- if (opts.options.sentinel)
- {
- sentinel_Invariant(p);
- size_t sentinel_stored_size = *sentinel_size(p);
- if (sentinel_stored_size != size)
- {
- void* p2 = malloc(size, attrs, pm_bitmask);
- if (sentinel_stored_size < size)
- size = sentinel_stored_size;
- cstring.memcpy(p2, p, size);
- p = p2;
+ if (opts.options.sentinel) {
+ sentinel_Invariant(p);
+ size_t sentinel_stored_size = *sentinel_size(p);
+ if (sentinel_stored_size != size) {
+ void* p2 = malloc(size, attrs, pm_bitmask);
+ if (sentinel_stored_size < size)
+ size = sentinel_stored_size;
+ cstring.memcpy(p2, p, size);
+ p = p2;
+ }
+ return p;
+ }
+
+ size += pm_bitmask_size;
+ if (blk_size >= PAGESIZE && size >= PAGESIZE) {
+ auto psz = blk_size / PAGESIZE;
+ auto newsz = round_up(size, PAGESIZE);
+ if (newsz == psz)
+ return p;
+
+ auto pagenum = (p - pool.baseAddr) / PAGESIZE;
+
+ if (newsz < psz) {
+ // Shrink in place
+ if (opts.options.mem_stomp)
+ memset(p + size - pm_bitmask_size, 0xF2,
+ blk_size - size - pm_bitmask_size);
+ pool.freePages(pagenum + newsz, psz - newsz);
+ auto new_blk_size = (PAGESIZE * newsz);
+ gc.free_mem += blk_size - new_blk_size;
+            // update the size cache, assuming it is very likely that the
+            // size of this block will be queried in the near future
+ pool.update_cache(p, new_blk_size);
+ if (has_pm) {
+ auto end_of_blk = cast(size_t**)(blk_base_addr +
+ new_blk_size - pm_bitmask_size);
+ *end_of_blk = pm_bitmask;
}
+ return p;
}
- else
- {
- size += pm_bitmask_size;
- if (blk_size >= PAGESIZE && size >= PAGESIZE)
- {
- auto psz = blk_size / PAGESIZE;
- auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
- if (newsz == psz)
- return p;
-
- auto pagenum = (p - pool.baseAddr) / PAGESIZE;
- if (newsz < psz)
- {
- // Shrink in place
+ if (pagenum + newsz <= pool.npages) {
+ // Attempt to expand in place
+ for (size_t i = pagenum + psz; 1;) {
+ if (i == pagenum + newsz) {
if (opts.options.mem_stomp)
- memset(p + size - pm_bitmask_size, 0xF2,
- blk_size - size - pm_bitmask_size);
- pool.freePages(pagenum + newsz, psz - newsz);
+ memset(p + blk_size - pm_bitmask_size, 0xF0,
+ size - blk_size - pm_bitmask_size);
+ memset(pool.pagetable + pagenum + psz, B_PAGEPLUS,
+ newsz - psz);
auto new_blk_size = (PAGESIZE * newsz);
- // update the size cache, assuming that is very likely the
- // size of this block will be queried in the near future
+ gc.free_mem -= new_blk_size - blk_size;
+                    // update the size cache, assuming it is very likely
+                    // that the size of this block will be queried in the
+                    // near future
pool.update_cache(p, new_blk_size);
if (has_pm) {
- auto end_of_blk = cast(size_t**)(blk_base_addr +
- new_blk_size - pm_bitmask_size);
+ auto end_of_blk = cast(size_t**)(
+ blk_base_addr + new_blk_size - pm_bitmask_size);
*end_of_blk = pm_bitmask;
}
return p;
}
- else if (pagenum + newsz <= pool.npages)
- {
- // Attempt to expand in place
- for (size_t i = pagenum + psz; 1;)
- {
- if (i == pagenum + newsz)
- {
- if (opts.options.mem_stomp)
- memset(p + blk_size - pm_bitmask_size,
- 0xF0, size - blk_size
- - pm_bitmask_size);
- memset(pool.pagetable + pagenum +
- psz, B_PAGEPLUS, newsz - psz);
- auto new_blk_size = (PAGESIZE * newsz);
- // update the size cache, assuming that is very
- // likely the size of this block will be queried in
- // the near future
- pool.update_cache(p, new_blk_size);
- if (has_pm) {
- auto end_of_blk = cast(size_t**)(
- blk_base_addr + new_blk_size -
- pm_bitmask_size);
- *end_of_blk = pm_bitmask;
- }
- return p;
- }
- if (i == pool.npages)
- {
- break;
- }
- if (pool.pagetable[i] != B_FREE)
- break;
- i++;
- }
- }
- }
- // if new size is bigger or less than half
- if (blk_size < size || blk_size > size * 2)
- {
- size -= pm_bitmask_size;
- blk_size -= pm_bitmask_size;
- void* p2 = malloc(size, attrs, pm_bitmask);
- if (blk_size < size)
- size = blk_size;
- cstring.memcpy(p2, p, size);
- p = p2;
+ if (i == pool.npages)
+ break;
+ if (pool.pagetable[i] != B_FREE)
+ break;
+ i++;
}
}
}
+
+ // if new size is bigger or less than half
+ if (blk_size < size || blk_size > size * 2) {
+ size -= pm_bitmask_size;
+ blk_size -= pm_bitmask_size;
+ void* p2 = malloc(size, attrs, pm_bitmask);
+ if (blk_size < size)
+ size = blk_size;
+ cstring.memcpy(p2, p, size);
+ p = p2;
+ }
+
return p;
}
return 0; // cannot extend buckets
auto psz = blk_size / PAGESIZE;
- auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
- auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
+ auto minsz = round_up(minsize, PAGESIZE);
+ auto maxsz = round_up(maxsize, PAGESIZE);
auto pagenum = (p - pool.baseAddr) / PAGESIZE;
memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
gc.p_cache = null;
gc.size_cache = 0;
+ gc.free_mem -= new_size - blk_size;
    // update the size cache, assuming it is very likely that the size of this
    // block will be queried in the near future
pool.update_cache(p, new_size);
pool.freebits.set_group(bit_i, PAGESIZE / 16);
while (++n < pool.npages && pool.pagetable[n] == B_PAGEPLUS)
npages++;
+ size_t size = npages * PAGESIZE;
if (opts.options.mem_stomp)
- memset(p, 0xF2, npages * PAGESIZE);
+ memset(p, 0xF2, size);
pool.freePages(pagenum, npages);
+ gc.free_mem += size;
// just in case we were caching this pointer
pool.clear_cache(p);
}
list.pool = pool;
gc.free_list[bin] = list;
pool.freebits.set(bit_i);
+ gc.free_mem += binsize[bin];
}
+ double percent_free = gc.free_mem * 100.0 / gc.total_mem;
+ if (percent_free > opts.options.min_free)
+ minimize(false);
}
freebits.set_all();
// avoid accidental sweeping of new pools while using eager allocation
- if (gc.mark_proc_pid)
+ if (collect_in_progress())
mark.set_all();
pagetable = cast(ubyte*) cstdlib.malloc(npages);