The GC can come under a lot of pressure when a collection recovers very little
memory, but just enough to fulfill the current memory request: this causes many
collections with very little gain (while the key to an efficient GC is to
recover as much memory as possible with as little work as possible). Eager
allocation greatly reduces this effect, because eventually the GC allocates
more memory, but heap minimization can trigger it again.

This patch adds an option, min_free, which specifies the minimum percentage of
the heap that should be free after a collection. If the free heap is less than
min_free% of the total heap, the GC allocates a new pool big enough to fulfill
that requirement. If the free heap is greater than min_free% of the total heap,
the GC tries to release some pools back to the OS to keep the amount of free
heap close to min_free%.
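To make the rule concrete, here is a minimal standalone D sketch of the
post-collection decision. It is an illustration only, not the patch itself
(which works on the collector's internal pool list); the names afterCollect,
heapTotal and heapFree are invented for the example:

    import std.stdio;

    enum Action { grow, shrink, keep }

    // Decide what to do after a collection, given the total and free heap
    // sizes in bytes and the min_free option (a percentage, 0-100).
    Action afterCollect(size_t heapTotal, size_t heapFree, uint minFree)
    {
        double percentFree = heapFree * 100.0 / heapTotal;
        if (percentFree < minFree)
            return Action.grow;   // allocate a new pool to reach min_free%
        if (percentFree > minFree)
            return Action.shrink; // try to return free pools to the OS
        return Action.keep;
    }

    void main()
    {
        // With min_free=15, a heap with only 5% free grows after collecting...
        writeln(afterCollect(100_000_000, 5_000_000, 15));  // grow
        // ...while a heap with 40% free releases pools back to the OS.
        writeln(afterCollect(100_000_000, 40_000_000, 15)); // shrink
    }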
/// max(pool.topAddr)
byte *max_addr;
+ /// Total heap memory
+ size_t total_mem;
+ /// Free heap memory
+ size_t free_mem;
+
/// Free list for each size
List*[B_MAX] free_list;
{
assert (gc !is null);
if (gc.inited) {
+ size_t total_mem = 0;
+ size_t free_mem = 0;
for (size_t i = 0; i < gc.pools.length; i++) {
Pool* pool = gc.pools[i];
pool.Invariant();
assert(*pool < *gc.pools[i + 1]);
else if (i + 1 == gc.pools.length)
assert(gc.max_addr == pool.topAddr);
+ total_mem += pool.npages * PAGESIZE;
+ for (size_t pn = 0; pn < pool.npages; ++pn)
+ if (pool.pagetable[pn] == B_FREE)
+ free_mem += PAGESIZE;
assert (p >= pool.baseAddr);
assert (p < pool.topAddr);
assert (pool.freebits.test((p - pool.baseAddr) / 16));
+ free_mem += binsize[i];
+ assert (gc.total_mem == total_mem);
+ assert (gc.free_mem == free_mem);
/**
* Minimizes physical memory usage by returning free pools to the OS.
+ *
+ * If full is false, keep some pools alive if the resulting free memory would
+ * be too small.
+void minimize(bool full = true)
{
// Disabled if a parallel collection is in progress because the shared mark
// bits of the freed pool might be used by the mark process
if (gc.pools.length == 0)
return;
- size_t n;
- size_t pn;
- Pool* pool;
-
- for (n = 0; n < gc.pools.length; n++)
+ for (size_t n = 0; n < gc.pools.length; n++)
+ Pool* pool = gc.pools[n];
+ size_t pn;
for (pn = 0; pn < pool.npages; pn++)
{
if (cast(Bins)pool.pagetable[pn] != B_FREE)
}
if (pn < pool.npages)
continue;
+ // Free pool
+ size_t pool_size = pool.npages * PAGESIZE;
+ if (!full) {
+ double percent_free = (gc.free_mem - pool_size) * 100.0 /
+ (gc.total_mem - pool_size);
+ if (percent_free < opts.options.min_free)
+ continue; // not enough free, don't remove this pool
+ }
+ gc.total_mem -= pool_size;
+ gc.free_mem -= pool_size;
pool.Dtor();
cstdlib.free(pool);
gc.pools.remove_at(n);
* Allocate a chunk of memory that is larger than a page.
* Return null if out of memory.
*/
-void* bigAlloc(size_t npages, out Pool* pool, size_t* pn)
+void* bigAlloc(size_t npages, out Pool* pool, size_t* pn, bool* collected)
// This code could use some refinement when repeatedly
// allocating very large arrays.
- // Release empty pools to prevent bloat
- minimize();
// Allocate new pool
pool = newPool(npages);
if (!pool)
// Try collecting
size_t freedpages = fullcollectshell();
- if (freedpages >= gc.pools.length * ((POOLSIZE / PAGESIZE) / 4)) {
+ *collected = true;
+ if (freedpages >= npages) {
if (void* p = find_block())
return p;
}
assert (inserted_pool is pool);
gc.min_addr = gc.pools[0].baseAddr;
gc.max_addr = gc.pools[gc.pools.length - 1].topAddr;
+ size_t pool_size = pool.topAddr - pool.baseAddr;
+ gc.total_mem += pool_size;
+ gc.free_mem += pool_size;
debug(COLLECT_PRINTF) printf("\tsweep\n");
gc.p_cache = null;
gc.size_cache = 0;
+ gc.free_mem = 0; // will be recalculated
size_t freedpages = 0;
size_t freed = 0;
for (size_t n = 0; n < gc.pools.length; n++)
pool.pagetable[pn] = B_FREE;
pool.freebits.set_group(bit_i, PAGESIZE / 16);
freedpages++;
+ gc.free_mem += PAGESIZE;
if (opts.options.mem_stomp)
memset(p, 0xF3, PAGESIZE);
while (pn + 1 < pool.npages && pool.pagetable[pn + 1] == B_PAGEPLUS)
bit_i += bit_stride;
pool.freebits.set_group(bit_i, PAGESIZE / 16);
freedpages++;
+ gc.free_mem += PAGESIZE;
if (opts.options.mem_stomp)
{
+ else if (bin == B_FREE) {
+ gc.free_mem += PAGESIZE;
+ }
pool.pagetable[pn] = B_FREE;
pool.freebits.set_group(bit_base, PAGESIZE / 16);
recoveredpages++;
+ gc.free_mem += PAGESIZE;
if (list.pool != pool)
list.pool = pool;
gc.free_list[bin] = list;
+ gc.free_mem += binsize[bin];
Pool* pool = void;
size_t bit_i = void;
size_t capacity = void; // to figure out where to store the bitmask
+ bool collected = false;
if (bin < B_PAGE)
{
p = gc.free_list[bin];
}
if (!gc.free_list[bin] && !allocPage(bin))
{
{
size_t pn;
size_t npages = round_up(size, PAGESIZE);
- p = bigAlloc(npages, pool, &pn);
+ p = bigAlloc(npages, pool, &pn, &collected);
if (!p)
onOutOfMemoryError();
assert (pool !is null);
assert (bin >= B_PAGE || !pool.freebits.test(bit_i));
}
+ gc.free_mem -= capacity;
+ if (collected) {
+ // If there is not enough free memory, allocate a new pool big enough
+ // to have at least the min_free% of the total heap free. If there is
+ // too much free memory, try to free some empty pools.
+ double percent_free = gc.free_mem * 100.0 / gc.total_mem;
+ if (percent_free < opts.options.min_free) {
+ auto pool_size = gc.total_mem * 1.0 / opts.options.min_free
+ - gc.free_mem;
+ newPool(round_up(cast(size_t)pool_size, PAGESIZE));
+ }
+ else
+ minimize(false);
+ }
+
blk_size - size - pm_bitmask_size);
pool.freePages(pagenum + newsz, psz - newsz);
auto new_blk_size = (PAGESIZE * newsz);
+ gc.free_mem += blk_size - new_blk_size;
// update the size cache, assuming that is very likely the
// size of this block will be queried in the near future
pool.update_cache(p, new_blk_size);
memset(pool.pagetable + pagenum +
psz, B_PAGEPLUS, newsz - psz);
auto new_blk_size = (PAGESIZE * newsz);
+ gc.free_mem -= new_blk_size - blk_size;
// update the size cache, assuming that is very
// likely the size of this block will be queried in
// the near future
memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
gc.p_cache = null;
gc.size_cache = 0;
+ gc.free_mem -= new_size - blk_size;
// update the size cache, assuming that is very likely the size of this
// block will be queried in the near future
pool.update_cache(p, new_size);
pool.freebits.set_group(bit_i, PAGESIZE / 16);
while (++n < pool.npages && pool.pagetable[n] == B_PAGEPLUS)
npages++;
+ size_t size = npages * PAGESIZE;
if (opts.options.mem_stomp)
- memset(p, 0xF2, npages * PAGESIZE);
pool.freePages(pagenum, npages);
// just in case we were caching this pointer
pool.clear_cache(p);
}
list.pool = pool;
gc.free_list[bin] = list;
pool.freebits.set(bit_i);
+ gc.free_mem += binsize[bin];
+ double percent_free = gc.free_mem * 100.0 / gc.total_mem;
+ if (percent_free > opts.options.min_free)
+ minimize(false);
bool conservative = false;
bool fork = true;
bool eager_alloc = true;
+ uint min_free = 15; // percent of the heap (0-100)
size_t prealloc_psize = 0;
size_t prealloc_npools = 0;
}
+void parse_min_free(char* value)
+{
+ char* end;
+ long free = cstdlib.strtol(value, &end, 10);
+ if (*end != '\0' || end == value || cerrno.errno || free < 0 || free > 100)
+ return;
+ options.min_free = free;
+}
+
+
void process_option(char* opt_name, char* opt_value)
{
if (cstr_eq(opt_name, "verbose"))
options.fork = parse_bool(opt_value);
else if (cstr_eq(opt_name, "eager_alloc"))
options.eager_alloc = parse_bool(opt_value);
+ else if (cstr_eq(opt_name, "min_free"))
+ parse_min_free(opt_value);
else if (cstr_eq(opt_name, "pre_alloc"))
parse_prealloc(opt_value);
}
assert (eager_alloc == true);
assert (prealloc_psize == 0);
assert (prealloc_npools == 0);
+ assert (min_free == 15);
}
parse("mem_stomp");
with (options) {
assert (eager_alloc == true);
assert (prealloc_psize == 0);
assert (prealloc_npools == 0);
+ assert (min_free == 15);
}
parse("mem_stomp=0:verbose=2:conservative:fork=0:eager_alloc=0");
with (options) {
assert (eager_alloc == false);
assert (prealloc_psize == 0);
assert (prealloc_npools == 0);
+ assert (min_free == 15);
}
parse("log_file=12345 67890:verbose=1:sentinel=4:mem_stomp=1");
with (options) {
assert (prealloc_psize == 0);
assert (prealloc_npools == 0);
}
+ parse("pre_alloc:min_free=30");
with (options) {
assert (verbose == 1);
assert (cstring.strcmp(log_file.ptr, "12345 67890".ptr) == 0);
assert (eager_alloc == false);
assert (prealloc_psize == 0);
assert (prealloc_npools == 0);
+ assert (min_free == 30);
}
parse("pre_alloc=1");
with (options) {
assert (eager_alloc == false);
assert (prealloc_psize == 1 * 1024 * 1024);
assert (prealloc_npools == 1);
+ assert (min_free == 30);
+ parse("pre_alloc=5a:min_free=101");
with (options) {
assert (verbose == 1);
assert (cstring.strcmp(log_file.ptr, "12345 67890".ptr) == 0);
assert (eager_alloc == false);
assert (prealloc_psize == 1 * 1024 * 1024);
assert (prealloc_npools == 1);
+ assert (min_free == 30);
+ parse("pre_alloc=5x:min_free=-1");
with (options) {
assert (verbose == 1);
assert (cstring.strcmp(log_file.ptr, "12345 67890".ptr) == 0);
assert (eager_alloc == false);
assert (prealloc_psize == 1 * 1024 * 1024);
assert (prealloc_npools == 1);
+ assert (min_free == 30);
- parse("pre_alloc=09x010");
+ parse("pre_alloc=09x010:min_free=10a");
with (options) {
assert (verbose == 1);
assert (cstring.strcmp(log_file.ptr, "12345 67890".ptr) == 0);
assert (eager_alloc == false);
assert (prealloc_psize == 9 * 1024 * 1024);
assert (prealloc_npools == 10);
+ assert (min_free == 30);
- parse("pre_alloc=5x2");
+ parse("pre_alloc=5x2:min_free=1.0");
with (options) {
assert (verbose == 1);
assert (cstring.strcmp(log_file.ptr, "12345 67890".ptr) == 0);
assert (eager_alloc == false);
assert (prealloc_psize == 5 * 1024 * 1024);
assert (prealloc_npools == 2);
+ assert (min_free == 30);
- parse("pre_alloc=9x5x");
+ parse("pre_alloc=9x5x:min_free=-1");
with (options) {
assert (verbose == 1);
assert (cstring.strcmp(log_file.ptr, "12345 67890".ptr) == 0);
assert (eager_alloc == false);
assert (prealloc_psize == 5 * 1024 * 1024);
assert (prealloc_npools == 2);
+ assert (min_free == 30);
- parse("pre_alloc=9x-5");
+ parse("pre_alloc=9x-5:min_free=0");
with (options) {
assert (verbose == 1);
assert (cstring.strcmp(log_file.ptr, "12345 67890".ptr) == 0);
assert (eager_alloc == false);
assert (prealloc_psize == 5 * 1024 * 1024);
assert (prealloc_npools == 2);
+ assert (min_free == 0);
- parse("pre_alloc=0x3x0x4");
+ parse("pre_alloc=0x3x0x4:min_free=100");
with (options) {
assert (verbose == 1);
assert (cstring.strcmp(log_file.ptr, "12345 67890".ptr) == 0);
assert (eager_alloc == false);
assert (prealloc_psize == 5 * 1024 * 1024);
assert (prealloc_npools == 2);
+ assert (min_free == 100);
}
parse(null);
with (options) {
assert (eager_alloc == false);
assert (prealloc_psize == 5 * 1024 * 1024);
assert (prealloc_npools == 2);
+ assert (min_free == 100);
}
parse("");
with (options) {
assert (eager_alloc == false);
assert (prealloc_psize == 5 * 1024 * 1024);
assert (prealloc_npools == 2);
+ assert (min_free == 100);
}
parse(":");
with (options) {
assert (eager_alloc == false);
assert (prealloc_psize == 5 * 1024 * 1024);
assert (prealloc_npools == 2);
+ assert (min_free == 100);
}
parse("::::");
with (options) {
assert (eager_alloc == false);
assert (prealloc_psize == 5 * 1024 * 1024);
assert (prealloc_npools == 2);
+ assert (min_free == 100);