return BlkInfo.init;
BlkInfo info;
info.base = pool.findBase(p);
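+ // findBase() yields null when p does not point into an allocated block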
+ if (info.base is null)
+ return BlkInfo.init;
info.size = pool.findSize(info.base);
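// attribute bits are kept at 16-byte granularity, one bit per 16 bytes
// of pool memory (see the nbits computation in Pool.initialize)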
info.attr = getAttr(pool, cast(size_t)(info.base - pool.baseAddr) / 16u);
if (has_pointermap(info.attr)) {
{
size_t n;
size_t pn;
- Pool *pool;
+ Pool* pool;
for (n = 0; n < gc.pools.length; n++)
{
void **p1 = cast(void **)pbot;
void **p2 = cast(void **)ptop;
size_t pcache = 0;
- uint changes = 0;
+ bool changes = false;
size_t type_size = pm_bitmask[0];
size_t* pm_bits = pm_bitmask + 1;
if (pool)
{
size_t offset = cast(size_t)(p - pool.baseAddr);
- size_t bit_i;
+ size_t bit_i = void;
size_t pn = offset / PAGESIZE;
Bins bin = cast(Bins)pool.pagetable[pn];
+ // Cache B_PAGE, B_PAGEPLUS and B_FREE lookups
+ if (bin >= B_PAGE)
+ pcache = cast(size_t)p & ~(PAGESIZE-1);
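+ // (later pointers into the same page can then be recognized and skipped)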
+
// Adjust bit to be at start of allocated memory block
if (bin <= B_PAGE)
- bit_i = (offset & notbinsize[bin]) >> 4;
+ bit_i = (offset & notbinsize[bin]) / 16;
else if (bin == B_PAGEPLUS)
{
// walk back to the B_PAGE where this large block starts
do
--pn;
while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
bit_i = pn * (PAGESIZE / 16);
}
- else
- {
- // Don't mark bits in B_FREE pages
+ else // Don't mark bits in B_FREE pages
continue;
- }
-
- if (bin >= B_PAGE) // Cache B_PAGE and B_PAGEPLUS lookups
- pcache = cast(size_t)p & ~(PAGESIZE-1);
if (!pool.mark.test(bit_i))
{
if (!pool.noscan.test(bit_i))
{
pool.scan.set(bit_i);
- changes = 1;
+ changes = true;
}
}
}
{
debug(COLLECT_PRINTF) printf("\tmark()\n");
- gc.p_cache = null;
- gc.size_cache = 0;
-
gc.any_changes = false;
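// set again during marking whenever a new block gets queued for scanning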
for (size_t n = 0; n < gc.pools.length; n++)
{
{
// Free up everything not marked
debug(COLLECT_PRINTF) printf("\tsweep\n");
+ gc.p_cache = null;
+ gc.size_cache = 0;
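+ // (moved here from mark(): sweeping is what actually frees blocks and
+ // invalidates the cached pointer/size pair)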
size_t freedpages = 0;
size_t freed = 0;
for (size_t n = 0; n < gc.pools.length; n++)
{
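// run any pending finalizer before the block is reclaimed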
if (pool.finals.nbits && pool.finals.testClear(bit_i)) {
if (opts.options.sentinel)
- rt_finalize(cast(List *)sentinel_add(p), false/*gc.no_stack > 0*/);
+ rt_finalize(sentinel_add(p), false/*gc.no_stack > 0*/);
else
- rt_finalize(cast(List *)p, false/*gc.no_stack > 0*/);
+ rt_finalize(p, false/*gc.no_stack > 0*/);
}
clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
- List *list = cast(List *)p;
-
if (opts.options.mem_stomp)
memset(p, 0xF3, size);
}
pool.freebits.set(bit_i);
if (pool.finals.nbits && pool.finals.testClear(bit_i)) {
if (opts.options.sentinel)
- rt_finalize(cast(List *)sentinel_add(p), false/*gc.no_stack > 0*/);
+ rt_finalize(sentinel_add(p), false/*gc.no_stack > 0*/);
else
- rt_finalize(cast(List *)p, false/*gc.no_stack > 0*/);
+ rt_finalize(p, false/*gc.no_stack > 0*/);
}
clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
- List *list = cast(List *)p;
-
if (opts.options.mem_stomp)
memset(p, 0xF3, size);
if (!gc.free_list[bin] && !allocPage(bin))
{
newPool(1); // allocate new pool to find a new page
+ // TODO: hint allocPage() to use the pool we just created
int result = allocPage(bin);
if (!result)
onOutOfMemoryError();
memset(p + size - pm_bitmask_size, 0xF2,
blk_size - size - pm_bitmask_size);
pool.freePages(pagenum + newsz, psz - newsz);
+ auto new_blk_size = (PAGESIZE * newsz);
+ // update the size cache, assuming it is very likely that the size
+ // of this block will be queried in the near future
+ pool.update_cache(p, new_blk_size);
if (has_pm) {
- auto end_of_blk = cast(size_t**)(
- blk_base_addr + (PAGESIZE * newsz) -
- pm_bitmask_size);
+ auto end_of_blk = cast(size_t**)(blk_base_addr +
+ new_blk_size - pm_bitmask_size);
*end_of_blk = pm_bitmask;
}
return p;
memset(pool.pagetable + pagenum +
psz, B_PAGEPLUS, newsz - psz);
+ auto new_blk_size = (PAGESIZE * newsz);
+ // update the size cache, assuming it is very likely that
+ // the size of this block will be queried in the near
+ // future
+ pool.update_cache(p, new_blk_size);
if (has_pm) {
auto end_of_blk = cast(size_t**)(
- blk_base_addr +
- (PAGESIZE * newsz) -
+ blk_base_addr + new_blk_size -
pm_bitmask_size);
*end_of_blk = pm_bitmask;
}
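// the extra pages become B_PAGEPLUS continuations of this block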
memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
gc.p_cache = null;
gc.size_cache = 0;
+ // update the size cache, assuming it is very likely that the size of
+ // this block will be queried in the near future
+ pool.update_cache(p, new_size);
if (has_pm) {
new_size -= size_t.sizeof;
if (opts.options.mem_stomp)
memset(p, 0xF2, npages * PAGESIZE);
pool.freePages(pagenum, npages);
+ // just in case we were caching this pointer
+ pool.clear_cache(p);
}
else
{
// Add to free list
- List *list = cast(List*)p;
+ List* list = cast(List*) p;
if (opts.options.mem_stomp)
memset(p, 0xF2, binsize[bin]);
if (bin < B_PAGE)
{
// Check that p is not on a free list
- List *list;
-
- for (list = gc.free_list[bin]; list; list = list.next)
+ for (List* list = gc.free_list[bin]; list; list = list.next)
{
assert(cast(void*)list != p);
}
for (n = 0; n < B_PAGE; n++)
{
- for (List *list = gc.free_list[n]; list; list = list.next)
+ for (List* list = gc.free_list[n]; list; list = list.next)
flsize += binsize[n];
}
size_t cached_size;
void* cached_ptr;
- void clear_cache()
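+ /// Invalidate the cached size; given a non-null ptr, only if it is
+ /// the pointer currently cached.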
+ void clear_cache(void* ptr = null)
+ {
+ if (ptr is null || ptr is this.cached_ptr) {
+ this.cached_ptr = null;
+ this.cached_size = 0;
+ }
+ }
+
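+ /// Remember the size of the block at ptr to speed up the next size
+ /// query for it.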
+ void update_cache(void* ptr, size_t size)
{
- this.cached_ptr = null;
- this.cached_size = 0;
+ this.cached_ptr = ptr;
+ this.cached_size = size;
}
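// A size lookup can then hit the cache first, along these lines (a sketch
// only; the matching findSize() change is not part of this excerpt):
//
//     size_t findSize(void* p)
//     {
//         if (p is this.cached_ptr)
//             return this.cached_size;
//         // ... fall back to the usual page table walk ...
//     }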
void initialize(size_t npages)
npages = 0;
poolsize = 0;
}
- //assert(baseAddr);
topAddr = baseAddr + poolsize;
size_t nbits = cast(size_t)poolsize / 16;
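// one bit per 16 bytes of pool memory backs the mark/scan/free bit sets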