X-Git-Url: https://git.llucax.com/software/dgc/cdgc.git/blobdiff_plain/b28fd72842fc9ce935bed74f7b2ba79f9cc59711..e04cd7f5a7fd412bb98f9631fde20b7856de58dd:/rt/gc/cdgc/gc.d?ds=sidebyside diff --git a/rt/gc/cdgc/gc.d b/rt/gc/cdgc/gc.d index 78f673c..e53d5d1 100644 --- a/rt/gc/cdgc/gc.d +++ b/rt/gc/cdgc/gc.d @@ -155,7 +155,8 @@ alias ubyte Bins; struct List { - List *next; + List* next; + Pool* pool; } @@ -210,7 +211,7 @@ struct GC dynarray.DynArray!(void*) roots; dynarray.DynArray!(Range) ranges; - dynarray.DynArray!(Pool) pools; + dynarray.DynArray!(Pool*) pools; Stats stats; } @@ -236,7 +237,7 @@ bool Invariant() if (i == 0) assert(gc.min_addr == pool.baseAddr); if (i + 1 < gc.pools.length) - assert(*pool < gc.pools[i + 1]); + assert(*pool < *gc.pools[i + 1]); else if (i + 1 == gc.pools.length) assert(gc.max_addr == pool.topAddr); } @@ -250,10 +251,14 @@ bool Invariant() assert(gc.ranges[i].pbot <= gc.ranges[i].ptop); } - for (size_t i = 0; i < B_PAGE; i++) - for (List *list = gc.free_list[i]; list; list = list.next) - { + for (size_t i = 0; i < B_PAGE; i++) { + for (List *list = gc.free_list[i]; list; list = list.next) { + assert (list.pool !is null); + auto p = cast(byte*) list; + assert (p >= list.pool.baseAddr); + assert (p < list.pool.topAddr); } + } } return true; } @@ -393,7 +398,7 @@ void minimize() { size_t n; size_t pn; - Pool* pool; + Pool* pool; for (n = 0; n < gc.pools.length; n++) { @@ -406,6 +411,7 @@ void minimize() if (pn < pool.npages) continue; pool.Dtor(); + cstdlib.free(pool); gc.pools.remove_at(n); n--; } @@ -418,9 +424,8 @@ void minimize() * Allocate a chunk of memory that is larger than a page. * Return null if out of memory. */ -void *bigAlloc(size_t size) +void* bigAlloc(size_t size, out Pool* pool) { - Pool* pool; size_t npages; size_t n; size_t pn; @@ -532,20 +537,24 @@ Pool *newPool(size_t npages) npages = n; } - Pool p; - p.initialize(npages); - if (!p.baseAddr) + auto pool = cast(Pool*) cstdlib.calloc(1, Pool.sizeof); + if (pool is null) + return null; + pool.initialize(npages); + if (!pool.baseAddr) { - p.Dtor(); + pool.Dtor(); return null; } - Pool* pool = gc.pools.insert_sorted(p); - if (pool) - { - gc.min_addr = gc.pools[0].baseAddr; - gc.max_addr = gc.pools[gc.pools.length - 1].topAddr; + auto inserted_pool = *gc.pools.insert_sorted!("*a < *b")(pool); + if (inserted_pool is null) { + pool.Dtor(); + return null; } + assert (inserted_pool is pool); + gc.min_addr = gc.pools[0].baseAddr; + gc.max_addr = gc.pools[gc.pools.length - 1].topAddr; return pool; } @@ -577,14 +586,16 @@ int allocPage(Bins bin) // Convert page to free list size_t size = binsize[bin]; - List **b = &gc.free_list[bin]; + auto list_head = &gc.free_list[bin]; p = pool.baseAddr + pn * PAGESIZE; ptop = p + PAGESIZE; for (; p < ptop; p += size) { - (cast(List *)p).next = *b; - *b = cast(List *)p; + List* l = cast(List *) p; + l.next = *list_head; + l.pool = pool; + *list_head = l; } return 1; } @@ -604,7 +615,7 @@ void mark_range(void *pbot, void *ptop, size_t* pm_bitmask) void **p1 = cast(void **)pbot; void **p2 = cast(void **)ptop; size_t pcache = 0; - uint changes = 0; + bool changes = false; size_t type_size = pm_bitmask[0]; size_t* pm_bits = pm_bitmask + 1; @@ -630,13 +641,17 @@ void mark_range(void *pbot, void *ptop, size_t* pm_bitmask) if (pool) { size_t offset = cast(size_t)(p - pool.baseAddr); - size_t bit_i; + size_t bit_i = void; size_t pn = offset / PAGESIZE; Bins bin = cast(Bins)pool.pagetable[pn]; + // Cache B_PAGE, B_PAGEPLUS and B_FREE lookups + if (bin >= B_PAGE) + pcache = 
cast(size_t)p & ~(PAGESIZE-1); + // Adjust bit to be at start of allocated memory block if (bin <= B_PAGE) - bit_i = (offset & notbinsize[bin]) >> 4; + bit_i = (offset & notbinsize[bin]) / 16; else if (bin == B_PAGEPLUS) { do @@ -646,14 +661,8 @@ void mark_range(void *pbot, void *ptop, size_t* pm_bitmask) while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS); bit_i = pn * (PAGESIZE / 16); } - else - { - // Don't mark bits in B_FREE pages + else // Don't mark bits in B_FREE pages continue; - } - - if (bin >= B_PAGE) // Cache B_PAGE and B_PAGEPLUS lookups - pcache = cast(size_t)p & ~(PAGESIZE-1); if (!pool.mark.test(bit_i)) { @@ -661,7 +670,7 @@ void mark_range(void *pbot, void *ptop, size_t* pm_bitmask) if (!pool.noscan.test(bit_i)) { pool.scan.set(bit_i); - changes = 1; + changes = true; } } } @@ -830,9 +839,13 @@ void mark(void *stackTop) { for (List *list = gc.free_list[n]; list; list = list.next) { - Pool* pool = findPool(list); - assert(pool); - pool.freebits.set(cast(size_t)(cast(byte*)list - pool.baseAddr) / 16); + Pool* pool = list.pool; + auto ptr = cast(byte*) list; + assert (pool); + assert (pool.baseAddr <= ptr); + assert (ptr < pool.topAddr); + size_t bit_i = cast(size_t)(ptr - pool.baseAddr) / 16; + pool.freebits.set(bit_i); } } @@ -988,14 +1001,12 @@ version(none) // BUG: doesn't work because freebits() must also be cleared { if (pool.finals.nbits && pool.finals.testClear(bit_i)) { if (opts.options.sentinel) - rt_finalize(cast(List *)sentinel_add(p), false/*gc.no_stack > 0*/); + rt_finalize(sentinel_add(p), false/*gc.no_stack > 0*/); else - rt_finalize(cast(List *)p, false/*gc.no_stack > 0*/); + rt_finalize(p, false/*gc.no_stack > 0*/); } clrAttr(pool, bit_i, BlkAttr.ALL_BITS); - List *list = cast(List *)p; - if (opts.options.mem_stomp) memset(p, 0xF3, size); } @@ -1014,14 +1025,12 @@ version(none) // BUG: doesn't work because freebits() must also be cleared pool.freebits.set(bit_i); if (pool.finals.nbits && pool.finals.testClear(bit_i)) { if (opts.options.sentinel) - rt_finalize(cast(List *)sentinel_add(p), false/*gc.no_stack > 0*/); + rt_finalize(sentinel_add(p), false/*gc.no_stack > 0*/); else - rt_finalize(cast(List *)p, false/*gc.no_stack > 0*/); + rt_finalize(p, false/*gc.no_stack > 0*/); } clrAttr(pool, bit_i, BlkAttr.ALL_BITS); - List *list = cast(List *)p; - if (opts.options.mem_stomp) memset(p, 0xF3, size); @@ -1107,10 +1116,14 @@ version(none) // BUG: doesn't work because freebits() must also be cleared bit_i = bit_base + u / 16; if (pool.freebits.test(bit_i)) { - List *list = cast(List *)(p + u); - // avoid unnecessary writes + assert ((p+u) >= pool.baseAddr); + assert ((p+u) < pool.topAddr); + List* list = cast(List*) (p + u); + // avoid unnecesary writes (it really saves time) if (list.next != gc.free_list[bin]) list.next = gc.free_list[bin]; + if (list.pool != pool) + list.pool = pool; gc.free_list[bin] = list; } } @@ -1247,7 +1260,8 @@ private void *malloc(size_t size, uint attrs, size_t* pm_bitmask) lastbin = bin; } - size_t capacity; // to figure out where to store the bitmask + Pool* pool = void; + size_t capacity = void; // to figure out where to store the bitmask if (bin < B_PAGE) { p = gc.free_list[bin]; @@ -1274,6 +1288,7 @@ private void *malloc(size_t size, uint attrs, size_t* pm_bitmask) if (!gc.free_list[bin] && !allocPage(bin)) { newPool(1); // allocate new pool to find a new page + // TODO: hint allocPage() to use the pool we just created int result = allocPage(bin); if (!result) onOutOfMemoryError(); @@ -1283,7 +1298,11 @@ private void *malloc(size_t 
size, uint attrs, size_t* pm_bitmask) capacity = binsize[bin]; // Return next item from free list - gc.free_list[bin] = (cast(List*)p).next; + List* list = cast(List*) p; + assert ((cast(byte*)list) >= list.pool.baseAddr); + assert ((cast(byte*)list) < list.pool.topAddr); + gc.free_list[bin] = list.next; + pool = list.pool; if (!(attrs & BlkAttr.NO_SCAN)) memset(p + size, 0, capacity - size); if (opts.options.mem_stomp) @@ -1291,9 +1310,10 @@ private void *malloc(size_t size, uint attrs, size_t* pm_bitmask) } else { - p = bigAlloc(size); + p = bigAlloc(size, pool); if (!p) onOutOfMemoryError(); + assert (pool !is null); // Round the size up to the number of pages needed to store it size_t npages = (size + PAGESIZE - 1) / PAGESIZE; capacity = npages * PAGESIZE; @@ -1314,12 +1334,8 @@ private void *malloc(size_t size, uint attrs, size_t* pm_bitmask) } if (attrs) - { - Pool *pool = findPool(p); - assert(pool); - setAttr(pool, cast(size_t)(p - pool.baseAddr) / 16, attrs); - } + return p; } @@ -1416,10 +1432,13 @@ private void *realloc(void *p, size_t size, uint attrs, memset(p + size - pm_bitmask_size, 0xF2, blk_size - size - pm_bitmask_size); pool.freePages(pagenum + newsz, psz - newsz); + auto new_blk_size = (PAGESIZE * newsz); + // update the size cache, assuming that is very likely the + // size of this block will be queried in the near future + pool.update_cache(p, new_blk_size); if (has_pm) { - auto end_of_blk = cast(size_t**)( - blk_base_addr + (PAGESIZE * newsz) - - pm_bitmask_size); + auto end_of_blk = cast(size_t**)(blk_base_addr + + new_blk_size - pm_bitmask_size); *end_of_blk = pm_bitmask; } return p; @@ -1437,10 +1456,14 @@ private void *realloc(void *p, size_t size, uint attrs, - pm_bitmask_size); memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, newsz - psz); + auto new_blk_size = (PAGESIZE * newsz); + // update the size cache, assuming that is very + // likely the size of this block will be queried in + // the near future + pool.update_cache(p, new_blk_size); if (has_pm) { auto end_of_blk = cast(size_t**)( - blk_base_addr + - (PAGESIZE * newsz) - + blk_base_addr + new_blk_size - pm_bitmask_size); *end_of_blk = pm_bitmask; } @@ -1549,6 +1572,9 @@ body memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz); gc.p_cache = null; gc.size_cache = 0; + // update the size cache, assuming that is very likely the size of this + // block will be queried in the near future + pool.update_cache(p, new_size); if (has_pm) { new_size -= size_t.sizeof; @@ -1594,16 +1620,19 @@ private void free(void *p) if (opts.options.mem_stomp) memset(p, 0xF2, npages * PAGESIZE); pool.freePages(pagenum, npages); + // just in case we were caching this pointer + pool.clear_cache(p); } else { // Add to free list - List *list = cast(List*)p; + List* list = cast(List*) p; if (opts.options.mem_stomp) memset(p, 0xF2, binsize[bin]); list.next = gc.free_list[bin]; + list.pool = pool; gc.free_list[bin] = list; } } @@ -1694,9 +1723,7 @@ private void checkNoSync(void *p) if (bin < B_PAGE) { // Check that p is not on a free list - List *list; - - for (list = gc.free_list[bin]; list; list = list.next) + for (List* list = gc.free_list[bin]; list; list = list.next) { assert(cast(void*)list != p); } @@ -1762,7 +1789,7 @@ private GCStats getStats() for (n = 0; n < B_PAGE; n++) { - for (List *list = gc.free_list[n]; list; list = list.next) + for (List* list = gc.free_list[n]; list; list = list.next) flsize += binsize[n]; } @@ -1873,10 +1900,18 @@ struct Pool size_t cached_size; void* cached_ptr; - void clear_cache() + void 
clear_cache(void* ptr = null) + { + if (ptr is null || ptr is this.cached_ptr) { + this.cached_ptr = null; + this.cached_size = 0; + } + } + + void update_cache(void* ptr, size_t size) { - this.cached_ptr = null; - this.cached_size = 0; + this.cached_ptr = ptr; + this.cached_size = size; } void initialize(size_t npages) @@ -1893,7 +1928,6 @@ struct Pool npages = 0; poolsize = 0; } - //assert(baseAddr); topAddr = baseAddr + poolsize; size_t nbits = cast(size_t)poolsize / 16; @@ -2093,8 +2127,9 @@ void sentinel_init(void *p, size_t size) void sentinel_Invariant(void *p) { - assert(*sentinel_pre(p) == SENTINEL_PRE); - assert(*sentinel_post(p) == SENTINEL_POST); + if (*sentinel_pre(p) != SENTINEL_PRE || + *sentinel_post(p) != SENTINEL_POST) + cstdlib.abort(); }
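
Note on the recurring change in this diff: each free-list node (List) now carries a
pointer to its owning Pool, set when the node is linked in (allocPage(), free(), and
the sweep relink path) and read back during marking and malloc() instead of calling
findPool() on the node's address. Because the cached Pool* must stay valid, the pools
themselves move from a dynarray.DynArray!(Pool) of values to heap-allocated Pool*
entries (cstdlib.calloc in newPool(), cstdlib.free in minimize()), so their addresses
do not change when the array grows or shrinks. The extra pointer costs no real space:
the List header lives inside the free block itself, and the smallest bin is 16 bytes.

Below is a minimal, self-contained sketch of that idea, not the cdgc code itself:
Pool, List and findPool() are simplified stand-ins, and it uses D2's
core.stdc.stdlib rather than the runtime's cstdlib wrapper.

    import core.stdc.stdlib : malloc, free;

    enum PAGESIZE = 4096;

    struct Pool
    {
        byte* baseAddr;
        byte* topAddr;
    }

    struct List
    {
        List* next;
        Pool* pool;   // owning pool, cached when the node is linked in
    }

    // Stand-in for the old lookup: an address search over all pools.
    Pool* findPool(Pool*[] pools, void* p)
    {
        foreach (pool; pools)
            if (p >= pool.baseAddr && p < pool.topAddr)
                return pool;
        return null;
    }

    void main()
    {
        // One fake pool backed by a single malloc'd page.
        auto pool = cast(Pool*) malloc(Pool.sizeof);
        pool.baseAddr = cast(byte*) malloc(PAGESIZE);
        pool.topAddr = pool.baseAddr + PAGESIZE;

        // Carve the page into 16-byte bins and thread them onto a free
        // list, tagging every node with its owning pool, as allocPage()
        // does in the diff above.
        enum binsize = 16;
        List* freeList = null;
        for (byte* p = pool.baseAddr; p < pool.topAddr; p += binsize)
        {
            auto node = cast(List*) p;
            node.next = freeList;
            node.pool = pool;
            freeList = node;
        }

        // Consumer side: the owning pool is now one load away instead of
        // a search over all pools.
        for (List* node = freeList; node; node = node.next)
        {
            assert(node.pool is pool);
            assert(node.pool is findPool([pool], node));
        }

        free(pool.baseAddr);
        free(pool);
    }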