From 62b4073393f1d66e88322376627c8bef7b27f81d Mon Sep 17 00:00:00 2001
From: Leandro Lucarella
Date: Fri, 27 Aug 2010 23:45:02 -0300
Subject: [PATCH] Store a pointer to the pool in the free_list

Since the smallest bin size is big enough to store 2 pointers, each
free list node also stores a pointer to the pool the bin belongs to,
so we don't have to find the pool when operating on a free bin.

To make this work, the pools have to be stored outside the DynArray
(which now holds only pointers to them), because insert_sorted() moves
elements around to keep the array sorted; if the pools themselves were
moved, their addresses would change and the pool pointers stored in
the free list would break.
---
 rt/gc/cdgc/dynarray.d | 44 ++++++++++++++--------
 rt/gc/cdgc/gc.d       | 88 +++++++++++++++++++++++++++----------------
 2 files changed, 83 insertions(+), 49 deletions(-)

diff --git a/rt/gc/cdgc/dynarray.d b/rt/gc/cdgc/dynarray.d
index 1995b77..f3aaf8a 100644
--- a/rt/gc/cdgc/dynarray.d
+++ b/rt/gc/cdgc/dynarray.d
@@ -102,12 +102,14 @@ public:
     /**
      * Access an element by index.
      *
-     * A pointer is returned so the real element can be modified in place.
+     * Bear in mind that a copy of the element is returned. If you need
+     * a pointer, you can always get it through a.ptr + i (but be careful if
+     * you use the insert_sorted() method).
      */
-    T* opIndex(size_t i)
+    T opIndex(size_t i)
     {
         assert (i < this._size);
-        return this._data + i;
+        return this._data[i];
     }
 
     /**
@@ -115,9 +117,9 @@ public:
      *
      * This can trigger an allocation if the array is not big enough.
      *
-     * Returns true if the append was successful, false otherwise (i.e. an
-     * allocation was triggered but the allocation failed) in which case the
-     * internal state is not changed.
+     * Returns a pointer to the newly appended element if the append was
+     * successful, null otherwise (i.e. an allocation was triggered but the
+     * allocation failed), in which case the internal state is not changed.
      */
     T* append(in T x)
     {
@@ -130,25 +132,35 @@ public:
     }
 
     /**
-     * Append a copy of the element x at the end of the array.
+     * Insert an element keeping the array sorted.
      *
-     * This can trigger an allocation if the array is not big enough.
+     * This assumes the array was previously sorted. The "cmp" template
+     * argument can be used to specify a custom comparison expression as a
+     * string (where "a" is the element already in the array and "b" is the
+     * element passed as the parameter "x"). Using a template to specify the
+     * comparison is a hack to cope with compilers that have trouble inlining
+     * a delegate (e.g. DMD).
+     *
+     * This can trigger an allocation if the array is not big enough, and it
+     * moves memory around, so be especially careful if you are keeping
+     * pointers into the array's data.
      *
-     * Returns true if the append was successful, false otherwise (i.e. an
-     * allocation was triggered but the allocation failed) in which case the
-     * internal state is not changed.
+     * Returns a pointer to the newly inserted element if the insertion was
+     * successful, null otherwise (i.e. an allocation was triggered but the
+     * allocation failed), in which case the internal state is not changed.
*/ - T* insert_sorted(in T x) + T* insert_sorted(char[] cmp = "a < b")(in T x) { size_t i = 0; for (; i < this._size; i++) { - if (this._data[i] < x) + T a = this._data[i]; + alias x b; + if (mixin(cmp)) continue; break; } - if (this._size == this._capacity) - if (!this.resize()) - return null; + if ((this._size == this._capacity) && !this.resize()) + return null; memmove(this._data + i + 1, this._data + i, (this._size - i) * T.sizeof); this._data[i] = x; diff --git a/rt/gc/cdgc/gc.d b/rt/gc/cdgc/gc.d index 1afe5a0..151d28b 100644 --- a/rt/gc/cdgc/gc.d +++ b/rt/gc/cdgc/gc.d @@ -155,7 +155,8 @@ alias ubyte Bins; struct List { - List *next; + List* next; + Pool* pool; } @@ -210,7 +211,7 @@ struct GC dynarray.DynArray!(void*) roots; dynarray.DynArray!(Range) ranges; - dynarray.DynArray!(Pool) pools; + dynarray.DynArray!(Pool*) pools; Stats stats; } @@ -236,7 +237,7 @@ bool Invariant() if (i == 0) assert(gc.min_addr == pool.baseAddr); if (i + 1 < gc.pools.length) - assert(*pool < gc.pools[i + 1]); + assert(*pool < *gc.pools[i + 1]); else if (i + 1 == gc.pools.length) assert(gc.max_addr == pool.topAddr); } @@ -250,10 +251,14 @@ bool Invariant() assert(gc.ranges[i].pbot <= gc.ranges[i].ptop); } - for (size_t i = 0; i < B_PAGE; i++) - for (List *list = gc.free_list[i]; list; list = list.next) - { + for (size_t i = 0; i < B_PAGE; i++) { + for (List *list = gc.free_list[i]; list; list = list.next) { + assert (list.pool !is null); + auto p = cast(byte*) list; + assert (p >= list.pool.baseAddr); + assert (p < list.pool.topAddr); } + } } return true; } @@ -406,6 +411,7 @@ void minimize() if (pn < pool.npages) continue; pool.Dtor(); + cstdlib.free(pool); gc.pools.remove_at(n); n--; } @@ -418,9 +424,8 @@ void minimize() * Allocate a chunk of memory that is larger than a page. * Return null if out of memory. 
*/ -void *bigAlloc(size_t size) +void* bigAlloc(size_t size, out Pool* pool) { - Pool* pool; size_t npages; size_t n; size_t pn; @@ -532,20 +537,24 @@ Pool *newPool(size_t npages) npages = n; } - Pool p; - p.initialize(npages); - if (!p.baseAddr) + auto pool = cast(Pool*) cstdlib.calloc(1, Pool.sizeof); + if (pool is null) + return null; + pool.initialize(npages); + if (!pool.baseAddr) { - p.Dtor(); + pool.Dtor(); return null; } - Pool* pool = gc.pools.insert_sorted(p); - if (pool) - { - gc.min_addr = gc.pools[0].baseAddr; - gc.max_addr = gc.pools[gc.pools.length - 1].topAddr; + auto inserted_pool = *gc.pools.insert_sorted!("*a < *b")(pool); + if (inserted_pool is null) { + pool.Dtor(); + return null; } + assert (inserted_pool is pool); + gc.min_addr = gc.pools[0].baseAddr; + gc.max_addr = gc.pools[gc.pools.length - 1].topAddr; return pool; } @@ -577,14 +586,16 @@ int allocPage(Bins bin) // Convert page to free list size_t size = binsize[bin]; - List **b = &gc.free_list[bin]; + auto list_head = &gc.free_list[bin]; p = pool.baseAddr + pn * PAGESIZE; ptop = p + PAGESIZE; for (; p < ptop; p += size) { - (cast(List *)p).next = *b; - *b = cast(List *)p; + List* l = cast(List *) p; + l.next = *list_head; + l.pool = pool; + *list_head = l; } return 1; } @@ -830,9 +841,13 @@ void mark(void *stackTop) { for (List *list = gc.free_list[n]; list; list = list.next) { - Pool* pool = findPool(list); - assert(pool); - pool.freebits.set(cast(size_t)(cast(byte*)list - pool.baseAddr) / 16); + Pool* pool = list.pool; + auto ptr = cast(byte*) list; + assert (pool); + assert (pool.baseAddr <= ptr); + assert (ptr < pool.topAddr); + size_t bit_i = cast(size_t)(ptr - pool.baseAddr) / 16; + pool.freebits.set(bit_i); } } @@ -1107,10 +1122,14 @@ version(none) // BUG: doesn't work because freebits() must also be cleared bit_i = bit_base + u / 16; if (pool.freebits.test(bit_i)) { - List *list = cast(List *)(p + u); - // avoid unnecessary writes + assert ((p+u) >= pool.baseAddr); + assert ((p+u) < pool.topAddr); + List* list = cast(List*) (p + u); + // avoid unnecesary writes (it really saves time) if (list.next != gc.free_list[bin]) list.next = gc.free_list[bin]; + if (list.pool != pool) + list.pool = pool; gc.free_list[bin] = list; } } @@ -1247,7 +1266,8 @@ private void *malloc(size_t size, uint attrs, size_t* pm_bitmask) lastbin = bin; } - size_t capacity; // to figure out where to store the bitmask + Pool* pool = void; + size_t capacity = void; // to figure out where to store the bitmask if (bin < B_PAGE) { p = gc.free_list[bin]; @@ -1283,7 +1303,11 @@ private void *malloc(size_t size, uint attrs, size_t* pm_bitmask) capacity = binsize[bin]; // Return next item from free list - gc.free_list[bin] = (cast(List*)p).next; + List* list = cast(List*) p; + assert ((cast(byte*)list) >= list.pool.baseAddr); + assert ((cast(byte*)list) < list.pool.topAddr); + gc.free_list[bin] = list.next; + pool = list.pool; if (!(attrs & BlkAttr.NO_SCAN)) memset(p + size, 0, capacity - size); if (opts.options.mem_stomp) @@ -1291,9 +1315,10 @@ private void *malloc(size_t size, uint attrs, size_t* pm_bitmask) } else { - p = bigAlloc(size); + p = bigAlloc(size, pool); if (!p) onOutOfMemoryError(); + assert (pool !is null); // Round the size up to the number of pages needed to store it size_t npages = (size + PAGESIZE - 1) / PAGESIZE; capacity = npages * PAGESIZE; @@ -1314,12 +1339,8 @@ private void *malloc(size_t size, uint attrs, size_t* pm_bitmask) } if (attrs) - { - Pool *pool = findPool(p); - assert(pool); - setAttr(pool, cast(size_t)(p - 
pool.baseAddr) / 16, attrs); - } + return p; } @@ -1604,6 +1625,7 @@ private void free(void *p) memset(p, 0xF2, binsize[bin]); list.next = gc.free_list[bin]; + list.pool = pool; gc.free_list[bin] = list; } } -- 2.43.0
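
The core idea of the patch may be easier to see in isolation. Because the
smallest bin is at least two pointers wide, every free node can carry both
the link to the next free bin and the pool it belongs to, so popping a bin
never needs a findPool()-style search. The sketch below is a self-contained
illustration of that layout, not GC code: FakePool, PAGE and BINSIZE are
invented for the example, and it is written against current D rather than
the D1/Tango dialect the runtime uses.

import core.stdc.stdlib : malloc, free;
import std.stdio : writefln;

struct FakePool                      // stand-in for the GC's Pool
{
    byte* baseAddr;
    byte* topAddr;
}

struct List                          // same two-pointer node as in the patch
{
    List* next;                      // next free bin of this size class
    FakePool* pool;                  // pool this bin lives in
}

void main()
{
    enum PAGE = 4096;
    enum BINSIZE = 16;               // smallest bin size
    static assert(BINSIZE >= 2 * (void*).sizeof);  // room for both pointers

    auto mem = cast(byte*) malloc(PAGE);
    assert(mem !is null);
    scope(exit) free(mem);
    auto pool = FakePool(mem, mem + PAGE);

    // Thread the whole page onto a free list, storing the pool pointer in
    // every node (this mirrors what allocPage() does after the patch).
    List* head = null;
    for (byte* p = mem; p < mem + PAGE; p += BINSIZE)
    {
        auto node = cast(List*) p;
        node.next = head;
        node.pool = &pool;
        head = node;
    }

    // Popping a bin: the node itself tells us which pool it came from,
    // so no pool lookup is needed on the allocation fast path.
    List* bin = head;
    head = bin.next;
    assert(bin.pool.baseAddr <= cast(byte*) bin);
    assert(cast(byte*) bin < bin.pool.topAddr);
    writefln("popped a %s-byte bin at offset %s",
             BINSIZE, cast(byte*) bin - bin.pool.baseAddr);
}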
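The dynarray.d side of the change can be sketched the same way. insert_sorted()
takes the comparison as a string template argument and mixes it in, so the
compiler sees a plain expression rather than a delegate call it might not
inline; the patch passes "*a < *b" when inserting into gc.pools so that the
Pool* elements are ordered by the pools they point to. Below is a rough,
self-contained sketch of that mechanism; SmallArray is invented for the
example and uses a fixed capacity instead of resizing, and it is again
current D rather than the runtime's D1 dialect.

import core.stdc.string : memmove;
import std.stdio : writeln;

struct SmallArray(T, size_t N)       // simplified stand-in for DynArray!(T)
{
    T[N] data;
    size_t size;

    // Insert x keeping the array sorted. The mixed-in expression sees "a"
    // (an element already in the array) and "b" (the element being inserted).
    T* insert_sorted(string cmp = "a < b")(in T x)
    {
        T b = x;
        size_t i = 0;
        for (; i < this.size; i++)
        {
            T a = this.data[i];
            if (mixin(cmp))
                continue;
            break;
        }
        if (this.size == N)
            return null;             // full; the real DynArray would resize
        memmove(this.data.ptr + i + 1, this.data.ptr + i,
                (this.size - i) * T.sizeof);
        this.data[i] = x;
        this.size++;
        return this.data.ptr + i;
    }
}

void main()
{
    SmallArray!(int, 8) a;
    foreach (v; [5, 1, 9, 3])
        a.insert_sorted(v);          // default comparison "a < b"
    writeln(a.data[0 .. a.size]);    // [1, 3, 5, 9]

    // With pointer elements, a comparison such as "*a < *b" orders the
    // array by the pointed-to values, which is how gc.pools is kept sorted
    // once it holds Pool* instead of Pool.
}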