+import gc.alloc;
+
+import cstdlib = tango.stdc.stdlib : calloc, free, malloc, realloc;
+import cstring = tango.stdc.string : memcpy, memmove, memset;
+
+debug(THREADINVARIANT) import tango.stdc.posix.pthread;
+debug(PRINTF) import tango.stdc.posix.pthread : pthread_self, pthread_t;
+debug import tango.stdc.stdio : printf;
+
+version (GNU)
+{
+ // BUG: The following import will likely not work, since the gcc
+ // subdirectory is elsewhere. Instead, perhaps the functions
+ // could be declared directly or some other resolution could
+ // be found.
+ import gcc.builtins; // for __builtin_unwind_init
+}
+
+
+struct BlkInfo
+{
+ void* base;
+ size_t size;
+ uint attr;
+}
+
+private
+{
+ enum BlkAttr : uint
+ {
+ FINALIZE = 0b0000_0001, // run the finalizer when the block is collected
+ NO_SCAN = 0b0000_0010, // do not scan the block for pointers
+ NO_MOVE = 0b0000_0100, // do not move the block during collections
+ ALL_BITS = 0b1111_1111 // mask covering every attribute bit
+ }
+
+ extern (C) void* rt_stackBottom();
+ extern (C) void* rt_stackTop();
+
+ extern (C) void rt_finalize( void* p, bool det = true );
+
+ alias void delegate( void*, void* ) scanFn;
+
+ extern (C) void rt_scanStaticData( scanFn scan );
+
+ version (MULTI_THREADED)
+ {
+ extern (C) bool thread_needLock();
+ extern (C) void thread_suspendAll();
+ extern (C) void thread_resumeAll();
+
+ extern (C) void thread_scanAll( scanFn fn, void* curStackTop = null );
+ }
+
+ extern (C) void onOutOfMemoryError();
+
+ enum
+ {
+ OPFAIL = ~cast(size_t)0
+ }
+}
+
+
+alias GC gc_t;
+
+
+/* ======================= Leak Detector =========================== */
+
+
+debug (LOGGING)
+{
+ struct Log
+ {
+ void* p;
+ size_t size;
+ size_t line;
+ char* file;
+ void* parent;
+
+ void print()
+ {
+ printf(" p = %x, size = %d, parent = %x ", p, size, parent);
+ if (file)
+ {
+ printf("%s(%u)", file, line);
+ }
+ printf("\n");
+ }
+ }
+
+
+ struct LogArray
+ {
+ size_t dim;
+ size_t allocdim;
+ Log *data;
+
+ void Dtor()
+ {
+ if (data)
+ cstdlib.free(data);
+ data = null;
+ }
+
+ void reserve(size_t nentries)
+ {
+ assert(dim <= allocdim);
+ if (allocdim - dim < nentries)
+ {
+ allocdim = (dim + nentries) * 2;
+ assert(dim + nentries <= allocdim);
+ if (!data)
+ {
+ data = cast(Log*)cstdlib.malloc(allocdim * Log.sizeof);
+ if (!data && allocdim)
+ onOutOfMemoryError();
+ }
+ else
+ { Log *newdata;
+
+ newdata = cast(Log*)cstdlib.malloc(allocdim * Log.sizeof);
+ if (!newdata && allocdim)
+ onOutOfMemoryError();
+ cstring.memcpy(newdata, data, dim * Log.sizeof);
+ cstdlib.free(data);
+ data = newdata;
+ }
+ }
+ }
+
+
+ void push(Log log)
+ {
+ reserve(1);
+ data[dim++] = log;
+ }
+
+ void remove(size_t i)
+ {
+ // dim - i - 1 entries follow slot i; shift them down over it
+ cstring.memmove(data + i, data + i + 1, (dim - i - 1) * Log.sizeof);
+ dim--;
+ }
+
+
+ size_t find(void *p)
+ {
+ for (size_t i = 0; i < dim; i++)
+ {
+ if (data[i].p == p)
+ return i;
+ }
+ return OPFAIL; // not found
+ }
+
+
+ void copy(LogArray *from)
+ {
+ reserve(from.dim - dim);
+ assert(from.dim <= allocdim);
+ cstring.memcpy(data, from.data, from.dim * Log.sizeof);
+ dim = from.dim;
+ }
+ }
+}
+
+
+/* ============================ GC =============================== */
+
+
+class GCLock { } // just a dummy so we can get a global lock
+
+
+const uint GCVERSION = 1; // increment every time the interface to GC changes
+
+class GC
+{
+ // For passing to debug code
+ static size_t line;
+ static char* file;
+
+ uint gcversion = GCVERSION;
+
+ Gcx *gcx; // implementation
+ static ClassInfo gcLock; // global lock
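+
+ // Most public entry points below come in pairs: a thin wrapper that takes
+ // the global lock via synchronized (gcLock) when more than one thread is
+ // running (thread_needLock()), and a *NoSync worker that assumes the lock
+ // is already held or unnecessary.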
+
+
+ void initialize()
+ {
+ gcLock = GCLock.classinfo;
+ gcx = cast(Gcx*)cstdlib.calloc(1, Gcx.sizeof);
+ if (!gcx)
+ onOutOfMemoryError();
+ gcx.initialize();
+ setStackBottom(rt_stackBottom());
+ }
+
+
+ void Dtor()
+ {
+ version (linux)
+ {
+ //debug(PRINTF) printf("Thread %x ", pthread_self());
+ //debug(PRINTF) printf("GC.Dtor()\n");
+ }
+
+ if (gcx)
+ {
+ gcx.Dtor();
+ cstdlib.free(gcx);
+ gcx = null;
+ }
+ }
+
+
+ invariant
+ {
+ if (gcx)
+ {
+ gcx.thread_Invariant();
+ }
+ }
+
+
+ /**
+ * Re-enable collections suspended by disable(). Calls nest: each enable()
+ * balances one prior disable().
+ */
+ void enable()
+ {
+ if (!thread_needLock())
+ {
+ assert(gcx.disabled > 0);
+ gcx.disabled--;
+ }
+ else synchronized (gcLock)
+ {
+ assert(gcx.disabled > 0);
+ gcx.disabled--;
+ }
+ }
+
+
+ /**
+ * Disable automatic collections. While disabled, allocations that would
+ * normally trigger a collection grow the heap instead. Calls nest.
+ */
+ void disable()
+ {
+ if (!thread_needLock())
+ {
+ gcx.disabled++;
+ }
+ else synchronized (gcLock)
+ {
+ gcx.disabled++;
+ }
+ }
+
+
+ /**
+ * Get the attribute bits (BlkAttr) of the block containing p. Returns 0
+ * if p is null or not a GC-owned pointer.
+ */
+ uint getAttr(void* p)
+ {
+ if (!p)
+ {
+ return 0;
+ }
+
+ uint go()
+ {
+ Pool* pool = gcx.findPool(p);
+ uint oldb = 0;
+
+ if (pool)
+ {
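+ // Attribute bits appear to be tracked per 16-byte unit (the smallest
+ // bin size), so the bit index is the offset into the pool divided by 16.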
+ auto biti = cast(size_t)(p - pool.baseAddr) / 16;
+
+ oldb = gcx.getBits(pool, biti);
+ }
+ return oldb;
+ }
+
+ if (!thread_needLock())
+ {
+ return go();
+ }
+ else synchronized (gcLock)
+ {
+ return go();
+ }
+ }
+
+
+ /**
+ * Set the given attribute bits (BlkAttr) on the block containing p.
+ * Returns the previous attribute bits, or 0 if p is null or not a
+ * GC-owned pointer.
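+ *
+ * Example (illustrative only; assumes a GC instance named `gc`):
+ * ---
+ * auto old = gc.setAttr(p, BlkAttr.NO_MOVE); // ask the collector not to move this block
+ * ---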
+ */
+ uint setAttr(void* p, uint mask)
+ {
+ if (!p)
+ {
+ return 0;
+ }
+
+ uint go()
+ {
+ Pool* pool = gcx.findPool(p);
+ uint oldb = 0;
+
+ if (pool)
+ {
+ auto biti = cast(size_t)(p - pool.baseAddr) / 16;
+
+ oldb = gcx.getBits(pool, biti);
+ gcx.setBits(pool, biti, mask);
+ }
+ return oldb;
+ }
+
+ if (!thread_needLock())
+ {
+ return go();
+ }
+ else synchronized (gcLock)
+ {
+ return go();
+ }
+ }
+
+
+ /**
+ * Clear the given attribute bits on the block containing p. Returns the
+ * previous attribute bits, or 0 if p is null or not a GC-owned pointer.
+ */
+ uint clrAttr(void* p, uint mask)
+ {
+ if (!p)
+ {
+ return 0;
+ }
+
+ uint go()
+ {
+ Pool* pool = gcx.findPool(p);
+ uint oldb = 0;
+
+ if (pool)
+ {
+ auto biti = cast(size_t)(p - pool.baseAddr) / 16;
+
+ oldb = gcx.getBits(pool, biti);
+ gcx.clrBits(pool, biti, mask);
+ }
+ return oldb;
+ }
+
+ if (!thread_needLock())
+ {
+ return go();
+ }
+ else synchronized (gcLock)
+ {
+ return go();
+ }
+ }
+
+
+ /**
+ * Allocate size bytes of GC-managed memory tagged with the given BlkAttr
+ * bits. Returns null if size is zero; calls onOutOfMemoryError() if the
+ * request cannot be satisfied.
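+ *
+ * Example (illustrative only; assumes a GC instance named `gc`):
+ * ---
+ * auto p = gc.malloc(256, BlkAttr.NO_SCAN); // block holds no pointers, skip scanning it
+ * ---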
+ */
+ void *malloc(size_t size, uint bits = 0)
+ {
+ if (!size)
+ {
+ return null;
+ }
+
+ if (!thread_needLock())
+ {
+ return mallocNoSync(size, bits);
+ }
+ else synchronized (gcLock)
+ {
+ return mallocNoSync(size, bits);
+ }
+ }
+
+
+ //
+ //
+ //
+ private void *mallocNoSync(size_t size, uint bits = 0)
+ {
+ assert(size != 0);
+
+ void *p = null;
+ Bins bin;
+
+ //debug(PRINTF) printf("GC::malloc(size = %d, gcx = %p)\n", size, gcx);
+ assert(gcx);
+ //debug(PRINTF) printf("gcx.self = %x, pthread_self() = %x\n", gcx.self, pthread_self());
+
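+ // When version (SENTINEL) is in effect, each block is padded with guard
+ // bytes that sentinel_Invariant() checks later; SENTINEL_EXTRA and the
+ // sentinel_* helpers are assumed to be defined elsewhere in this module
+ // and to collapse to no-ops when SENTINEL is not set.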
+ size += SENTINEL_EXTRA;
+
+ // Compute size bin
+ // Cache previous binsize lookup - Dave Fladebo.
+ static size_t lastsize = -1;
+ static Bins lastbin;
+ if (size == lastsize)
+ bin = lastbin;
+ else
+ {
+ bin = gcx.findBin(size);
+ lastsize = size;
+ lastbin = bin;
+ }
+
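+ // Small requests are served from per-bin free lists; anything that maps
+ // to B_PAGE goes through bigAlloc() and is given whole pages.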
+ if (bin < B_PAGE)
+ {
+ p = gcx.bucket[bin];
+ if (p is null)
+ {
+ if (!gcx.allocPage(bin) && !gcx.disabled) // try to find a new page
+ {
+ if (!thread_needLock())
+ {
+ /* No lock was taken by the caller (single threaded so far),
+ * but a finalizer run by the collection may start a new
+ * thread, so lock for the collection anyway.
+ */
+ synchronized (gcLock)
+ {
+ gcx.fullcollectshell();
+ }
+ }
+ else if (!gcx.fullcollectshell()) // collect to find a new page
+ {
+ //gcx.newPool(1);
+ }
+ }
+ if (!gcx.bucket[bin] && !gcx.allocPage(bin))
+ { int result;
+
+ gcx.newPool(1); // allocate new pool to find a new page
+ result = gcx.allocPage(bin);
+ if (!result)
+ onOutOfMemoryError();
+ }
+ p = gcx.bucket[bin];
+ }
+
+ // Return next item from free list
+ gcx.bucket[bin] = (cast(List*)p).next;
+ if( !(bits & BlkAttr.NO_SCAN) )
+ cstring.memset(p + size, 0, binsize[bin] - size); // clear the unused tail so stale data there is not scanned as pointers
+ //debug(PRINTF) printf("\tmalloc => %x\n", p);
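+ // debug (MEMSTOMP) fills memory with marker bytes (0xF0 when handed out,
+ // 0xF2 when freed) so use of stale or uninitialized blocks shows up quickly.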
+ debug (MEMSTOMP) cstring.memset(p, 0xF0, size);
+ }
+ else
+ {
+ p = gcx.bigAlloc(size);
+ if (!p)
+ onOutOfMemoryError();
+ }
+ size -= SENTINEL_EXTRA;
+ p = sentinel_add(p);
+ sentinel_init(p, size);
+ gcx.log_malloc(p, size);
+
+ if (bits)
+ {
+ Pool *pool = gcx.findPool(p);
+ assert(pool);
+
+ gcx.setBits(pool, cast(size_t)(p - pool.baseAddr) / 16, bits);
+ }
+ return p;
+ }
+
+
+ /**
+ * Allocate size bytes of zero-initialized GC-managed memory tagged with
+ * the given BlkAttr bits. Returns null if size is zero.
+ */
+ void *calloc(size_t size, uint bits = 0)
+ {
+ if (!size)
+ {
+ return null;
+ }
+
+ if (!thread_needLock())
+ {
+ return callocNoSync(size, bits);
+ }
+ else synchronized (gcLock)
+ {
+ return callocNoSync(size, bits);
+ }
+ }
+
+
+ //
+ //
+ //
+ private void *callocNoSync(size_t size, uint bits = 0)
+ {
+ assert(size != 0);
+
+ //debug(PRINTF) printf("calloc: %x len %d\n", p, len);
+ void *p = mallocNoSync(size, bits);
+ cstring.memset(p, 0, size);
+ return p;
+ }
+
+
+ /**
+ * Resize the block pointed to by p to size bytes, allocating, copying,
+ * and freeing as needed. A null p acts like malloc(); a zero size frees
+ * the block and returns null.
+ */
+ void *realloc(void *p, size_t size, uint bits = 0)
+ {
+ if (!thread_needLock())
+ {
+ return reallocNoSync(p, size, bits);
+ }
+ else synchronized (gcLock)
+ {
+ return reallocNoSync(p, size, bits);
+ }
+ }
+
+
+ //
+ //
+ //
+ private void *reallocNoSync(void *p, size_t size, uint bits = 0)
+ {
+ if (!size)
+ { if (p)
+ { freeNoSync(p);
+ p = null;
+ }
+ }
+ else if (!p)
+ {
+ p = mallocNoSync(size, bits);
+ }
+ else
+ { void *p2;
+ size_t psize;
+
+ //debug(PRINTF) printf("GC::realloc(p = %x, size = %u)\n", p, size);
+ version (SENTINEL)
+ {
+ sentinel_Invariant(p);
+ psize = *sentinel_size(p);
+ if (psize != size)
+ {
+ if (psize)
+ {
+ Pool *pool = gcx.findPool(p);
+
+ if (pool)
+ {
+ auto biti = cast(size_t)(p - pool.baseAddr) / 16;
+
+ if (bits)
+ {
+ gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
+ gcx.setBits(pool, biti, bits);
+ }
+ else
+ {
+ bits = gcx.getBits(pool, biti);
+ }
+ }
+ }
+ p2 = mallocNoSync(size, bits);
+ if (psize < size)
+ size = psize;
+ //debug(PRINTF) printf("\tcopying %d bytes\n",size);
+ cstring.memcpy(p2, p, size);
+ p = p2;
+ }
+ }
+ else
+ {
+ psize = gcx.findSize(p); // find allocated size
+ if (psize >= PAGESIZE && size >= PAGESIZE)
+ {
+ auto psz = psize / PAGESIZE;
+ auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
+ if (newsz == psz)
+ return p;
+
+ auto pool = gcx.findPool(p);
+ auto pagenum = (p - pool.baseAddr) / PAGESIZE;
+
+ if (newsz < psz)
+ { // Shrink in place
+ synchronized (gcLock)
+ {
+ debug (MEMSTOMP) cstring.memset(p + size, 0xF2, psize - size);
+ pool.freePages(pagenum + newsz, psz - newsz);
+ }
+ return p;
+ }
+ else if (pagenum + newsz <= pool.npages)
+ {
+ // Attempt to expand in place
+ synchronized (gcLock)
+ {
+ for (size_t i = pagenum + psz; 1;)
+ {
+ if (i == pagenum + newsz)
+ {
+ debug (MEMSTOMP) cstring.memset(p + psize, 0xF0, size - psize);
+ cstring.memset(&pool.pagetable[pagenum + psz], B_PAGEPLUS, newsz - psz);
+ return p;
+ }
+ if (i == pool.ncommitted)
+ {
+ auto u = pool.extendPages(pagenum + newsz - pool.ncommitted);
+ if (u == OPFAIL)
+ break;
+ i = pagenum + newsz;
+ continue;
+ }
+ if (pool.pagetable[i] != B_FREE)
+ break;
+ i++;
+ }
+ }
+ }
+ }
+ if (psize < size || // if new size is bigger
+ psize > size * 2) // or less than half
+ {
+ if (psize)
+ {
+ Pool *pool = gcx.findPool(p);
+
+ if (pool)
+ {
+ auto biti = cast(size_t)(p - pool.baseAddr) / 16;
+
+ if (bits)
+ {
+ gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
+ gcx.setBits(pool, biti, bits);
+ }
+ else
+ {
+ bits = gcx.getBits(pool, biti);
+ }
+ }
+ }
+ p2 = mallocNoSync(size, bits);
+ if (psize < size)
+ size = psize;
+ //debug(PRINTF) printf("\tcopying %d bytes\n",size);
+ cstring.memcpy(p2, p, size);
+ p = p2;
+ }
+ }
+ }
+ return p;
+ }
+
+
+ /**
+ * Attempt to enlarge, in place, the memory block pointed to by p by at
+ * least minsize bytes beyond its current size, up to a maximum of maxsize
+ * additional bytes. Unlike realloc(), this never moves the block.
+ *
+ * Returns:
+ * 0 if could not extend p,
+ * total size of entire memory block if successful.
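+ *
+ * Example (illustrative; assumes `gc` is a GC instance and p is a
+ * page-sized allocation returned by gc.malloc):
+ * ---
+ * auto total = gc.extend(p, PAGESIZE, 4 * PAGESIZE);
+ * // total is 0 if the block could not be grown in place
+ * ---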
+ */
+ size_t extend(void* p, size_t minsize, size_t maxsize)
+ {
+ if (!thread_needLock())
+ {
+ return extendNoSync(p, minsize, maxsize);
+ }
+ else synchronized (gcLock)
+ {
+ return extendNoSync(p, minsize, maxsize);
+ }
+ }
+
+
+ //
+ //
+ //
+ private size_t extendNoSync(void* p, size_t minsize, size_t maxsize)
+ in
+ {
+ assert( minsize <= maxsize );
+ }
+ body
+ {
+ //debug(PRINTF) printf("GC::extend(p = %x, minsize = %u, maxsize = %u)\n", p, minsize, maxsize);
+ version (SENTINEL)
+ {
+ return 0;
+ }
+ auto psize = gcx.findSize(p); // find allocated size
+ if (psize < PAGESIZE)
+ return 0; // cannot extend buckets
+
+ auto psz = psize / PAGESIZE;
+ auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
+ auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
+
+ auto pool = gcx.findPool(p);
+ auto pagenum = (p - pool.baseAddr) / PAGESIZE;
+
+ size_t sz;
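+ // Count the free committed pages immediately following the block,
+ // stopping once maxsz pages have been found or a used page is hit.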
+ for (sz = 0; sz < maxsz; sz++)
+ {
+ auto i = pagenum + psz + sz;
+ if (i == pool.ncommitted)
+ break;
+ if (pool.pagetable[i] != B_FREE)
+ { if (sz < minsz)
+ return 0;
+ break;
+ }
+ }
+ if (sz >= minsz)
+ {
+ }
+ else if (pagenum + psz + sz == pool.ncommitted)
+ {
+ auto u = pool.extendPages(minsz - sz);
+ if (u == OPFAIL)
+ return 0;
+ sz = minsz;
+ }
+ else
+ return 0;
+ debug (MEMSTOMP) cstring.memset(p + psize, 0xF0, (psz + sz) * PAGESIZE - psize);
+ cstring.memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
+ gcx.p_cache = null;
+ gcx.size_cache = 0;
+ return (psz + sz) * PAGESIZE;
+ }
+
+
+ /**
+ * Ask the GC to preallocate at least size bytes of memory for future
+ * allocations. Returns the number of bytes actually reserved, or 0.
+ */
+ size_t reserve(size_t size)
+ {
+ if (!size)
+ {
+ return 0;
+ }
+
+ if (!thread_needLock())
+ {
+ return reserveNoSync(size);
+ }
+ else synchronized (gcLock)
+ {
+ return reserveNoSync(size);
+ }
+ }
+
+
+ //
+ //
+ //
+ private size_t reserveNoSync(size_t size)
+ {
+ assert(size != 0);
+ assert(gcx);
+
+ return gcx.reserve(size);
+ }
+
+
+ /**
+ * Return the block pointed to by p to the free lists. Pointers not
+ * allocated by this GC are silently ignored, and no finalizer is run.
+ */
+ void free(void *p)
+ {
+ if (!p)
+ {
+ return;
+ }
+
+ if (!thread_needLock())
+ {
+ return freeNoSync(p);
+ }
+ else synchronized (gcLock)
+ {
+ return freeNoSync(p);
+ }
+ }
+
+
+ //
+ //
+ //
+ private void freeNoSync(void *p)
+ {
+ assert (p);
+
+ Pool* pool;
+ size_t pagenum;
+ Bins bin;
+ size_t biti;
+
+ // Find which page it is in
+ pool = gcx.findPool(p);
+ if (!pool) // if not one of ours
+ return; // ignore
+ sentinel_Invariant(p);
+ p = sentinel_sub(p);
+ pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
+ biti = cast(size_t)(p - pool.baseAddr) / 16;
+ gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
+
+ bin = cast(Bins)pool.pagetable[pagenum];
+ if (bin == B_PAGE) // if large alloc
+ { size_t npages;
+ size_t n;
+
+ // Free pages
+ npages = 1;
+ n = pagenum;
+ while (++n < pool.ncommitted && pool.pagetable[n] == B_PAGEPLUS)
+ npages++;
+ debug (MEMSTOMP) cstring.memset(p, 0xF2, npages * PAGESIZE);
+ pool.freePages(pagenum, npages);
+ }
+ else
+ { // Add to free list
+ List *list = cast(List*)p;
+
+ debug (MEMSTOMP) cstring.memset(p, 0xF2, binsize[bin]);
+
+ list.next = gcx.bucket[bin];
+ gcx.bucket[bin] = list;
+ }
+ gcx.log_free(sentinel_add(p));
+ }
+
+
+ /**
+ * Determine the base address of the block containing p. If p is not a gc
+ * allocated pointer, return null.
+ */
+ void* addrOf(void *p)
+ {
+ if (!p)
+ {
+ return null;
+ }
+
+ if (!thread_needLock())
+ {
+ return addrOfNoSync(p);
+ }
+ else synchronized (gcLock)
+ {
+ return addrOfNoSync(p);
+ }
+ }
+
+
+ //
+ //
+ //
+ void* addrOfNoSync(void *p)
+ {
+ if (!p)
+ {
+ return null;
+ }
+
+ return gcx.findBase(p);
+ }
+
+
+ /**
+ * Determine the allocated size of pointer p. If p is an interior pointer
+ * or not a gc allocated pointer, return 0.
+ */
+ size_t sizeOf(void *p)
+ {
+ if (!p)
+ {
+ return 0;
+ }
+
+ if (!thread_needLock())
+ {
+ return sizeOfNoSync(p);
+ }
+ else synchronized (gcLock)
+ {
+ return sizeOfNoSync(p);
+ }
+ }
+
+
+ //
+ //
+ //
+ private size_t sizeOfNoSync(void *p)
+ {
+ assert (p);
+
+ version (SENTINEL)
+ {
+ p = sentinel_sub(p);
+ size_t size = gcx.findSize(p);
+
+ // Check for interior pointer
+ // This depends on:
+ // 1) size is a power of 2 for less than PAGESIZE values
+ // 2) base of memory pool is aligned on PAGESIZE boundary
+ if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
+ size = 0;
+ return size ? size - SENTINEL_EXTRA : 0;
+ }
+ else
+ {
+ if (p == gcx.p_cache)
+ return gcx.size_cache;
+
+ size_t size = gcx.findSize(p);
+
+ // Check for interior pointer
+ // This depends on:
+ // 1) size is a power of 2 for less than PAGESIZE values
+ // 2) base of memory pool is aligned on PAGESIZE boundary
+ if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
+ size = 0;
+ else
+ {
+ gcx.p_cache = p;
+ gcx.size_cache = size;
+ }
+
+ return size;
+ }
+ }
+
+
+ /**
+ * Retrieve the base address, size, and attribute bits of the block
+ * containing p as a BlkInfo. If p is null or not a gc allocated pointer,
+ * a zeroed BlkInfo is returned.
+ */
+ BlkInfo query(void *p)
+ {
+ if (!p)
+ {
+ BlkInfo i;
+ return i;
+ }
+
+ if (!thread_needLock())
+ {
+ return queryNoSync(p);
+ }
+ else synchronized (gcLock)
+ {
+ return queryNoSync(p);
+ }
+ }
+
+
+ //
+ //
+ //
+ BlkInfo queryNoSync(void *p)
+ {
+ assert(p);
+
+ return gcx.getInfo(p);
+ }
+
+
+ /**
+ * Verify that pointer p:
+ * 1) belongs to this memory pool
+ * 2) points to the start of an allocated piece of memory
+ * 3) is not on a free list
+ */
+ void check(void *p)
+ {
+ if (!p)
+ {
+ return;
+ }
+
+ if (!thread_needLock())
+ {
+ checkNoSync(p);
+ }
+ else synchronized (gcLock)
+ {
+ checkNoSync(p);
+ }
+ }
+
+
+ //
+ //
+ //
+ private void checkNoSync(void *p)
+ {
+ assert(p);
+
+ sentinel_Invariant(p);
+ debug (PTRCHECK)
+ {
+ Pool* pool;
+ size_t pagenum;
+ Bins bin;
+ size_t size;
+
+ p = sentinel_sub(p);
+ pool = gcx.findPool(p);
+ assert(pool);
+ pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
+ bin = cast(Bins)pool.pagetable[pagenum];
+ assert(bin <= B_PAGE);
+ size = binsize[bin];
+ assert((cast(size_t)p & (size - 1)) == 0);
+
+ debug (PTRCHECK2)
+ {
+ if (bin < B_PAGE)
+ {
+ // Check that p is not on a free list
+ List *list;
+
+ for (list = gcx.bucket[bin]; list; list = list.next)
+ {
+ assert(cast(void*)list != p);
+ }
+ }
+ }
+ }
+ }
+
+
+ //
+ //
+ //
+ private void setStackBottom(void *p)
+ {
+ version (STACKGROWSDOWN)
+ {
+ //p = (void *)((uint *)p + 4);
+ if (p > gcx.stackBottom)
+ {
+ //debug(PRINTF) printf("setStackBottom(%x)\n", p);
+ gcx.stackBottom = p;
+ }
+ }
+ else
+ {
+ //p = (void *)((uint *)p - 4);
+ if (p < gcx.stackBottom)
+ {
+ //debug(PRINTF) printf("setStackBottom(%x)\n", p);
+ gcx.stackBottom = cast(char*)p;
+ }
+ }
+ }
+
+
+ /**
+ * add p to list of roots
+ */
+ void addRoot(void *p)
+ {
+ if (!p)
+ {
+ return;
+ }
+
+ if (!thread_needLock())
+ {
+ gcx.addRoot(p);
+ }
+ else synchronized (gcLock)
+ {
+ gcx.addRoot(p);
+ }
+ }
+
+
+ /**
+ * remove p from list of roots
+ */
+ void removeRoot(void *p)
+ {
+ if (!p)
+ {
+ return;
+ }
+
+ if (!thread_needLock())
+ {
+ gcx.removeRoot(p);
+ }
+ else synchronized (gcLock)
+ {
+ gcx.removeRoot(p);
+ }
+ }
+
+
+ /**
+ * Add the range [p, p + sz) to the set of memory conservatively scanned
+ * for pointers during collections.
+ */
+ void addRange(void *p, size_t sz)
+ {
+ if (!p || !sz)
+ {
+ return;
+ }
+
+ //debug(PRINTF) printf("+GC.addRange(pbot = x%x, ptop = x%x)\n", pbot, ptop);
+ if (!thread_needLock())
+ {
+ gcx.addRange(p, p + sz);
+ }
+ else synchronized (gcLock)
+ {
+ gcx.addRange(p, p + sz);
+ }
+ //debug(PRINTF) printf("-GC.addRange()\n");
+ }
+
+
+ /**
+ * Remove the range previously added with addRange(), identified by its
+ * base address p.
+ */
+ void removeRange(void *p)
+ {
+ if (!p)
+ {
+ return;
+ }
+
+ if (!thread_needLock())
+ {
+ gcx.removeRange(p);
+ }
+ else synchronized (gcLock)
+ {
+ gcx.removeRange(p);
+ }
+ }
+
+
+ /**
+ * do full garbage collection
+ */
+ void fullCollect()
+ {
+ debug(PRINTF) printf("GC.fullCollect()\n");
+
+ if (!thread_needLock())
+ {
+ gcx.fullcollectshell();
+ }
+ else synchronized (gcLock)
+ {
+ gcx.fullcollectshell();
+ }
+
+ version (none)
+ {
+ GCStats stats;
+
+ getStats(stats);
+ debug(PRINTF) printf("poolsize = %x, usedsize = %x, freelistsize = %x\n",
+ stats.poolsize, stats.usedsize, stats.freelistsize);
+ }
+
+ gcx.log_collect();
+ }
+
+
+ /**
+ * Do a full garbage collection without scanning the thread stacks for
+ * roots (gcx.noStack is raised around the collection).
+ */
+ void fullCollectNoStack()
+ {
+ if (!thread_needLock())
+ {
+ gcx.noStack++;
+ gcx.fullcollectshell();
+ gcx.noStack--;
+ }
+ else synchronized (gcLock)
+ {
+ gcx.noStack++;
+ gcx.fullcollectshell();
+ gcx.noStack--;
+ }
+ }
+
+
+ /**
+ * minimize free space usage
+ */
+ void minimize()
+ {
+ if (!thread_needLock())
+ {
+ gcx.minimize();
+ }
+ else synchronized (gcLock)
+ {
+ gcx.minimize();
+ }
+ }
+
+
+ /**
+ * Retrieve statistics about garbage collection.
+ * Useful for debugging and tuning.
+ */
+ void getStats(out GCStats stats)
+ {
+ if (!thread_needLock())
+ {
+ getStatsNoSync(stats);
+ }
+ else synchronized (gcLock)
+ {
+ getStatsNoSync(stats);
+ }
+ }
+
+
+ //
+ //
+ //
+ private void getStatsNoSync(out GCStats stats)
+ {
+ size_t psize = 0;
+ size_t usize = 0;
+ size_t flsize = 0;
+
+ size_t n;
+ size_t bsize = 0;
+
+ //debug(PRINTF) printf("getStats()\n");
+ cstring.memset(&stats, 0, GCStats.sizeof);
+
+ for (n = 0; n < gcx.npools; n++)
+ { Pool *pool = gcx.pooltable[n];
+
+ psize += pool.ncommitted * PAGESIZE;
+ for (size_t j = 0; j < pool.ncommitted; j++)
+ {
+ Bins bin = cast(Bins)pool.pagetable[j];
+ if (bin == B_FREE)
+ stats.freeblocks++;
+ else if (bin == B_PAGE)
+ stats.pageblocks++;
+ else if (bin < B_PAGE)
+ bsize += PAGESIZE;
+ }
+ }
+
+ for (n = 0; n < B_PAGE; n++)
+ {
+ //debug(PRINTF) printf("bin %d\n", n);
+ for (List *list = gcx.bucket[n]; list; list = list.next)
+ {
+ //debug(PRINTF) printf("\tlist %x\n", list);
+ flsize += binsize[n];
+ }
+ }
+
+ usize = bsize - flsize;
+
+ stats.poolsize = psize;
+ stats.usedsize = usize;
+ stats.freelistsize = flsize;
+ }
+}
+
+
+/* ============================ Gcx =============================== */
+
+enum
+{ PAGESIZE = 4096, // size in bytes of one GC page
+ COMMITSIZE = (4096*16), // pages are committed in chunks of this many bytes
+ POOLSIZE = (4096*256), // default size of the address range reserved per pool
+}
+
+
+enum
+{
+ B_16,
+ B_32,
+ B_64,
+ B_128,
+ B_256,
+ B_512,
+ B_1024,
+ B_2048,
+ B_PAGE, // start of large alloc
+ B_PAGEPLUS, // continuation of large alloc
+ B_FREE, // free page
+ B_UNCOMMITTED, // memory not committed for this page
+ B_MAX
+}
+
+
+alias ubyte Bins;
+
+
+struct List
+{
+ List *next;
+}
+
+
+struct Range
+{
+ void *pbot;
+ void *ptop;
+}
+
+
+const uint binsize[B_MAX] = [ 16,32,64,128,256,512,1024,2048,4096 ];
+const uint notbinsize[B_MAX] = [ ~(16u-1),~(32u-1),~(64u-1),~(128u-1),~(256u-1),
+ ~(512u-1),~(1024u-1),~(2048u-1),~(4096u-1) ];
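+
+// notbinsize[bin] masks a pool offset down to the start of its bucket: for
+// the 64-byte bin, for example, notbinsize is ~63u, so (offset & notbinsize[bin])
+// rounds the offset down to the 64-byte boundary the allocation started on.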
+
+/* ============================ Gcx =============================== */
+
+
+struct Gcx
+{
+ debug (THREADINVARIANT)
+ {
+ pthread_t self;
+ void thread_Invariant()
+ {
+ if (self != pthread_self())
+ printf("thread_Invariant(): gcx = %x, self = %x, pthread_self() = %x\n", this, self, pthread_self());
+ assert(self == pthread_self());
+ }
+ }
+ else
+ {
+ void thread_Invariant() { }
+ }
+
+ void *p_cache; // last pointer whose size was looked up (see sizeOfNoSync)
+ size_t size_cache; // cached findSize() result for p_cache
+
+ size_t nroots;
+ size_t rootdim;
+ void **roots;
+
+ size_t nranges;
+ size_t rangedim;
+ Range *ranges;
+
+ uint noStack; // !=0 means don't scan stack
+ uint log; // turn on logging
+ uint anychanges;
+ void *stackBottom;
+ uint inited;
+ int disabled; // turn off collections if >0
+
+ byte *minAddr; // min(baseAddr)
+ byte *maxAddr; // max(topAddr)
+
+ size_t npools;
+ Pool **pooltable;
+
+ List *bucket[B_MAX]; // free list for each size
+
+
+ void initialize()
+ { int dummy;
+
+ (cast(byte*)this)[0 .. Gcx.sizeof] = 0;
+ stackBottom = cast(char*)&dummy;
+ log_init();
+ debug (THREADINVARIANT)
+ self = pthread_self();
+ //printf("gcx = %p, self = %x\n", this, self);
+ inited = 1;
+ }
+
+
+ void Dtor()
+ {
+ inited = 0;
+
+ for (size_t i = 0; i < npools; i++)
+ { Pool *pool = pooltable[i];
+
+ pool.Dtor();
+ cstdlib.free(pool);
+ }
+ if (pooltable)
+ cstdlib.free(pooltable);
+
+ if (roots)
+ cstdlib.free(roots);
+
+ if (ranges)
+ cstdlib.free(ranges);
+ }
+
+
+ void Invariant() { }
+
+
+ invariant
+ {
+ if (inited)
+ {
+ //printf("Gcx.invariant(): this = %p\n", this);
+ size_t i;
+
+ // Assure we're called on the right thread
+ debug (THREADINVARIANT) assert(self == pthread_self());
+
+ for (i = 0; i < npools; i++)
+ { Pool *pool = pooltable[i];
+
+ pool.Invariant();
+ if (i == 0)
+ {
+ assert(minAddr == pool.baseAddr);
+ }
+ if (i + 1 < npools)
+ {
+ assert(pool.opCmp(pooltable[i + 1]) < 0);
+ }
+ else if (i + 1 == npools)
+ {
+ assert(maxAddr == pool.topAddr);
+ }
+ }
+
+ if (roots)
+ {
+ assert(rootdim != 0);
+ assert(nroots <= rootdim);
+ }
+
+ if (ranges)
+ {
+ assert(rangedim != 0);
+ assert(nranges <= rangedim);
+
+ for (i = 0; i < nranges; i++)
+ {
+ assert(ranges[i].pbot);
+ assert(ranges[i].ptop);
+ assert(ranges[i].pbot <= ranges[i].ptop);
+ }
+ }
+
+ for (i = 0; i < B_PAGE; i++)
+ {
+ for (List *list = bucket[i]; list; list = list.next)
+ {
+ // just walking the free lists; a corrupt link faults here early
+ }
+ }
+ }
+ }
+
+
+ /**
+ * Append p to roots[], growing the array as needed.
+ */
+ void addRoot(void *p)
+ {
+ if (nroots == rootdim)
+ {
+ size_t newdim = rootdim * 2 + 16;
+ void** newroots;
+
+ newroots = cast(void**)cstdlib.malloc(newdim * newroots[0].sizeof);
+ if (!newroots)
+ onOutOfMemoryError();
+ if (roots)
+ { cstring.memcpy(newroots, roots, nroots * newroots[0].sizeof);
+ cstdlib.free(roots);
+ }
+ roots = newroots;
+ rootdim = newdim;
+ }
+ roots[nroots] = p;
+ nroots++;
+ }
+
+
+ /**
+ * Remove p from roots[]; asserts if p is not present.
+ */
+ void removeRoot(void *p)
+ {
+ for (size_t i = nroots; i--;)
+ {
+ if (roots[i] == p)
+ {
+ nroots--;
+ cstring.memmove(roots + i, roots + i + 1, (nroots - i) * roots[0].sizeof);
+ return;
+ }
+ }
+ assert(0);
+ }
+
+
+ /**
+ * Append the range [pbot, ptop) to ranges[], growing the array as needed.
+ */
+ void addRange(void *pbot, void *ptop)
+ {
+ debug(PRINTF) printf("Thread %x ", pthread_self());
+ debug(PRINTF) printf("%x.Gcx::addRange(%x, %x), nranges = %d\n", this, pbot, ptop, nranges);
+ if (nranges == rangedim)
+ {
+ size_t newdim = rangedim * 2 + 16;
+ Range *newranges;
+
+ newranges = cast(Range*)cstdlib.malloc(newdim * newranges[0].sizeof);
+ if (!newranges)
+ onOutOfMemoryError();
+ if (ranges)
+ { cstring.memcpy(newranges, ranges, nranges * newranges[0].sizeof);
+ cstdlib.free(ranges);
+ }
+ ranges = newranges;
+ rangedim = newdim;
+ }
+ ranges[nranges].pbot = pbot;
+ ranges[nranges].ptop = ptop;
+ nranges++;
+ }
+
+
+ /**
+ * Remove the range whose bottom address is pbot. Does nothing if no such
+ * range is registered (see the comment at the end of this function).
+ */
+ void removeRange(void *pbot)
+ {
+ debug(PRINTF) printf("Thread %x ", pthread_self());
+ debug(PRINTF) printf("%x.Gcx.removeRange(%x), nranges = %d\n", this, pbot, nranges);
+ for (size_t i = nranges; i--;)
+ {
+ if (ranges[i].pbot == pbot)
+ {
+ nranges--;
+ cstring.memmove(ranges + i, ranges + i + 1, (nranges - i) * ranges[0].sizeof);
+ return;
+ }
+ }
+ debug(PRINTF) printf("Wrong thread\n");
+
+ // This ought to be an error, but we ignore it: a Close() call can
+ // arrive on a thread other than the one the range was registered on.
+ //assert(zero);
+ }
+
+
+ /**
+ * Find Pool that pointer is in.
+ * Return null if not in a Pool.
+ * Assume pooltable[] is sorted.
+ */
+ Pool *findPool(void *p)
+ {
+ if (p >= minAddr && p < maxAddr)
+ {
+ if (npools == 1)
+ {
+ return pooltable[0];
+ }
+
+ for (size_t i = 0; i < npools; i++)
+ { Pool *pool;
+
+ pool = pooltable[i];
+ if (p < pool.topAddr)
+ { if (pool.baseAddr <= p)
+ return pool;
+ break;
+ }
+ }
+ }
+ return null;
+ }
+
+
+ /**
+ * Find base address of block containing pointer p.
+ * Returns null if not a gc'd pointer
+ */
+ void* findBase(void *p)
+ {
+ Pool *pool;
+
+ pool = findPool(p);
+ if (pool)
+ {
+ size_t offset = cast(size_t)(p - pool.baseAddr);
+ size_t pn = offset / PAGESIZE;
+ Bins bin = cast(Bins)pool.pagetable[pn];
+
+ // Adjust bit to be at start of allocated memory block
+ if (bin <= B_PAGE)
+ {
+ return pool.baseAddr + (offset & notbinsize[bin]);
+ }
+ else if (bin == B_PAGEPLUS)
+ {
+ do
+ { --pn, offset -= PAGESIZE;
+ } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
+
+ return pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
+ }
+ else
+ {
+ // we are in a B_FREE or B_UNCOMMITTED page
+ return null;
+ }
+ }
+ return null;
+ }
+
+
+ /**
+ * Find size of pointer p.
+ * Returns 0 if not a gc'd pointer
+ */
+ size_t findSize(void *p)
+ {
+ Pool* pool;
+ size_t size = 0;
+
+ pool = findPool(p);
+ if (pool)
+ {
+ size_t pagenum;
+ Bins bin;
+
+ pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
+ bin = cast(Bins)pool.pagetable[pagenum];
+ size = binsize[bin];
+ if (bin == B_PAGE)
+ { size_t npages = pool.ncommitted;
+ ubyte* pt;
+ size_t i;
+
+ pt = &pool.pagetable[0];
+ for (i = pagenum + 1; i < npages; i++)
+ {
+ if (pt[i] != B_PAGEPLUS)
+ break;
+ }
+ size = (i - pagenum) * PAGESIZE;
+ }
+ }
+ return size;
+ }
+
+
+ /**
+ * Return the base address, size, and attribute bits of the block
+ * containing p as a BlkInfo. Returns BlkInfo.init if p is not a gc'd
+ * pointer.
+ */
+ BlkInfo getInfo(void* p)
+ {
+ Pool* pool;
+ BlkInfo info;
+
+ pool = findPool(p);
+ if (pool)
+ {
+ size_t offset = cast(size_t)(p - pool.baseAddr);
+ size_t pn = offset / PAGESIZE;
+ Bins bin = cast(Bins)pool.pagetable[pn];
+
+ ////////////////////////////////////////////////////////////////////
+ // findAddr
+ ////////////////////////////////////////////////////////////////////
+
+ if (bin <= B_PAGE)
+ {
+ info.base = pool.baseAddr + (offset & notbinsize[bin]);
+ }
+ else if (bin == B_PAGEPLUS)
+ {
+ do
+ { --pn, offset -= PAGESIZE;
+ } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
+
+ info.base = pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
+
+ // fix bin for use by size calc below
+ bin = cast(Bins)pool.pagetable[pn];
+ }
+
+ ////////////////////////////////////////////////////////////////////
+ // findSize
+ ////////////////////////////////////////////////////////////////////
+
+ info.size = binsize[bin];
+ if (bin == B_PAGE)
+ { size_t npages = pool.ncommitted;
+ ubyte* pt;
+ size_t i;
+
+ pt = &pool.pagetable[0];
+ for (i = pn + 1; i < npages; i++)
+ {
+ if (pt[i] != B_PAGEPLUS)
+ break;
+ }
+ info.size = (i - pn) * PAGESIZE;
+ }
+
+ ////////////////////////////////////////////////////////////////////
+ // getBits
+ ////////////////////////////////////////////////////////////////////
+
+ info.attr = getBits(pool, cast(size_t)(offset / 16));
+ }
+ return info;
+ }
+
+
+ /**
+ * Compute bin for size.
+ */
+ static Bins findBin(size_t size)
+ { Bins bin;
+
+ if (size <= 256)
+ {
+ if (size <= 64)
+ {
+ if (size <= 16)
+ bin = B_16;
+ else if (size <= 32)
+ bin = B_32;
+ else
+ bin = B_64;
+ }
+ else
+ {
+ if (size <= 128)
+ bin = B_128;
+ else
+ bin = B_256;
+ }
+ }
+ else
+ {
+ if (size <= 1024)
+ {
+ if (size <= 512)
+ bin = B_512;
+ else
+ bin = B_1024;
+ }
+ else
+ {
+ if (size <= 2048)
+ bin = B_2048;
+ else
+ bin = B_PAGE;
+ }
+ }
+ return bin;
+ }