2 * This module contains the garbage collector implementation.
4 * Copyright: Copyright (C) 2001-2007 Digital Mars, www.digitalmars.com.
7 * This software is provided 'as-is', without any express or implied
8 * warranty. In no event will the authors be held liable for any damages
9 * arising from the use of this software.
11 * Permission is granted to anyone to use this software for any purpose,
12 * including commercial applications, and to alter it and redistribute it
13 * freely, in both source and binary form, subject to the following
16 * o The origin of this software must not be misrepresented; you must not
17 * claim that you wrote the original software. If you use this software
18 * in a product, an acknowledgment in the product documentation would be
19 * appreciated but is not required.
20 * o Altered source versions must be plainly marked as such, and must not
21 * be misrepresented as being the original software.
22 * o This notice may not be removed or altered from any source
24 * Authors: Walter Bright, David Friedman, Sean Kelly
29 // D Programming Language Garbage Collector implementation
31 /************** Debugging ***************************/
33 //debug = COLLECT_PRINTF; // turn on printf's
34 //debug = PTRCHECK; // more pointer checking
35 //debug = PTRCHECK2; // thorough but slow pointer checking
37 /*************** Configuration *********************/
39 version = STACKGROWSDOWN; // growing the stack means subtracting from the stack pointer
40 // (use for Intel X86 CPUs)
41 // else growing the stack means adding to the stack pointer
43 /***************************************************/
45 import rt.gc.cdgc.bits: GCBits;
46 import rt.gc.cdgc.stats: GCStats, Stats;
47 import rt.gc.cdgc.dynarray: DynArray;
48 import alloc = rt.gc.cdgc.alloc;
49 import opts = rt.gc.cdgc.opts;
51 import cstdlib = tango.stdc.stdlib;
52 import cstring = tango.stdc.string;
55 * This is a small optimization that has proved its usefulness. For small chunks
56 * of memory, memset() seems to be slower (probably because of the call overhead)
57 * than a simple loop that sets the memory.
59 void memset(void* dst, int c, size_t n)
61 // This number (32) has been determined empirically
63 cstring.memset(dst, c, n);
66 auto p = cast(ubyte*)(dst);
73 // BUG: The following import will likely not work, since the gcc
74 // subdirectory is elsewhere. Instead, perhaps the functions
75 // could be declared directly or some other resolution could
77 static import gcc.builtins; // for __builtin_unwind_init
87 package enum BlkAttr : uint
89 FINALIZE = 0b0000_0001,
90 NO_SCAN = 0b0000_0010,
91 NO_MOVE = 0b0000_0100,
92 ALL_BITS = 0b1111_1111
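// Illustrative usage (not part of the original source; gc, obj_size and
// ptrmap are hypothetical caller-side names): attributes are combined as a
// bit mask when allocating, e.g.
//   auto p = gc.malloc(obj_size, BlkAttr.FINALIZE | BlkAttr.NO_SCAN, ptrmap);
// requests finalization and excludes the block from pointer scanning.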
98 extern (C) void* rt_stackBottom();
99 extern (C) void* rt_stackTop();
101 extern (C) void rt_finalize( void* p, bool det = true );
103 alias void delegate(Object) DEvent;
104 extern (C) void rt_attachDisposeEvent(Object h, DEvent e);
105 extern (C) bool rt_detachDisposeEvent(Object h, DEvent e);
108 alias void delegate( void*, void* ) scanFn;
110 extern (C) void rt_scanStaticData( scanFn scan );
112 extern (C) bool thread_needLock();
113 extern (C) void thread_suspendAll();
114 extern (C) void thread_resumeAll();
116 extern (C) void thread_scanAll( scanFn fn, void* curStackTop = null );
118 extern (C) void onOutOfMemoryError();
122 OPFAIL = ~cast(size_t)0
130 /* ============================ GC =============================== */
133 class GCLock { } // just a dummy so we can get a global lock
136 const uint GCVERSION = 1; // increment every time we change interface
143 // For passing to debug code
147 uint gcversion = GCVERSION;
149 Gcx *gcx; // implementation
150 static ClassInfo gcLock; // global lock
155 opts.parse(cstdlib.getenv("D_GC_OPTS"));
156 gcLock = GCLock.classinfo;
157 gcx = cast(Gcx*) cstdlib.calloc(1, Gcx.sizeof);
159 onOutOfMemoryError();
161 setStackBottom(rt_stackBottom());
171 if (!thread_needLock())
173 assert(gcx.disabled > 0);
176 else synchronized (gcLock)
178 assert(gcx.disabled > 0);
189 if (!thread_needLock())
193 else synchronized (gcLock)
203 uint getAttr(void* p)
212 Pool* pool = gcx.findPool(p);
217 auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
219 old_attrs = gcx.getAttr(pool, bit_i);
224 if (!thread_needLock())
228 else synchronized (gcLock)
238 uint setAttr(void* p, uint mask)
247 Pool* pool = gcx.findPool(p);
252 auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
254 old_attrs = gcx.getAttr(pool, bit_i);
255 gcx.setAttr(pool, bit_i, mask);
260 if (!thread_needLock())
264 else synchronized (gcLock)
274 uint clrAttr(void* p, uint mask)
283 Pool* pool = gcx.findPool(p);
288 auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
290 old_attrs = gcx.getAttr(pool, bit_i);
291 gcx.clrAttr(pool, bit_i, mask);
296 if (!thread_needLock())
300 else synchronized (gcLock)
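/* Illustrative sketch (not in the original source): attribute bits are kept
 * per 16-byte unit of a pool, so the bit index for a block is
 *   bit_i = (p - pool.baseAddr) / 16;
 * A typical caller-side sequence (names hypothetical) would be:
 *   gc.setAttr(p, BlkAttr.FINALIZE);  // returns the attrs before the change
 *   uint a = gc.getAttr(p);           // now includes BlkAttr.FINALIZE
 *   gc.clrAttr(p, BlkAttr.FINALIZE);  // clears it again
 */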
310 void *malloc(size_t size, uint attrs, PointerMap ptrmap)
317 if (!thread_needLock())
319 return mallocNoSync(size, attrs, ptrmap.bits.ptr);
321 else synchronized (gcLock)
323 return mallocNoSync(size, attrs, ptrmap.bits.ptr);
331 private void *mallocNoSync(size_t size, uint attrs, size_t* pm_bitmask)
335 stats.malloc_started(size, attrs, pm_bitmask);
337 stats.malloc_finished(p);
344 if (opts.options.sentinel)
345 size += SENTINEL_EXTRA;
347 bool has_pm = !(attrs & BlkAttr.NO_SCAN);
348 size_t pm_bitmask_size;
350 pm_bitmask_size = (size_t*).sizeof;
351 size += pm_bitmask_size;
354 // Cache previous binsize lookup - Dave Fladebo.
355 static size_t lastsize = -1;
357 if (size == lastsize)
361 bin = gcx.findBin(size);
366 size_t capacity; // to figure out where to store the bitmask
372 if (!gcx.allocPage(bin) && !gcx.disabled) // try to find a new page
374 if (!thread_needLock())
376 /* Then we haven't locked it yet. Be sure
377 * to lock for the collection, since a finalizer
378 * may start a new thread.
380 synchronized (gcLock)
382 gcx.fullcollectshell();
385 else if (!gcx.fullcollectshell()) // collect to find a new page
390 if (!gcx.bucket[bin] && !gcx.allocPage(bin))
392 gcx.newPool(1); // allocate new pool to find a new page
393 int result = gcx.allocPage(bin);
395 onOutOfMemoryError();
399 capacity = binsize[bin];
401 // Return next item from free list
402 gcx.bucket[bin] = (cast(List*)p).next;
403 if( !(attrs & BlkAttr.NO_SCAN) )
404 memset(p + size, 0, capacity - size);
405 if (opts.options.mem_stomp)
406 memset(p, 0xF0, size);
410 p = gcx.bigAlloc(size);
412 onOutOfMemoryError();
413 // Round the size up to the number of pages needed to store it
414 size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
415 capacity = npages * PAGESIZE;
418 // Store the bit mask AFTER SENTINEL_POST
419 // TODO: store it BEFORE, so the bitmask is protected too
421 auto end_of_blk = cast(size_t**)(p + capacity - pm_bitmask_size);
422 *end_of_blk = pm_bitmask;
423 size -= pm_bitmask_size;
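// Illustrative block layout when a pointer map is stored (assumption drawn
// from the code above): the last (size_t*).sizeof bytes of the block hold a
// pointer to the precise pointer-map bitmask, i.e.
//   [ user data ... | unused slack | size_t* pm_bitmask ]
// so the size reported to the caller shrinks by pm_bitmask_size.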
426 if (opts.options.sentinel) {
427 size -= SENTINEL_EXTRA;
429 sentinel_init(p, size);
434 Pool *pool = gcx.findPool(p);
437 gcx.setAttr(pool, cast(size_t)(p - pool.baseAddr) / 16, attrs);
446 void *calloc(size_t size, uint attrs, PointerMap ptrmap)
453 if (!thread_needLock())
455 return callocNoSync(size, attrs, ptrmap.bits.ptr);
457 else synchronized (gcLock)
459 return callocNoSync(size, attrs, ptrmap.bits.ptr);
467 private void *callocNoSync(size_t size, uint attrs, size_t* pm_bitmask)
471 void *p = mallocNoSync(size, attrs, pm_bitmask);
480 void *realloc(void *p, size_t size, uint attrs, PointerMap ptrmap)
482 if (!thread_needLock())
484 return reallocNoSync(p, size, attrs, ptrmap.bits.ptr);
486 else synchronized (gcLock)
488 return reallocNoSync(p, size, attrs, ptrmap.bits.ptr);
496 private void *reallocNoSync(void *p, size_t size, uint attrs,
509 p = mallocNoSync(size, attrs, pm_bitmask);
513 Pool* pool = gcx.findPool(p);
517 // Set or retrieve attributes as appropriate
518 auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
520 gcx.clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
521 gcx.setAttr(pool, bit_i, attrs);
524 attrs = gcx.getAttr(pool, bit_i);
526 void* blk_base_addr = gcx.findBase(p);
527 size_t blk_size = gcx.findSize(p);
528 bool has_pm = !(attrs & BlkAttr.NO_SCAN);
529 size_t pm_bitmask_size = 0;
531 pm_bitmask_size = (size_t*).sizeof;
532 // Retrieve pointer map bit mask if appropriate
533 if (pm_bitmask is null) {
534 auto end_of_blk = cast(size_t**)(blk_base_addr +
535 blk_size - pm_bitmask_size);
536 pm_bitmask = *end_of_blk;
540 if (opts.options.sentinel)
542 sentinel_Invariant(p);
543 size_t sentinel_stored_size = *sentinel_size(p);
544 if (sentinel_stored_size != size)
546 void* p2 = mallocNoSync(size, attrs, pm_bitmask);
547 if (sentinel_stored_size < size)
548 size = sentinel_stored_size;
549 cstring.memcpy(p2, p, size);
555 size += pm_bitmask_size;
556 if (blk_size >= PAGESIZE && size >= PAGESIZE)
558 auto psz = blk_size / PAGESIZE;
559 auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
563 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
568 synchronized (gcLock)
570 if (opts.options.mem_stomp)
571 memset(p + size - pm_bitmask_size, 0xF2,
572 blk_size - size - pm_bitmask_size);
573 pool.freePages(pagenum + newsz, psz - newsz);
576 auto end_of_blk = cast(size_t**)(
577 blk_base_addr + (PAGESIZE * newsz) -
579 *end_of_blk = pm_bitmask;
583 else if (pagenum + newsz <= pool.npages)
585 // Attempt to expand in place
586 synchronized (gcLock)
588 for (size_t i = pagenum + psz; 1;)
590 if (i == pagenum + newsz)
592 if (opts.options.mem_stomp)
593 memset(p + blk_size - pm_bitmask_size,
594 0xF0, size - blk_size
596 memset(pool.pagetable + pagenum +
597 psz, B_PAGEPLUS, newsz - psz);
599 auto end_of_blk = cast(size_t**)(
603 *end_of_blk = pm_bitmask;
607 if (i == pool.npages)
611 if (pool.pagetable[i] != B_FREE)
618 // reallocate if the new size is bigger than the block, or less than half of it
619 if (blk_size < size || blk_size > size * 2)
621 size -= pm_bitmask_size;
622 blk_size -= pm_bitmask_size;
623 void* p2 = mallocNoSync(size, attrs, pm_bitmask);
626 cstring.memcpy(p2, p, size);
636 * Attempt to enlarge, in place, the memory block pointed to by p by at least
637 * minsize bytes beyond its current capacity, up to a maximum of maxsize. This
638 * does not attempt to move the memory block (as realloc() does).
641 * 0 if could not extend p,
642 * total size of entire memory block if successful.
644 size_t extend(void* p, size_t minsize, size_t maxsize)
646 if (!thread_needLock())
648 return extendNoSync(p, minsize, maxsize);
650 else synchronized (gcLock)
652 return extendNoSync(p, minsize, maxsize);
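/* Example (hypothetical caller, not in the original source):
 *   // try to grow a block in place by at least 1 KiB, by at most 64 KiB
 *   size_t total = gc.extend(p, 1024, 64 * 1024);
 *   if (total == 0)
 *       { } // could not extend in place; fall back to realloc()
 */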
660 private size_t extendNoSync(void* p, size_t minsize, size_t maxsize)
663 assert( minsize <= maxsize );
667 if (opts.options.sentinel)
670 Pool* pool = gcx.findPool(p);
674 // Retrieve attributes
675 auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
676 uint attrs = gcx.getAttr(pool, bit_i);
678 void* blk_base_addr = gcx.findBase(p);
679 size_t blk_size = gcx.findSize(p);
680 bool has_pm = !(attrs & BlkAttr.NO_SCAN);
681 size_t* pm_bitmask = null;
682 size_t pm_bitmask_size = 0;
684 pm_bitmask_size = (size_t*).sizeof;
685 // Retrieve pointer map bit mask
686 auto end_of_blk = cast(size_t**)(blk_base_addr +
687 blk_size - pm_bitmask_size);
688 pm_bitmask = *end_of_blk;
691 if (blk_size < PAGESIZE)
692 return 0; // cannot extend buckets
694 minsize += pm_bitmask_size;
695 maxsize += pm_bitmask_size;
697 auto psz = blk_size / PAGESIZE;
698 auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
699 auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
701 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
704 for (sz = 0; sz < maxsz; sz++)
706 auto i = pagenum + psz + sz;
707 if (i == pool.npages)
709 if (pool.pagetable[i] != B_FREE)
719 size_t new_size = (psz + sz) * PAGESIZE;
721 if (opts.options.mem_stomp)
722 memset(p + blk_size - pm_bitmask_size, 0xF0,
723 new_size - blk_size - pm_bitmask_size);
724 memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
729 new_size -= pm_bitmask_size;
730 auto end_of_blk = cast(size_t**)(blk_base_addr + new_size);
731 *end_of_blk = pm_bitmask;
740 size_t reserve(size_t size)
747 if (!thread_needLock())
749 return reserveNoSync(size);
751 else synchronized (gcLock)
753 return reserveNoSync(size);
761 private size_t reserveNoSync(size_t size)
766 return gcx.reserve(size);
780 if (!thread_needLock())
782 return freeNoSync(p);
784 else synchronized (gcLock)
786 return freeNoSync(p);
794 private void freeNoSync(void *p)
803 // Find which page it is in
804 pool = gcx.findPool(p);
805 if (!pool) // if not one of ours
807 if (opts.options.sentinel) {
808 sentinel_Invariant(p);
811 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
812 bit_i = cast(size_t)(p - pool.baseAddr) / 16;
813 gcx.clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
815 bin = cast(Bins)pool.pagetable[pagenum];
816 if (bin == B_PAGE) // if large alloc
821 while (++n < pool.npages && pool.pagetable[n] == B_PAGEPLUS)
823 if (opts.options.mem_stomp)
824 memset(p, 0xF2, npages * PAGESIZE);
825 pool.freePages(pagenum, npages);
830 List *list = cast(List*)p;
832 if (opts.options.mem_stomp)
833 memset(p, 0xF2, binsize[bin]);
835 list.next = gcx.bucket[bin];
836 gcx.bucket[bin] = list;
842 * Determine the base address of the block containing p. If p is not a gc
843 * allocated pointer, return null.
845 void* addrOf(void *p)
852 if (!thread_needLock())
854 return addrOfNoSync(p);
856 else synchronized (gcLock)
858 return addrOfNoSync(p);
866 void* addrOfNoSync(void *p)
873 return gcx.findBase(p);
878 * Determine the allocated size of pointer p. If p is an interior pointer
879 * or not a gc allocated pointer, return 0.
881 size_t sizeOf(void *p)
888 if (!thread_needLock())
890 return sizeOfNoSync(p);
892 else synchronized (gcLock)
894 return sizeOfNoSync(p);
902 private size_t sizeOfNoSync(void *p)
906 if (opts.options.sentinel)
909 Pool* pool = gcx.findPool(p);
913 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
914 uint attrs = gcx.getAttr(pool, biti);
916 size_t size = gcx.findSize(p);
917 bool has_pm = !(attrs & BlkAttr.NO_SCAN);
918 size_t pm_bitmask_size = 0;
920 pm_bitmask_size = (size_t*).sizeof;
922 if (opts.options.sentinel) {
923 // Check for interior pointer
925 // 1) size is a power of 2 for sizes less than PAGESIZE
926 // 2) the base of the memory pool is aligned on a PAGESIZE boundary
927 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
929 return size - SENTINEL_EXTRA - pm_bitmask_size;
932 if (p == gcx.p_cache)
933 return gcx.size_cache;
935 // Check for interior pointer
937 // 1) size is a power of 2 for sizes less than PAGESIZE
938 // 2) the base of the memory pool is aligned on a PAGESIZE boundary
939 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
943 gcx.size_cache = size - pm_bitmask_size;
945 return gcx.size_cache;
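/* Worked example (illustrative): for a 64-byte bin, size - 1 == 0x3F, so a
 * pointer 16 bytes into a block has (p & 0x3F & (PAGESIZE-1)) != 0 and
 * sizeOf() reports 0 (interior pointer). For B_PAGE blocks the size is a
 * multiple of PAGESIZE and the (PAGESIZE - 1) term checks page alignment
 * instead.
 */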
951 * Retrieve the base address, size and attribute bits of the block containing p.
952 * If p is not a gc allocated pointer, an empty BlkInfo is returned.
954 BlkInfo query(void *p)
962 if (!thread_needLock())
964 return queryNoSync(p);
966 else synchronized (gcLock)
968 return queryNoSync(p);
976 BlkInfo queryNoSync(void *p)
980 return gcx.getInfo(p);
985 * Verify that pointer p:
986 * 1) belongs to this memory pool
987 * 2) points to the start of an allocated piece of memory
988 * 3) is not on a free list
997 if (!thread_needLock())
1001 else synchronized (gcLock)
1011 private void checkNoSync(void *p)
1015 if (opts.options.sentinel)
1016 sentinel_Invariant(p);
1024 if (opts.options.sentinel)
1025 p = sentinel_sub(p);
1026 pool = gcx.findPool(p);
1028 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1029 bin = cast(Bins)pool.pagetable[pagenum];
1030 assert(bin <= B_PAGE);
1031 size = binsize[bin];
1032 assert((cast(size_t)p & (size - 1)) == 0);
1038 // Check that p is not on a free list
1041 for (list = gcx.bucket[bin]; list; list = list.next)
1043 assert(cast(void*)list != p);
1054 private void setStackBottom(void *p)
1056 version (STACKGROWSDOWN)
1058 //p = (void *)((uint *)p + 4);
1059 if (p > gcx.stackBottom)
1061 gcx.stackBottom = p;
1066 //p = (void *)((uint *)p - 4);
1067 if (p < gcx.stackBottom)
1069 gcx.stackBottom = cast(char*)p;
1076 * add p to list of roots
1078 void addRoot(void *p)
1085 if (!thread_needLock())
1087 if (roots.append(p) is null)
1088 onOutOfMemoryError();
1090 else synchronized (gcLock)
1092 if (roots.append(p) is null)
1093 onOutOfMemoryError();
1099 * remove p from list of roots
1101 void removeRoot(void *p)
1109 if (!thread_needLock())
1111 r = roots.remove(p);
1113 else synchronized (gcLock)
1115 r = roots.remove(p);
1122 * add range to scan for roots
1124 void addRange(void *p, size_t sz)
1131 if (!thread_needLock())
1133 if (ranges.append(Range(p, p+sz)) is null)
1134 onOutOfMemoryError();
1136 else synchronized (gcLock)
1138 if (ranges.append(Range(p, p+sz)) is null)
1139 onOutOfMemoryError();
1147 void removeRange(void *p)
1155 if (!thread_needLock())
1157 r = ranges.remove(Range(p, null));
1159 else synchronized (gcLock)
1161 r = ranges.remove(Range(p, null));
1168 * do full garbage collection
1173 if (!thread_needLock())
1175 gcx.fullcollectshell();
1177 else synchronized (gcLock)
1179 gcx.fullcollectshell();
1192 * do full garbage collection without scanning the stack
1194 void fullCollectNoStack()
1196 if (!thread_needLock())
1199 gcx.fullcollectshell();
1202 else synchronized (gcLock)
1205 gcx.fullcollectshell();
1212 * minimize free space usage
1216 if (!thread_needLock())
1220 else synchronized (gcLock)
1228 * Retrieve statistics about garbage collection.
1229 * Useful for debugging and tuning.
1231 void getStats(out GCStats stats)
1233 if (!thread_needLock())
1235 getStatsNoSync(stats);
1237 else synchronized (gcLock)
1239 getStatsNoSync(stats);
1247 private void getStatsNoSync(out GCStats stats)
1256 memset(&stats, 0, GCStats.sizeof);
1258 for (n = 0; n < pools.length; n++)
1260 Pool* pool = pools[n];
1261 psize += pool.npages * PAGESIZE;
1262 for (size_t j = 0; j < pool.npages; j++)
1264 Bins bin = cast(Bins)pool.pagetable[j];
1267 else if (bin == B_PAGE)
1269 else if (bin < B_PAGE)
1274 for (n = 0; n < B_PAGE; n++)
1276 for (List *list = gcx.bucket[n]; list; list = list.next)
1277 flsize += binsize[n];
1280 usize = bsize - flsize;
1282 stats.poolsize = psize;
1283 stats.usedsize = bsize - flsize;
1284 stats.freelistsize = flsize;
1287 /******************* weak-reference support *********************/
1289 // call locked if necessary
1290 private T locked(T)(in T delegate() code)
1292 if (thread_needLock)
1293 synchronized(gcLock) return code();
1298 private struct WeakPointer
1302 void ondestroy(Object r)
1304 assert(r is reference);
1305 // lock for memory consistency (parallel readers)
1306 // also ensures that weakpointerDestroy can be called while another
1307 // thread is freeing the reference with "delete"
1308 locked!(void)({ reference = null; });
1313 * Create a weak pointer to the given object.
1314 * Returns a pointer to an opaque struct allocated in C memory.
1316 void* weakpointerCreate( Object r )
1320 // must be allocated in C memory
1321 // 1. to hide the reference from the GC
1322 // 2. because the GC doesn't scan delegates registered via rt_attachDisposeEvent
1324 auto wp = cast(WeakPointer*)(cstdlib.malloc(WeakPointer.sizeof));
1326 onOutOfMemoryError();
1328 rt_attachDisposeEvent(r, &wp.ondestroy);
1335 * Destroy a weak pointer returned by weakpointerCreate().
1336 * If null is passed, nothing happens.
1338 void weakpointerDestroy( void* p )
1342 auto wp = cast(WeakPointer*)p;
1343 // must be extra careful about the GC or parallel threads
1344 // finalizing the reference at the same time
1347 rt_detachDisposeEvent(wp.reference, &wp.ondestroy);
1354 * Query a weak pointer and return either the object passed to
1355 * weakpointerCreate, or null if it was free'd in the meantime.
1356 * If null is passed, null is returned.
1358 Object weakpointerGet( void* p )
1362 // NOTE: we could avoid the lock by using Fawzi-style GC counters, but
1363 // that'd require core.sync.Atomic and lots of care about memory
1364 // consistency; it's an optional optimization, see
1365 // http://dsource.org/projects/tango/browser/trunk/user/tango/core/Lifetime.d?rev=5100#L158
1366 return locked!(Object)({
1367 return (cast(WeakPointer*)p).reference;
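/* Example (hypothetical caller, not in the original source):
 *   void* wp = weakpointerCreate(obj);   // obj remains collectable
 *   // ... later ...
 *   if (auto o = weakpointerGet(wp))
 *       { } // obj is still alive here
 *   weakpointerDestroy(wp);              // frees the C-heap WeakPointer
 */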
1374 /* ============================ Gcx =============================== */
1379 POOLSIZE = (4096*256),
1393 B_PAGE, // start of large alloc
1394 B_PAGEPLUS, // continuation of large alloc
1395 B_FREE, // free page
1413 int opCmp(in Range other)
1415 if (pbot < other.pbot)
1418 return cast(int)(pbot > other.pbot);
1423 const uint binsize[B_MAX] = [ 16,32,64,128,256,512,1024,2048,4096 ];
1424 const uint notbinsize[B_MAX] = [ ~(16u-1),~(32u-1),~(64u-1),~(128u-1),~(256u-1),
1425 ~(512u-1),~(1024u-1),~(2048u-1),~(4096u-1) ];
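// Illustrative note (derived from the tables above): notbinsize[bin] is the
// mask that rounds an offset down to the start of its block; e.g. for the
// 64-byte bin, offset & ~(64u - 1) clears the low 6 bits, which is how
// findBase()/getInfo() recover a block's base address from an interior
// pointer.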
1427 DynArray!(void*) roots;
1429 DynArray!(Range) ranges;
1431 DynArray!(Pool) pools;
1434 /* ============================ Gcx =============================== */
1443 uint noStack; // !=0 means don't scan stack
1444 uint log; // turn on logging
1448 int disabled; // turn off collections if >0
1450 byte *minAddr; // min(baseAddr)
1451 byte *maxAddr; // max(topAddr)
1453 List *bucket[B_MAX]; // free list for each size
1459 (cast(byte*)this)[0 .. Gcx.sizeof] = 0;
1460 stackBottom = cast(char*)&dummy;
1461 //printf("gcx = %p, self = %x\n", this, self);
1466 void Invariant() { }
1473 //printf("Gcx.invariant(): this = %p\n", this);
1476 for (i = 0; i < pools.length; i++)
1478 Pool* pool = pools[i];
1482 assert(minAddr == pool.baseAddr);
1484 if (i + 1 < pools.length)
1486 assert(*pool < pools[i + 1]);
1488 else if (i + 1 == pools.length)
1490 assert(maxAddr == pool.topAddr);
1497 for (i = 0; i < ranges.length; i++)
1499 assert(ranges[i].pbot);
1500 assert(ranges[i].ptop);
1501 assert(ranges[i].pbot <= ranges[i].ptop);
1504 for (i = 0; i < B_PAGE; i++)
1506 for (List *list = bucket[i]; list; list = list.next)
1515 * Find Pool that pointer is in.
1516 * Return null if not in a Pool.
1517 * Assume pools is sorted.
1519 Pool *findPool(void *p)
1521 if (p >= minAddr && p < maxAddr)
1523 if (pools.length == 1)
1528 for (size_t i = 0; i < pools.length; i++)
1530 Pool* pool = pools[i];
1531 if (p < pool.topAddr)
1533 if (pool.baseAddr <= p)
1544 * Find base address of block containing pointer p.
1545 * Returns null if not a gc'd pointer
1547 void* findBase(void *p)
1554 size_t offset = cast(size_t)(p - pool.baseAddr);
1555 size_t pn = offset / PAGESIZE;
1556 Bins bin = cast(Bins)pool.pagetable[pn];
1558 // Adjust the offset to the start of the allocated memory block
1561 return pool.baseAddr + (offset & notbinsize[bin]);
1563 else if (bin == B_PAGEPLUS)
1567 --pn, offset -= PAGESIZE;
1568 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1570 return pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1574 // we are in a B_FREE page
1583 * Find size of pointer p.
1584 * Returns 0 if not a gc'd pointer
1586 size_t findSize(void *p)
1597 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1598 bin = cast(Bins)pool.pagetable[pagenum];
1599 size = binsize[bin];
1605 pt = &pool.pagetable[0];
1606 for (i = pagenum + 1; i < pool.npages; i++)
1608 if (pt[i] != B_PAGEPLUS)
1611 size = (i - pagenum) * PAGESIZE;
1621 BlkInfo getInfo(void* p)
1629 size_t offset = cast(size_t)(p - pool.baseAddr);
1630 size_t pn = offset / PAGESIZE;
1631 Bins bin = cast(Bins)pool.pagetable[pn];
1633 ////////////////////////////////////////////////////////////////////
1635 ////////////////////////////////////////////////////////////////////
1639 info.base = pool.baseAddr + (offset & notbinsize[bin]);
1641 else if (bin == B_PAGEPLUS)
1645 --pn, offset -= PAGESIZE;
1647 while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1649 info.base = pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1651 // fix bin for use by size calc below
1652 bin = cast(Bins)pool.pagetable[pn];
1655 ////////////////////////////////////////////////////////////////////
1657 ////////////////////////////////////////////////////////////////////
1659 info.size = binsize[bin];
1665 pt = &pool.pagetable[0];
1666 for (i = pn + 1; i < pool.npages; i++)
1668 if (pt[i] != B_PAGEPLUS)
1671 info.size = (i - pn) * PAGESIZE;
1674 ////////////////////////////////////////////////////////////////////
1676 ////////////////////////////////////////////////////////////////////
1678 info.attr = getAttr(pool, cast(size_t)(offset / 16));
1679 if (!(info.attr & BlkAttr.NO_SCAN))
1680 info.size -= (size_t*).sizeof; // bitmask
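// Illustrative consequence (assumption based on the line above): for a
// scannable 64-byte bin allocation on a 64-bit target, query() reports
// info.size == 56, since the trailing 8 bytes hold the pointer-map bitmask
// pointer rather than user data.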
1687 * Compute bin for size.
1689 static Bins findBin(size_t size)
1698 else if (size <= 32)
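// Illustrative mapping (reconstructed from binsize[] above; bin names other
// than B_PAGE are not shown in this extract): requests of 1-16 bytes fall in
// the 16-byte bin, 17-32 in the 32-byte bin, and so on up to 2048; anything
// larger is handled as B_PAGE allocations by bigAlloc().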
1733 * Allocate a new pool of at least size bytes.
1734 * Sort it into pools.
1735 * Mark all memory in the pool as B_FREE.
1736 * Return the actual number of bytes reserved or 0 on error.
1738 size_t reserve(size_t size)
1740 size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
1741 Pool* pool = newPool(npages);
1745 return pool.npages * PAGESIZE;
1750 * Minimizes physical memory usage by returning free pools to the OS.
1758 for (n = 0; n < pools.length; n++)
1761 for (pn = 0; pn < pool.npages; pn++)
1763 if (cast(Bins)pool.pagetable[pn] != B_FREE)
1766 if (pn < pool.npages)
1772 minAddr = pools[0].baseAddr;
1773 maxAddr = pools[pools.length - 1].topAddr;
1778 * Allocate a chunk of memory that is larger than a page.
1779 * Return null if out of memory.
1781 void *bigAlloc(size_t size)
1791 npages = (size + PAGESIZE - 1) / PAGESIZE;
1795 // This code could use some refinement when repeatedly
1796 // allocating very large arrays.
1798 for (n = 0; n < pools.length; n++)
1801 pn = pool.allocPages(npages);
1816 freedpages = fullcollectshell();
1817 if (freedpages >= pools.length * ((POOLSIZE / PAGESIZE) / 4))
1822 // Release empty pools to prevent bloat
1824 // Allocate new pool
1825 pool = newPool(npages);
1831 pn = pool.allocPages(npages);
1832 assert(pn != OPFAIL);
1835 // Release empty pools to prevent bloat
1837 // Allocate new pool
1838 pool = newPool(npages);
1841 pn = pool.allocPages(npages);
1842 assert(pn != OPFAIL);
1852 pool.pagetable[pn] = B_PAGE;
1854 memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
1855 p = pool.baseAddr + pn * PAGESIZE;
1856 memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
1857 if (opts.options.mem_stomp)
1858 memset(p, 0xF1, size);
1862 return null; // let mallocNoSync handle the error
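// Illustrative summary of the fallback order above (reconstructed from the
// surviving lines): 1) look for npages of free space in the existing pools,
// 2) run a full collection and retry if it freed enough pages, 3) allocate
// a fresh pool sized for the request; only when all of that fails is null
// returned and the error left to mallocNoSync().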
1867 * Allocate a new pool with at least npages in it.
1868 * Sort it into pools.
1869 * Return null if failed.
1871 Pool *newPool(size_t npages)
1873 // Minimum of POOLSIZE
1874 if (npages < POOLSIZE/PAGESIZE)
1875 npages = POOLSIZE/PAGESIZE;
1876 else if (npages > POOLSIZE/PAGESIZE)
1878 // Give us 150% of requested size, so there's room to extend
1879 auto n = npages + (npages >> 1);
1880 if (n < size_t.max/PAGESIZE)
1884 // Allocate successively larger pools up to 8 megs
1887 size_t n = pools.length;
1889 n = 8; // cap pool size at 8 megs
1890 n *= (POOLSIZE / PAGESIZE);
1896 p.initialize(npages);
1903 Pool* pool = pools.insert_sorted(p);
1906 minAddr = pools[0].baseAddr;
1907 maxAddr = pools[pools.length - 1].topAddr;
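// Worked example (illustrative; assumes the elided lines raise npages to
// this minimum): with POOLSIZE = 4096 * 256 = 1 MiB and 5 pools already
// allocated, n = 5 * (POOLSIZE / PAGESIZE) = 1280 pages, so the new pool is
// at least 5 MiB; growth stops at the 8 MiB cap mentioned above.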
1914 * Allocate a page of bin-sized blocks.
1918 int allocPage(Bins bin)
1926 for (n = 0; n < pools.length; n++)
1929 pn = pool.allocPages(1);
1936 pool.pagetable[pn] = cast(ubyte)bin;
1938 // Convert page to free list
1939 size_t size = binsize[bin];
1940 List **b = &bucket[bin];
1942 p = pool.baseAddr + pn * PAGESIZE;
1943 ptop = p + PAGESIZE;
1944 for (; p < ptop; p += size)
1946 (cast(List *)p).next = *b;
1954 * Marks a range of memory using the conservative bit mask. Used for
1955 * the stack, for the data segment, and additional memory ranges.
1957 void mark_conservative(void* pbot, void* ptop)
1959 mark(pbot, ptop, PointerMap.init.bits.ptr);
1964 * Search a range of memory values and mark any pointers into the GC pool.
1966 void mark(void *pbot, void *ptop, size_t* pm_bitmask)
1968 const BITS_PER_WORD = size_t.sizeof * 8;
1970 void **p1 = cast(void **)pbot;
1971 void **p2 = cast(void **)ptop;
1975 // TODO: add option to be conservative
1976 // force conservative scanning
1977 //pm_bitmask = PointerMap.init.bits.ptr;
1979 size_t type_size = pm_bitmask[0];
1980 size_t* pm_bits = pm_bitmask + 1;
1982 //printf("marking range: %p -> %p\n", pbot, ptop);
1983 for (; p1 + type_size <= p2; p1 += type_size) {
1984 for (size_t n = 0; n < type_size; n++) {
1985 // skip words whose pointer-map (scan) bit is not set
1986 if (!(pm_bits[n / BITS_PER_WORD] & (1 << (n % BITS_PER_WORD))))
1989 void* p = *(p1 + n);
1991 if (p < minAddr || p >= maxAddr)
1994 if ((cast(size_t)p & ~(PAGESIZE-1)) == pcache)
1997 Pool* pool = findPool(p);
2000 size_t offset = cast(size_t)(p - pool.baseAddr);
2002 size_t pn = offset / PAGESIZE;
2003 Bins bin = cast(Bins)pool.pagetable[pn];
2005 // Adjust bit to be at start of allocated memory block
2007 bit_i = (offset & notbinsize[bin]) >> 4;
2008 else if (bin == B_PAGEPLUS)
2014 while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
2015 bit_i = pn * (PAGESIZE / 16);
2019 // Don't mark bits in B_FREE pages
2023 if (bin >= B_PAGE) // Cache B_PAGE and B_PAGEPLUS lookups
2024 pcache = cast(size_t)p & ~(PAGESIZE-1);
2026 if (!pool.mark.test(bit_i))
2028 pool.mark.set(bit_i);
2029 if (!pool.noscan.test(bit_i))
2031 pool.scan.set(bit_i);
2038 anychanges |= changes;
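/* Illustrative pointer-map layout (as consumed above; struct S is a
 * hypothetical example): pm_bitmask[0] holds the type size in words and the
 * following words are a bitmap with one bit per word of the type. For
 *   struct S { size_t len; void* ptr; }   // 2 words, only word 1 is a pointer
 * the bitmask would be [2, 0b10], so mark() only dereferences *(p1 + 1) in
 * each instance of S within the scanned range.
 */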
2042 * Return number of full pages free'd.
2044 size_t fullcollectshell()
2046 stats.collection_started();
2048 stats.collection_finished();
2050 // The purpose of the 'shell' is to ensure all the registers
2051 // get put on the stack so they'll be scanned
2056 gcc.builtins.__builtin_unwind_init();
2063 uint eax,ecx,edx,ebx,ebp,esi,edi;
2076 else version (X86_64)
2078 ulong rax,rbx,rcx,rdx,rbp,rsi,rdi,r8,r9,r10,r11,r12,r13,r14,r15;
2081 movq rax[RBP], RAX ;
2082 movq rbx[RBP], RBX ;
2083 movq rcx[RBP], RCX ;
2084 movq rdx[RBP], RDX ;
2085 movq rbp[RBP], RBP ;
2086 movq rsi[RBP], RSI ;
2087 movq rdi[RBP], RDI ;
2090 movq r10[RBP], R10 ;
2091 movq r11[RBP], R11 ;
2092 movq r12[RBP], R12 ;
2093 movq r13[RBP], R13 ;
2094 movq r14[RBP], R14 ;
2095 movq r15[RBP], R15 ;
2101 static assert( false, "Architecture not supported." );
2112 result = fullcollect(sp);
2135 size_t fullcollect(void *stackTop)
2140 debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
2142 thread_suspendAll();
2143 stats.world_stopped();
2149 for (n = 0; n < pools.length; n++)
2154 pool.freebits.zero();
2157 // Mark each free entry, so it doesn't get scanned
2158 for (n = 0; n < B_PAGE; n++)
2160 for (List *list = bucket[n]; list; list = list.next)
2162 pool = findPool(list);
2164 pool.freebits.set(cast(size_t)(cast(byte*)list - pool.baseAddr) / 16);
2168 for (n = 0; n < pools.length; n++)
2171 pool.mark.copy(&pool.freebits);
2174 rt_scanStaticData( &mark_conservative );
2178 // Scan stacks and registers for each paused thread
2179 thread_scanAll( &mark_conservative, stackTop );
2183 debug(COLLECT_PRINTF) printf("scan roots[]\n");
2184 mark_conservative(roots.ptr, roots.ptr + roots.length);
2187 debug(COLLECT_PRINTF) printf("scan ranges[]\n");
2189 for (n = 0; n < ranges.length; n++)
2191 debug(COLLECT_PRINTF) printf("\t%x .. %x\n", ranges[n].pbot, ranges[n].ptop);
2192 mark_conservative(ranges[n].pbot, ranges[n].ptop);
2196 debug(COLLECT_PRINTF) printf("\tscan heap\n");
2200 for (n = 0; n < pools.length; n++)
2208 bbase = pool.scan.base();
2209 btop = bbase + pool.scan.nwords;
2210 for (b = bbase; b < btop;)
2226 o = pool.baseAddr + (b - bbase) * 32 * 16;
2227 if (!(bitm & 0xFFFF))
2232 for (; bitm; o += 16, bitm >>= 1)
2237 pn = cast(size_t)(o - pool.baseAddr) / PAGESIZE;
2238 bin = cast(Bins)pool.pagetable[pn];
2240 auto end_of_blk = cast(size_t**)(o + binsize[bin] -
2242 size_t* pm_bitmask = *end_of_blk;
2243 mark(o, end_of_blk, pm_bitmask);
2245 else if (bin == B_PAGE || bin == B_PAGEPLUS)
2247 if (bin == B_PAGEPLUS)
2249 while (pool.pagetable[pn - 1] != B_PAGE)
2253 while (pn + u < pool.npages &&
2254 pool.pagetable[pn + u] == B_PAGEPLUS)
2257 size_t blk_size = u * PAGESIZE;
2258 auto end_of_blk = cast(size_t**)(o + blk_size -
2260 size_t* pm_bitmask = *end_of_blk;
2261 mark(o, end_of_blk, pm_bitmask);
2269 stats.world_started();
2271 // Free up everything not marked
2272 debug(COLLECT_PRINTF) printf("\tfree'ing\n");
2273 size_t freedpages = 0;
2275 for (n = 0; n < pools.length; n++)
2278 uint* bbase = pool.mark.base();
2280 for (pn = 0; pn < pool.npages; pn++, bbase += PAGESIZE / (32 * 16))
2282 Bins bin = cast(Bins)pool.pagetable[pn];
2286 auto size = binsize[bin];
2287 byte* p = pool.baseAddr + pn * PAGESIZE;
2288 byte* ptop = p + PAGESIZE;
2289 size_t bit_i = pn * (PAGESIZE/16);
2290 size_t bit_stride = size / 16;
2292 version(none) // BUG: doesn't work because freebits() must also be cleared
2294 // If free'd entire page
2295 if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 &&
2296 bbase[3] == 0 && bbase[4] == 0 && bbase[5] == 0 &&
2297 bbase[6] == 0 && bbase[7] == 0)
2299 for (; p < ptop; p += size, bit_i += bit_stride)
2301 if (pool.finals.nbits && pool.finals.testClear(bit_i)) {
2302 if (opts.options.sentinel)
2303 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2305 rt_finalize(cast(List *)p, false/*noStack > 0*/);
2307 gcx.clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
2309 List *list = cast(List *)p;
2311 if (opts.options.mem_stomp)
2312 memset(p, 0xF3, size);
2314 pool.pagetable[pn] = B_FREE;
2319 for (; p < ptop; p += size, bit_i += bit_stride)
2321 if (!pool.mark.test(bit_i))
2323 if (opts.options.sentinel)
2324 sentinel_Invariant(sentinel_add(p));
2326 pool.freebits.set(bit_i);
2327 if (pool.finals.nbits && pool.finals.testClear(bit_i)) {
2328 if (opts.options.sentinel)
2329 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2331 rt_finalize(cast(List *)p, false/*noStack > 0*/);
2333 clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
2335 List *list = cast(List *)p;
2337 if (opts.options.mem_stomp)
2338 memset(p, 0xF3, size);
2344 else if (bin == B_PAGE)
2346 size_t bit_i = pn * (PAGESIZE / 16);
2347 if (!pool.mark.test(bit_i))
2349 byte *p = pool.baseAddr + pn * PAGESIZE;
2350 if (opts.options.sentinel)
2351 sentinel_Invariant(sentinel_add(p));
2352 if (pool.finals.nbits && pool.finals.testClear(bit_i)) {
2353 if (opts.options.sentinel)
2354 rt_finalize(sentinel_add(p), false/*noStack > 0*/);
2356 rt_finalize(p, false/*noStack > 0*/);
2358 clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
2360 debug(COLLECT_PRINTF) printf("\tcollecting big %x\n", p);
2361 pool.pagetable[pn] = B_FREE;
2363 if (opts.options.mem_stomp)
2364 memset(p, 0xF3, PAGESIZE);
2365 while (pn + 1 < pool.npages && pool.pagetable[pn + 1] == B_PAGEPLUS)
2368 pool.pagetable[pn] = B_FREE;
2371 if (opts.options.mem_stomp)
2374 memset(p, 0xF3, PAGESIZE);
2385 // Free complete pages, rebuild free list
2386 debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
2387 size_t recoveredpages = 0;
2388 for (n = 0; n < pools.length; n++)
2391 for (size_t pn = 0; pn < pool.npages; pn++)
2393 Bins bin = cast(Bins)pool.pagetable[pn];
2399 size_t size = binsize[bin];
2400 size_t bit_stride = size / 16;
2401 size_t bit_base = pn * (PAGESIZE / 16);
2402 size_t bit_top = bit_base + (PAGESIZE / 16);
2406 for (; bit_i < bit_top; bit_i += bit_stride)
2408 if (!pool.freebits.test(bit_i))
2411 pool.pagetable[pn] = B_FREE;
2416 p = pool.baseAddr + pn * PAGESIZE;
2417 for (u = 0; u < PAGESIZE; u += size)
2419 bit_i = bit_base + u / 16;
2420 if (pool.freebits.test(bit_i))
2422 List *list = cast(List *)(p + u);
2423 // avoid unnecessary writes
2424 if (list.next != bucket[bin])
2425 list.next = bucket[bin];
2433 debug(COLLECT_PRINTF) printf("recovered pages = %d\n", recoveredpages);
2434 debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, pools.length);
2436 return freedpages + recoveredpages;
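// Illustrative phase summary of fullcollect() (reconstructed from the code
// above): stop the world; clear the mark/scan bits; pre-mark free-list
// entries so they are not scanned; conservatively scan static data, thread
// stacks/registers, roots and ranges; propagate marks through flagged
// blocks until nothing changes; resume the world; sweep unmarked blocks,
// running finalizers; finally recover fully free bin pages and rebuild the
// free lists.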
2443 uint getAttr(Pool* pool, size_t bit_i)
2452 if (pool.finals.nbits &&
2453 pool.finals.test(bit_i))
2454 attrs |= BlkAttr.FINALIZE;
2455 if (pool.noscan.test(bit_i))
2456 attrs |= BlkAttr.NO_SCAN;
2457 // if (pool.nomove.nbits &&
2458 // pool.nomove.test(bit_i))
2459 // attrs |= BlkAttr.NO_MOVE;
2467 void setAttr(Pool* pool, size_t bit_i, uint mask)
2474 if (mask & BlkAttr.FINALIZE)
2476 if (!pool.finals.nbits)
2477 pool.finals.alloc(pool.mark.nbits);
2478 pool.finals.set(bit_i);
2480 if (mask & BlkAttr.NO_SCAN)
2482 pool.noscan.set(bit_i);
2484 // if (mask & BlkAttr.NO_MOVE)
2486 // if (!pool.nomove.nbits)
2487 // pool.nomove.alloc(pool.mark.nbits);
2488 // pool.nomove.set(bit_i);
2496 void clrAttr(Pool* pool, size_t bit_i, uint mask)
2503 if (mask & BlkAttr.FINALIZE && pool.finals.nbits)
2504 pool.finals.clear(bit_i);
2505 if (mask & BlkAttr.NO_SCAN)
2506 pool.noscan.clear(bit_i);
2507 // if (mask & BlkAttr.NO_MOVE && pool.nomove.nbits)
2508 // pool.nomove.clear(bit_i);
2514 /* ============================ Pool =============================== */
2521 GCBits mark; // entries already scanned, or should not be scanned
2522 GCBits scan; // entries that need to be scanned
2523 GCBits freebits; // entries that are on the free list
2524 GCBits finals; // entries that need finalizer run on them
2525 GCBits noscan; // entries that should not be scanned
2531 void initialize(size_t npages)
2533 size_t poolsize = npages * PAGESIZE;
2534 assert(poolsize >= POOLSIZE);
2535 baseAddr = cast(byte *) alloc.os_mem_map(poolsize);
2537 // Some of the code depends on page alignment of memory pools
2538 assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);
2546 topAddr = baseAddr + poolsize;
2548 mark.alloc(cast(size_t)poolsize / 16);
2549 scan.alloc(cast(size_t)poolsize / 16);
2550 freebits.alloc(cast(size_t)poolsize / 16);
2551 noscan.alloc(cast(size_t)poolsize / 16);
2553 pagetable = cast(ubyte*) cstdlib.malloc(npages);
2555 onOutOfMemoryError();
2556 memset(pagetable, B_FREE, npages);
2558 this.npages = npages;
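// Illustrative sizing note (derived from the alloc() calls above): each
// GCBits set keeps one bit per 16 bytes of pool memory, so a 1 MiB pool
// needs 65536 bits (8 KiB) per bit set, plus a 256-byte pagetable for its
// 256 pages.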
2570 result = alloc.os_mem_unmap(baseAddr, npages * PAGESIZE);
2578 // See Gcx.Dtor() for the rationale of the null check.
2580 cstdlib.free(pagetable);
2590 void Invariant() { }
2597 //freebits.Invariant();
2598 //finals.Invariant();
2599 //noscan.Invariant();
2603 //if (baseAddr + npages * PAGESIZE != topAddr)
2604 //printf("baseAddr = %p, npages = %d, topAddr = %p\n", baseAddr, npages, topAddr);
2605 assert(baseAddr + npages * PAGESIZE == topAddr);
2608 for (size_t i = 0; i < npages; i++)
2610 Bins bin = cast(Bins)pagetable[i];
2611 assert(bin < B_MAX);
2617 * Allocate n pages from Pool.
2618 * Returns OPFAIL on failure.
2620 size_t allocPages(size_t n)
2626 for (i = 0; i < npages; i++)
2628 if (pagetable[i] == B_FREE)
2641 * Free npages pages starting with pagenum.
2643 void freePages(size_t pagenum, size_t npages)
2645 memset(&pagetable[pagenum], B_FREE, npages);
2650 * Used for sorting pools
2652 int opCmp(in Pool other)
2654 if (baseAddr < other.baseAddr)
2657 return cast(int)(baseAddr > other.baseAddr);
2662 /* ============================ SENTINEL =============================== */
2665 const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
2666 const ubyte SENTINEL_POST = 0xF5; // 8 bits
2667 const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;
2670 size_t* sentinel_size(void *p) { return &(cast(size_t *)p)[-2]; }
2671 size_t* sentinel_pre(void *p) { return &(cast(size_t *)p)[-1]; }
2672 ubyte* sentinel_post(void *p) { return &(cast(ubyte *)p)[*sentinel_size(p)]; }
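/* Illustrative layout with sentinels enabled (derived from the accessors
 * above): a raw allocation of size + SENTINEL_EXTRA bytes is laid out as
 *   [ size_t size | size_t SENTINEL_PRE | user data (size bytes) | ubyte SENTINEL_POST ]
 * and sentinel_add()/sentinel_sub() translate between the raw pointer and
 * the user-data pointer by skipping the two leading words.
 */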
2675 void sentinel_init(void *p, size_t size)
2677 *sentinel_size(p) = size;
2678 *sentinel_pre(p) = SENTINEL_PRE;
2679 *sentinel_post(p) = SENTINEL_POST;
2683 void sentinel_Invariant(void *p)
2685 assert(*sentinel_pre(p) == SENTINEL_PRE);
2686 assert(*sentinel_post(p) == SENTINEL_POST);
2690 void *sentinel_add(void *p)
2692 return p + 2 * size_t.sizeof;
2696 void *sentinel_sub(void *p)
2698 return p - 2 * size_t.sizeof;
2702 // vim: set et sw=4 sts=4 :