2 * This module contains the garbage collector implementation.
4 * Copyright: Copyright (C) 2001-2007 Digital Mars, www.digitalmars.com.
7 * This software is provided 'as-is', without any express or implied
8 * warranty. In no event will the authors be held liable for any damages
9 * arising from the use of this software.
11 * Permission is granted to anyone to use this software for any purpose,
12 * including commercial applications, and to alter it and redistribute it
13 * freely, in both source and binary form, subject to the following
16 * o The origin of this software must not be misrepresented; you must not
17 * claim that you wrote the original software. If you use this software
18 * in a product, an acknowledgment in the product documentation would be
19 * appreciated but is not required.
20 * o Altered source versions must be plainly marked as such, and must not
21 * be misrepresented as being the original software.
22 * o This notice may not be removed or altered from any source
24 * Authors: Walter Bright, David Friedman, Sean Kelly
29 // D Programming Language Garbage Collector implementation
31 /************** Debugging ***************************/
33 //debug = PRINTF; // turn on printf's
34 //debug = COLLECT_PRINTF; // turn on printf's
35 //debug = LOGGING; // log allocations / frees
36 //debug = MEMSTOMP; // stomp on memory
37 //debug = SENTINEL; // add underrun/overrun protection
38 //debug = PTRCHECK; // more pointer checking
39 //debug = PTRCHECK2; // thorough but slow pointer checking
41 /*************** Configuration *********************/
43 version = STACKGROWSDOWN; // growing the stack means subtracting from the stack pointer
44 // (use for Intel X86 CPUs)
45 // else growing the stack means adding to the stack pointer
47 /***************************************************/
49 import rt.gc.cdgc.bits: GCBits;
50 import rt.gc.cdgc.stats: GCStats;
51 import alloc = rt.gc.cdgc.alloc;
52 import libc = rt.gc.cdgc.libc;
57 // BUG: The following import will likely not work, since the gcc
58 // subdirectory is elsewhere. Instead, perhaps the functions
59 // could be declared directly or some other resolution could
61 static import gcc.builtins; // for __builtin_unwind_init
76 FINALIZE = 0b0000_0001,
77 NO_SCAN = 0b0000_0010,
78 NO_MOVE = 0b0000_0100,
79 ALL_BITS = 0b1111_1111
82 extern (C) void* rt_stackBottom();
83 extern (C) void* rt_stackTop();
85 extern (C) void rt_finalize( void* p, bool det = true );
87 alias void delegate(Object) DEvent;
88 extern (C) void rt_attachDisposeEvent(Object h, DEvent e);
89 extern (C) bool rt_detachDisposeEvent(Object h, DEvent e);
92 alias void delegate( void*, void* ) scanFn;
94 extern (C) void rt_scanStaticData( scanFn scan );
96 extern (C) bool thread_needLock();
97 extern (C) void thread_suspendAll();
98 extern (C) void thread_resumeAll();
100 extern (C) void thread_scanAll( scanFn fn, void* curStackTop = null );
102 extern (C) void onOutOfMemoryError();
106 OPFAIL = ~cast(size_t)0
114 /* ======================= Leak Detector =========================== */
129 printf(" p = %x, size = %d, parent = %x ", p, size, parent);
132 printf("%s(%u)", file, line);
152 void reserve(size_t nentries)
154 assert(dim <= allocdim);
155 if (allocdim - dim < nentries)
157 allocdim = (dim + nentries) * 2;
158 assert(dim + nentries <= allocdim);
161 data = cast(Log*) libc.malloc(allocdim * Log.sizeof);
162 if (!data && allocdim)
163 onOutOfMemoryError();
167 Log *newdata = cast(Log*) libc.malloc(
168 allocdim * Log.sizeof);
169 if (!newdata && allocdim)
170 onOutOfMemoryError();
171 libc.memcpy(newdata, data, dim * Log.sizeof);
185 void remove(size_t i)
187 libc.memmove(data + i, data + i + 1, (dim - i) * Log.sizeof);
194 for (size_t i = 0; i < dim; i++)
199 return OPFAIL; // not found
203 void copy(LogArray *from)
205 reserve(from.dim - dim);
206 assert(from.dim <= allocdim);
207 libc.memcpy(data, from.data, from.dim * Log.sizeof);
214 /* ============================ GC =============================== */
217 class GCLock { } // just a dummy so we can get a global lock
220 const uint GCVERSION = 1; // increment every time we change interface
225 // For passing to debug code
229 uint gcversion = GCVERSION;
231 Gcx *gcx; // implementation
232 static ClassInfo gcLock; // global lock
237 gcLock = GCLock.classinfo;
238 gcx = cast(Gcx*) libc.calloc(1, Gcx.sizeof);
240 onOutOfMemoryError();
242 setStackBottom(rt_stackBottom());
262 if (!thread_needLock())
264 assert(gcx.disabled > 0);
267 else synchronized (gcLock)
269 assert(gcx.disabled > 0);
280 if (!thread_needLock())
284 else synchronized (gcLock)
294 uint getAttr(void* p)
303 Pool* pool = gcx.findPool(p);
308 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
310 oldb = gcx.getBits(pool, biti);
315 if (!thread_needLock())
319 else synchronized (gcLock)
329 uint setAttr(void* p, uint mask)
338 Pool* pool = gcx.findPool(p);
343 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
345 oldb = gcx.getBits(pool, biti);
346 gcx.setBits(pool, biti, mask);
351 if (!thread_needLock())
355 else synchronized (gcLock)
365 uint clrAttr(void* p, uint mask)
374 Pool* pool = gcx.findPool(p);
379 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
381 oldb = gcx.getBits(pool, biti);
382 gcx.clrBits(pool, biti, mask);
387 if (!thread_needLock())
391 else synchronized (gcLock)
401 void *malloc(size_t size, uint bits = 0)
408 if (!thread_needLock())
410 return mallocNoSync(size, bits);
412 else synchronized (gcLock)
414 return mallocNoSync(size, bits);
422 private void *mallocNoSync(size_t size, uint bits = 0)
429 //debug(PRINTF) printf("GC::malloc(size = %d, gcx = %p)\n", size, gcx);
432 size += SENTINEL_EXTRA;
435 // Cache previous binsize lookup - Dave Fladebo.
436 static size_t lastsize = -1;
438 if (size == lastsize)
442 bin = gcx.findBin(size);
452 if (!gcx.allocPage(bin) && !gcx.disabled) // try to find a new page
454 if (!thread_needLock())
456 /* Then we haven't locked it yet. Be sure
457 * and lock for a collection, since a finalizer
458 * may start a new thread.
460 synchronized (gcLock)
462 gcx.fullcollectshell();
465 else if (!gcx.fullcollectshell()) // collect to find a new page
470 if (!gcx.bucket[bin] && !gcx.allocPage(bin))
472 gcx.newPool(1); // allocate new pool to find a new page
473 int result = gcx.allocPage(bin);
475 onOutOfMemoryError();
480 // Return next item from free list
481 gcx.bucket[bin] = (cast(List*)p).next;
482 if( !(bits & BlkAttr.NO_SCAN) )
483 libc.memset(p + size, 0, binsize[bin] - size);
484 //debug(PRINTF) printf("\tmalloc => %x\n", p);
485 debug (MEMSTOMP) libc.memset(p, 0xF0, size);
489 p = gcx.bigAlloc(size);
491 onOutOfMemoryError();
493 size -= SENTINEL_EXTRA;
495 sentinel_init(p, size);
496 gcx.log_malloc(p, size);
500 Pool *pool = gcx.findPool(p);
503 gcx.setBits(pool, cast(size_t)(p - pool.baseAddr) / 16, bits);
512 void *calloc(size_t size, uint bits = 0)
519 if (!thread_needLock())
521 return callocNoSync(size, bits);
523 else synchronized (gcLock)
525 return callocNoSync(size, bits);
533 private void *callocNoSync(size_t size, uint bits = 0)
537 //debug(PRINTF) printf("calloc: %x len %d\n", p, len);
538 void *p = mallocNoSync(size, bits);
539 libc.memset(p, 0, size);
547 void *realloc(void *p, size_t size, uint bits = 0)
549 if (!thread_needLock())
551 return reallocNoSync(p, size, bits);
553 else synchronized (gcLock)
555 return reallocNoSync(p, size, bits);
563 private void *reallocNoSync(void *p, size_t size, uint bits = 0)
575 p = mallocNoSync(size, bits);
582 //debug(PRINTF) printf("GC::realloc(p = %x, size = %u)\n", p, size);
585 sentinel_Invariant(p);
586 psize = *sentinel_size(p);
591 Pool *pool = gcx.findPool(p);
595 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
599 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
600 gcx.setBits(pool, biti, bits);
604 bits = gcx.getBits(pool, biti);
608 p2 = mallocNoSync(size, bits);
611 //debug(PRINTF) printf("\tcopying %d bytes\n",size);
612 libc.memcpy(p2, p, size);
618 psize = gcx.findSize(p); // find allocated size
619 if (psize >= PAGESIZE && size >= PAGESIZE)
621 auto psz = psize / PAGESIZE;
622 auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
626 auto pool = gcx.findPool(p);
627 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
632 synchronized (gcLock)
635 libc.memset(p + size, 0xF2, psize - size);
636 pool.freePages(pagenum + newsz, psz - newsz);
640 else if (pagenum + newsz <= pool.npages)
642 // Attempt to expand in place
643 synchronized (gcLock)
645 for (size_t i = pagenum + psz; 1;)
647 if (i == pagenum + newsz)
650 libc.memset(p + psize, 0xF0,
652 libc.memset(&pool.pagetable[pagenum + psz],
653 B_PAGEPLUS, newsz - psz);
656 if (i == pool.npages)
660 if (pool.pagetable[i] != B_FREE)
667 if (psize < size || // if new size is bigger
668 psize > size * 2) // or less than half
672 Pool *pool = gcx.findPool(p);
676 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
680 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
681 gcx.setBits(pool, biti, bits);
685 bits = gcx.getBits(pool, biti);
689 p2 = mallocNoSync(size, bits);
692 //debug(PRINTF) printf("\tcopying %d bytes\n",size);
693 libc.memcpy(p2, p, size);
703 * Attempt to in-place enlarge the memory block pointed to by p by at least
704 * minbytes beyond its current capacity, up to a maximum of maxsize. This
705 * does not attempt to move the memory block (like realloc() does).
708 * 0 if could not extend p,
709 * total size of entire memory block if successful.
711 size_t extend(void* p, size_t minsize, size_t maxsize)
713 if (!thread_needLock())
715 return extendNoSync(p, minsize, maxsize);
717 else synchronized (gcLock)
719 return extendNoSync(p, minsize, maxsize);
727 private size_t extendNoSync(void* p, size_t minsize, size_t maxsize)
730 assert( minsize <= maxsize );
734 //debug(PRINTF) printf("GC::extend(p = %x, minsize = %u, maxsize = %u)\n", p, minsize, maxsize);
739 auto psize = gcx.findSize(p); // find allocated size
740 if (psize < PAGESIZE)
741 return 0; // cannot extend buckets
743 auto psz = psize / PAGESIZE;
744 auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
745 auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
747 auto pool = gcx.findPool(p);
748 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
751 for (sz = 0; sz < maxsz; sz++)
753 auto i = pagenum + psz + sz;
754 if (i == pool.npages)
756 if (pool.pagetable[i] != B_FREE)
766 libc.memset(p + psize, 0xF0, (psz + sz) * PAGESIZE - psize);
767 libc.memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
770 return (psz + sz) * PAGESIZE;
777 size_t reserve(size_t size)
784 if (!thread_needLock())
786 return reserveNoSync(size);
788 else synchronized (gcLock)
790 return reserveNoSync(size);
798 private size_t reserveNoSync(size_t size)
803 return gcx.reserve(size);
817 if (!thread_needLock())
819 return freeNoSync(p);
821 else synchronized (gcLock)
823 return freeNoSync(p);
831 private void freeNoSync(void *p)
840 // Find which page it is in
841 pool = gcx.findPool(p);
842 if (!pool) // if not one of ours
844 sentinel_Invariant(p);
846 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
847 biti = cast(size_t)(p - pool.baseAddr) / 16;
848 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
850 bin = cast(Bins)pool.pagetable[pagenum];
851 if (bin == B_PAGE) // if large alloc
856 while (++n < pool.npages && pool.pagetable[n] == B_PAGEPLUS)
858 debug (MEMSTOMP) libc.memset(p, 0xF2, npages * PAGESIZE);
859 pool.freePages(pagenum, npages);
864 List *list = cast(List*)p;
866 debug (MEMSTOMP) libc.memset(p, 0xF2, binsize[bin]);
868 list.next = gcx.bucket[bin];
869 gcx.bucket[bin] = list;
871 gcx.log_free(sentinel_add(p));
876 * Determine the base address of the block containing p. If p is not a gc
877 * allocated pointer, return null.
879 void* addrOf(void *p)
886 if (!thread_needLock())
888 return addrOfNoSync(p);
890 else synchronized (gcLock)
892 return addrOfNoSync(p);
900 void* addrOfNoSync(void *p)
907 return gcx.findBase(p);
912 * Determine the allocated size of pointer p. If p is an interior pointer
913 * or not a gc allocated pointer, return 0.
915 size_t sizeOf(void *p)
922 if (!thread_needLock())
924 return sizeOfNoSync(p);
926 else synchronized (gcLock)
928 return sizeOfNoSync(p);
936 private size_t sizeOfNoSync(void *p)
943 size_t size = gcx.findSize(p);
945 // Check for interior pointer
947 // 1) size is a power of 2 for less than PAGESIZE values
948 // 2) base of memory pool is aligned on PAGESIZE boundary
949 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
951 return size ? size - SENTINEL_EXTRA : 0;
955 if (p == gcx.p_cache)
956 return gcx.size_cache;
958 size_t size = gcx.findSize(p);
960 // Check for interior pointer
962 // 1) size is a power of 2 for less than PAGESIZE values
963 // 2) base of memory pool is aligned on PAGESIZE boundary
964 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
969 gcx.size_cache = size;
978 * Determine the base address of the block containing p. If p is not a gc
979 * allocated pointer, return null.
981 BlkInfo query(void *p)
989 if (!thread_needLock())
991 return queryNoSync(p);
993 else synchronized (gcLock)
995 return queryNoSync(p);
1003 BlkInfo queryNoSync(void *p)
1007 return gcx.getInfo(p);
1012 * Verify that pointer p:
1013 * 1) belongs to this memory pool
1014 * 2) points to the start of an allocated piece of memory
1015 * 3) is not on a free list
1024 if (!thread_needLock())
1028 else synchronized (gcLock)
1038 private void checkNoSync(void *p)
1042 sentinel_Invariant(p);
1050 p = sentinel_sub(p);
1051 pool = gcx.findPool(p);
1053 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1054 bin = cast(Bins)pool.pagetable[pagenum];
1055 assert(bin <= B_PAGE);
1056 size = binsize[bin];
1057 assert((cast(size_t)p & (size - 1)) == 0);
1063 // Check that p is not on a free list
1066 for (list = gcx.bucket[bin]; list; list = list.next)
1068 assert(cast(void*)list != p);
1079 private void setStackBottom(void *p)
1081 version (STACKGROWSDOWN)
1083 //p = (void *)((uint *)p + 4);
1084 if (p > gcx.stackBottom)
1086 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1087 gcx.stackBottom = p;
1092 //p = (void *)((uint *)p - 4);
1093 if (p < gcx.stackBottom)
1095 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1096 gcx.stackBottom = cast(char*)p;
1103 * add p to list of roots
1105 void addRoot(void *p)
1112 if (!thread_needLock())
1116 else synchronized (gcLock)
1124 * remove p from list of roots
1126 void removeRoot(void *p)
1133 if (!thread_needLock())
1137 else synchronized (gcLock)
1145 * add range to scan for roots
1147 void addRange(void *p, size_t sz)
1154 //debug(PRINTF) printf("+GC.addRange(pbot = x%x, ptop = x%x)\n", pbot, ptop);
1155 if (!thread_needLock())
1157 gcx.addRange(p, p + sz);
1159 else synchronized (gcLock)
1161 gcx.addRange(p, p + sz);
1163 //debug(PRINTF) printf("-GC.addRange()\n");
1170 void removeRange(void *p)
1177 if (!thread_needLock())
1181 else synchronized (gcLock)
1189 * do full garbage collection
1193 debug(PRINTF) printf("GC.fullCollect()\n");
1195 if (!thread_needLock())
1197 gcx.fullcollectshell();
1199 else synchronized (gcLock)
1201 gcx.fullcollectshell();
1209 debug(PRINTF) printf("poolsize = %x, usedsize = %x, freelistsize = %x\n",
1210 stats.poolsize, stats.usedsize, stats.freelistsize);
1218 * do full garbage collection ignoring roots
1220 void fullCollectNoStack()
1222 if (!thread_needLock())
1225 gcx.fullcollectshell();
1228 else synchronized (gcLock)
1231 gcx.fullcollectshell();
1238 * minimize free space usage
1242 if (!thread_needLock())
1246 else synchronized (gcLock)
1254 * Retrieve statistics about garbage collection.
1255 * Useful for debugging and tuning.
1257 void getStats(out GCStats stats)
1259 if (!thread_needLock())
1261 getStatsNoSync(stats);
1263 else synchronized (gcLock)
1265 getStatsNoSync(stats);
1273 private void getStatsNoSync(out GCStats stats)
1282 //debug(PRINTF) printf("getStats()\n");
1283 libc.memset(&stats, 0, GCStats.sizeof);
1285 for (n = 0; n < gcx.npools; n++)
1287 Pool *pool = gcx.pooltable[n];
1288 psize += pool.npages * PAGESIZE;
1289 for (size_t j = 0; j < pool.npages; j++)
1291 Bins bin = cast(Bins)pool.pagetable[j];
1294 else if (bin == B_PAGE)
1296 else if (bin < B_PAGE)
1301 for (n = 0; n < B_PAGE; n++)
1303 //debug(PRINTF) printf("bin %d\n", n);
1304 for (List *list = gcx.bucket[n]; list; list = list.next)
1306 //debug(PRINTF) printf("\tlist %x\n", list);
1307 flsize += binsize[n];
1311 usize = bsize - flsize;
1313 stats.poolsize = psize;
1314 stats.usedsize = bsize - flsize;
1315 stats.freelistsize = flsize;
1318 /******************* weak-reference support *********************/
1320 // call locked if necessary
1321 private T locked(T)(in T delegate() code)
1323 if (thread_needLock)
1324 synchronized(gcLock) return code();
1329 private struct WeakPointer
1333 void ondestroy(Object r)
1335 assert(r is reference);
1336 // lock for memory consistency (parallel readers)
1337 // also ensures that weakpointerDestroy can be called while another
1338 // thread is freeing the reference with "delete"
1339 locked!(void)({ reference = null; });
1344 * Create a weak pointer to the given object.
1345 * Returns a pointer to an opaque struct allocated in C memory.
1347 void* weakpointerCreate( Object r )
1351 // must be allocated in C memory
1352 // 1. to hide the reference from the GC
1353 // 2. the GC doesn't scan delegates added by rt_attachDisposeEvent
1355 auto wp = cast(WeakPointer*)(libc.malloc(WeakPointer.sizeof));
1357 onOutOfMemoryError();
1359 rt_attachDisposeEvent(r, &wp.ondestroy);
1366 * Destroy a weak pointer returned by weakpointerCreate().
1367 * If null is passed, nothing happens.
1369 void weakpointerDestroy( void* p )
1373 auto wp = cast(WeakPointer*)p;
1374 // must be extra careful about the GC or parallel threads
1375 // finalizing the reference at the same time
1378 rt_detachDisposeEvent(wp.reference, &wp.ondestroy);
1385 * Query a weak pointer and return either the object passed to
1386 * weakpointerCreate, or null if it was free'd in the meantime.
1387 * If null is passed, null is returned.
1389 Object weakpointerGet( void* p )
1393 // NOTE: could avoid the lock by using Fawzi style GC counters but
1394 // that'd require core.sync.Atomic and lots of care about memory
1395 // consistency; it's an optional optimization, see
1396 // http://dsource.org/projects/tango/browser/trunk/user/tango/core/Lifetime.d?rev=5100#L158
1397 return locked!(Object)({
1398 return (cast(WeakPointer*)p).reference;
1405 /* ============================ Gcx =============================== */
1410 POOLSIZE = (4096*256),
1424 B_PAGE, // start of large alloc
1425 B_PAGEPLUS, // continuation of large alloc
1426 B_FREE, // free page
1447 const uint binsize[B_MAX] = [ 16,32,64,128,256,512,1024,2048,4096 ];
1448 const uint notbinsize[B_MAX] = [ ~(16u-1),~(32u-1),~(64u-1),~(128u-1),~(256u-1),
1449 ~(512u-1),~(1024u-1),~(2048u-1),~(4096u-1) ];
1451 /* ============================ Gcx =============================== */
1468 uint noStack; // !=0 means don't scan stack
1469 uint log; // turn on logging
1473 int disabled; // turn off collections if >0
1475 byte *minAddr; // min(baseAddr)
1476 byte *maxAddr; // max(topAddr)
1481 List *bucket[B_MAX]; // free list for each size
1487 (cast(byte*)this)[0 .. Gcx.sizeof] = 0;
1488 stackBottom = cast(char*)&dummy;
1490 //printf("gcx = %p, self = %x\n", this, self);
1499 for (size_t i = 0; i < npools; i++)
1501 Pool *pool = pooltable[i];
1506 libc.free(pooltable);
1516 void Invariant() { }
1523 //printf("Gcx.invariant(): this = %p\n", this);
1526 for (i = 0; i < npools; i++)
1528 Pool *pool = pooltable[i];
1532 assert(minAddr == pool.baseAddr);
1536 assert(pool.opCmp(pooltable[i + 1]) < 0);
1538 else if (i + 1 == npools)
1540 assert(maxAddr == pool.topAddr);
1546 assert(rootdim != 0);
1547 assert(nroots <= rootdim);
1552 assert(rangedim != 0);
1553 assert(nranges <= rangedim);
1555 for (i = 0; i < nranges; i++)
1557 assert(ranges[i].pbot);
1558 assert(ranges[i].ptop);
1559 assert(ranges[i].pbot <= ranges[i].ptop);
1563 for (i = 0; i < B_PAGE; i++)
1565 for (List *list = bucket[i]; list; list = list.next)
1576 void addRoot(void *p)
1578 if (nroots == rootdim)
1580 size_t newdim = rootdim * 2 + 16;
1583 newroots = cast(void**) libc.malloc(newdim * newroots[0].sizeof);
1585 onOutOfMemoryError();
1588 libc.memcpy(newroots, roots, nroots * newroots[0].sizeof);
1602 void removeRoot(void *p)
1604 for (size_t i = nroots; i--;)
1609 libc.memmove(roots + i, roots + i + 1,
1610 (nroots - i) * roots[0].sizeof);
1621 void addRange(void *pbot, void *ptop)
1623 debug (PRINTF) printf("%x.Gcx::addRange(%x, %x), nranges = %d\n", this,
1624 pbot, ptop, nranges);
1625 if (nranges == rangedim)
1627 size_t newdim = rangedim * 2 + 16;
1630 newranges = cast(Range*) libc.malloc(newdim * newranges[0].sizeof);
1632 onOutOfMemoryError();
1635 libc.memcpy(newranges, ranges, nranges * newranges[0].sizeof);
1641 ranges[nranges].pbot = pbot;
1642 ranges[nranges].ptop = ptop;
1650 void removeRange(void *pbot)
1652 debug (PRINTF) printf("%x.Gcx.removeRange(%x), nranges = %d\n", this,
1654 for (size_t i = nranges; i--;)
1656 if (ranges[i].pbot == pbot)
1659 libc.memmove(ranges + i, ranges + i + 1,
1660 (nranges - i) * ranges[0].sizeof);
1664 debug(PRINTF) printf("Wrong thread\n");
1666 // This is a fatal error, but ignore it.
1667 // The problem is that we can get a Close() call on a thread
1668 // other than the one the range was allocated on.
1674 * Find Pool that pointer is in.
1675 * Return null if not in a Pool.
1676 * Assume pooltable[] is sorted.
1678 Pool *findPool(void *p)
1680 if (p >= minAddr && p < maxAddr)
1684 return pooltable[0];
1687 for (size_t i = 0; i < npools; i++)
1691 pool = pooltable[i];
1692 if (p < pool.topAddr)
1694 if (pool.baseAddr <= p)
1705 * Find base address of block containing pointer p.
1706 * Returns null if not a gc'd pointer
1708 void* findBase(void *p)
1715 size_t offset = cast(size_t)(p - pool.baseAddr);
1716 size_t pn = offset / PAGESIZE;
1717 Bins bin = cast(Bins)pool.pagetable[pn];
1719 // Adjust bit to be at start of allocated memory block
1722 return pool.baseAddr + (offset & notbinsize[bin]);
1724 else if (bin == B_PAGEPLUS)
1728 --pn, offset -= PAGESIZE;
1729 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1731 return pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1735 // we are in a B_FREE page
1744 * Find size of pointer p.
1745 * Returns 0 if not a gc'd pointer
1747 size_t findSize(void *p)
1758 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1759 bin = cast(Bins)pool.pagetable[pagenum];
1760 size = binsize[bin];
1766 pt = &pool.pagetable[0];
1767 for (i = pagenum + 1; i < pool.npages; i++)
1769 if (pt[i] != B_PAGEPLUS)
1772 size = (i - pagenum) * PAGESIZE;
1782 BlkInfo getInfo(void* p)
1790 size_t offset = cast(size_t)(p - pool.baseAddr);
1791 size_t pn = offset / PAGESIZE;
1792 Bins bin = cast(Bins)pool.pagetable[pn];
1794 ////////////////////////////////////////////////////////////////////
1796 ////////////////////////////////////////////////////////////////////
1800 info.base = pool.baseAddr + (offset & notbinsize[bin]);
1802 else if (bin == B_PAGEPLUS)
1806 --pn, offset -= PAGESIZE;
1808 while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1810 info.base = pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1812 // fix bin for use by size calc below
1813 bin = cast(Bins)pool.pagetable[pn];
1816 ////////////////////////////////////////////////////////////////////
1818 ////////////////////////////////////////////////////////////////////
1820 info.size = binsize[bin];
1826 pt = &pool.pagetable[0];
1827 for (i = pn + 1; i < pool.npages; i++)
1829 if (pt[i] != B_PAGEPLUS)
1832 info.size = (i - pn) * PAGESIZE;
1835 ////////////////////////////////////////////////////////////////////
1837 ////////////////////////////////////////////////////////////////////
1839 info.attr = getBits(pool, cast(size_t)(offset / 16));
1846 * Compute bin for size.
1848 static Bins findBin(size_t size)
1857 else if (size <= 32)
1892 * Allocate a new pool of at least size bytes.
1893 * Sort it into pooltable[].
1894 * Mark all memory in the pool as B_FREE.
1895 * Return the actual number of bytes reserved or 0 on error.
1897 size_t reserve(size_t size)
1899 size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
1900 Pool* pool = newPool(npages);
1904 return pool.npages * PAGESIZE;
1909 * Minimizes physical memory usage by returning free pools to the OS.
1917 for (n = 0; n < npools; n++)
1919 pool = pooltable[n];
1920 for (pn = 0; pn < pool.npages; pn++)
1922 if (cast(Bins)pool.pagetable[pn] != B_FREE)
1925 if (pn < pool.npages)
1932 libc.memmove(pooltable + n,
1934 (--npools - n) * (Pool*).sizeof);
1935 minAddr = pooltable[0].baseAddr;
1936 maxAddr = pooltable[npools - 1].topAddr;
1942 * Allocate a chunk of memory that is larger than a page.
1943 * Return null if out of memory.
1945 void *bigAlloc(size_t size)
1955 npages = (size + PAGESIZE - 1) / PAGESIZE;
1959 // This code could use some refinement when repeatedly
1960 // allocating very large arrays.
1962 for (n = 0; n < npools; n++)
1964 pool = pooltable[n];
1965 pn = pool.allocPages(npages);
1980 freedpages = fullcollectshell();
1981 if (freedpages >= npools * ((POOLSIZE / PAGESIZE) / 4))
1986 // Release empty pools to prevent bloat
1988 // Allocate new pool
1989 pool = newPool(npages);
1995 pn = pool.allocPages(npages);
1996 assert(pn != OPFAIL);
1999 // Release empty pools to prevent bloat
2001 // Allocate new pool
2002 pool = newPool(npages);
2005 pn = pool.allocPages(npages);
2006 assert(pn != OPFAIL);
2016 pool.pagetable[pn] = B_PAGE;
2018 libc.memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
2019 p = pool.baseAddr + pn * PAGESIZE;
2020 libc.memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
2021 debug (MEMSTOMP) libc.memset(p, 0xF1, size);
2022 //debug(PRINTF) printf("\tp = %x\n", p);
2026 return null; // let mallocNoSync handle the error
2031 * Allocate a new pool with at least npages in it.
2032 * Sort it into pooltable[].
2033 * Return null if failed.
2035 Pool *newPool(size_t npages)
2038 Pool** newpooltable;
2042 //debug(PRINTF) printf("************Gcx::newPool(npages = %d)****************\n", npages);
2044 // Minimum of POOLSIZE
2045 if (npages < POOLSIZE/PAGESIZE)
2046 npages = POOLSIZE/PAGESIZE;
2047 else if (npages > POOLSIZE/PAGESIZE)
2049 // Give us 150% of requested size, so there's room to extend
2050 auto n = npages + (npages >> 1);
2051 if (n < size_t.max/PAGESIZE)
2055 // Allocate successively larger pools up to 8 megs
2060 n = 8; // cap pool size at 8 megs
2061 n *= (POOLSIZE / PAGESIZE);
2066 pool = cast(Pool *) libc.calloc(1, Pool.sizeof);
2069 pool.initialize(npages);
2073 newnpools = npools + 1;
2074 newpooltable = cast(Pool **) libc.realloc(pooltable,
2075 newnpools * (Pool *).sizeof);
2079 // Sort pool into newpooltable[]
2080 for (i = 0; i < npools; i++)
2082 if (pool.opCmp(newpooltable[i]) < 0)
2085 libc.memmove(newpooltable + i + 1, newpooltable + i,
2086 (npools - i) * (Pool *).sizeof);
2087 newpooltable[i] = pool;
2089 pooltable = newpooltable;
2092 minAddr = pooltable[0].baseAddr;
2093 maxAddr = pooltable[npools - 1].topAddr;
2105 * Allocate a page of bin's.
2109 int allocPage(Bins bin)
2117 //debug(PRINTF) printf("Gcx::allocPage(bin = %d)\n", bin);
2118 for (n = 0; n < npools; n++)
2120 pool = pooltable[n];
2121 pn = pool.allocPages(1);
2128 pool.pagetable[pn] = cast(ubyte)bin;
2130 // Convert page to free list
2131 size_t size = binsize[bin];
2132 List **b = &bucket[bin];
2134 p = pool.baseAddr + pn * PAGESIZE;
2135 ptop = p + PAGESIZE;
2136 for (; p < ptop; p += size)
2138 (cast(List *)p).next = *b;
2146 * Search a range of memory values and mark any pointers into the GC pool.
2148 void mark(void *pbot, void *ptop)
2150 void **p1 = cast(void **)pbot;
2151 void **p2 = cast(void **)ptop;
2155 //printf("marking range: %p -> %p\n", pbot, ptop);
2156 for (; p1 < p2; p1++)
2159 byte *p = cast(byte *)(*p1);
2161 //if (log) debug(PRINTF) printf("\tmark %x\n", p);
2162 if (p >= minAddr && p < maxAddr)
2164 if ((cast(size_t)p & ~(PAGESIZE-1)) == pcache)
2170 size_t offset = cast(size_t)(p - pool.baseAddr);
2172 size_t pn = offset / PAGESIZE;
2173 Bins bin = cast(Bins)pool.pagetable[pn];
2175 //debug(PRINTF) printf("\t\tfound pool %x, base=%x, pn = %d, bin = %d, biti = x%x\n", pool, pool.baseAddr, pn, bin, biti);
2177 // Adjust bit to be at start of allocated memory block
2180 biti = (offset & notbinsize[bin]) >> 4;
2181 //debug(PRINTF) printf("\t\tbiti = x%x\n", biti);
2183 else if (bin == B_PAGEPLUS)
2189 while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
2190 biti = pn * (PAGESIZE / 16);
2194 // Don't mark bits in B_FREE pages
2198 if (bin >= B_PAGE) // Cache B_PAGE and B_PAGEPLUS lookups
2199 pcache = cast(size_t)p & ~(PAGESIZE-1);
2201 //debug(PRINTF) printf("\t\tmark(x%x) = %d\n", biti, pool.mark.test(biti));
2202 if (!pool.mark.test(biti))
2204 //if (log) debug(PRINTF) printf("\t\tmarking %x\n", p);
2205 pool.mark.set(biti);
2206 if (!pool.noscan.test(biti))
2208 pool.scan.set(biti);
2211 log_parent(sentinel_add(pool.baseAddr + biti * 16), sentinel_add(pbot));
2216 anychanges |= changes;
2221 * Return number of full pages free'd.
2223 size_t fullcollectshell()
2225 // The purpose of the 'shell' is to ensure all the registers
2226 // get put on the stack so they'll be scanned
2231 gcc.builtins.__builtin_unwind_init();
2238 uint eax,ecx,edx,ebx,ebp,esi,edi;
2251 else version (X86_64)
2253 ulong rax,rbx,rcx,rdx,rbp,rsi,rdi,r8,r9,r10,r11,r12,r13,r14,r15;
2256 movq rax[RBP], RAX ;
2257 movq rbx[RBP], RBX ;
2258 movq rcx[RBP], RCX ;
2259 movq rdx[RBP], RDX ;
2260 movq rbp[RBP], RBP ;
2261 movq rsi[RBP], RSI ;
2262 movq rdi[RBP], RDI ;
2265 movq r10[RBP], R10 ;
2266 movq r11[RBP], R11 ;
2267 movq r12[RBP], R12 ;
2268 movq r13[RBP], R13 ;
2269 movq r14[RBP], R14 ;
2270 movq r15[RBP], R15 ;
2276 static assert( false, "Architecture not supported." );
2287 result = fullcollect(sp);
2310 size_t fullcollect(void *stackTop)
2315 debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
2317 thread_suspendAll();
2323 for (n = 0; n < npools; n++)
2325 pool = pooltable[n];
2328 pool.freebits.zero();
2331 // Mark each free entry, so it doesn't get scanned
2332 for (n = 0; n < B_PAGE; n++)
2334 for (List *list = bucket[n]; list; list = list.next)
2336 pool = findPool(list);
2338 pool.freebits.set(cast(size_t)(cast(byte*)list - pool.baseAddr) / 16);
2342 for (n = 0; n < npools; n++)
2344 pool = pooltable[n];
2345 pool.mark.copy(&pool.freebits);
2348 rt_scanStaticData( &mark );
2352 // Scan stacks and registers for each paused thread
2353 thread_scanAll( &mark, stackTop );
2357 debug(COLLECT_PRINTF) printf("scan roots[]\n");
2358 mark(roots, roots + nroots);
2361 debug(COLLECT_PRINTF) printf("scan ranges[]\n");
2363 for (n = 0; n < nranges; n++)
2365 debug(COLLECT_PRINTF) printf("\t%x .. %x\n", ranges[n].pbot, ranges[n].ptop);
2366 mark(ranges[n].pbot, ranges[n].ptop);
2370 debug(COLLECT_PRINTF) printf("\tscan heap\n");
2374 for (n = 0; n < npools; n++)
2380 pool = pooltable[n];
2382 bbase = pool.scan.base();
2383 btop = bbase + pool.scan.nwords;
2384 for (b = bbase; b < btop;)
2400 o = pool.baseAddr + (b - bbase) * 32 * 16;
2401 if (!(bitm & 0xFFFF))
2406 for (; bitm; o += 16, bitm >>= 1)
2411 pn = cast(size_t)(o - pool.baseAddr) / PAGESIZE;
2412 bin = cast(Bins)pool.pagetable[pn];
2415 mark(o, o + binsize[bin]);
2417 else if (bin == B_PAGE || bin == B_PAGEPLUS)
2419 if (bin == B_PAGEPLUS)
2421 while (pool.pagetable[pn - 1] != B_PAGE)
2425 while (pn + u < pool.npages && pool.pagetable[pn + u] == B_PAGEPLUS)
2427 mark(o, o + u * PAGESIZE);
2436 // Free up everything not marked
2437 debug(COLLECT_PRINTF) printf("\tfree'ing\n");
2438 size_t freedpages = 0;
2440 for (n = 0; n < npools; n++)
2442 pool = pooltable[n];
2443 uint* bbase = pool.mark.base();
2445 for (pn = 0; pn < pool.npages; pn++, bbase += PAGESIZE / (32 * 16))
2447 Bins bin = cast(Bins)pool.pagetable[pn];
2451 auto size = binsize[bin];
2452 byte* p = pool.baseAddr + pn * PAGESIZE;
2453 byte* ptop = p + PAGESIZE;
2454 size_t biti = pn * (PAGESIZE/16);
2455 size_t bitstride = size / 16;
2457 version(none) // BUG: doesn't work because freebits() must also be cleared
2459 // If free'd entire page
2460 if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 && bbase[3] == 0 &&
2461 bbase[4] == 0 && bbase[5] == 0 && bbase[6] == 0 && bbase[7] == 0)
2463 for (; p < ptop; p += size, biti += bitstride)
2465 if (pool.finals.nbits && pool.finals.testClear(biti))
2466 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2467 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
2469 List *list = cast(List *)p;
2470 //debug(PRINTF) printf("\tcollecting %x\n", list);
2471 log_free(sentinel_add(list));
2473 debug (MEMSTOMP) libc.memset(p, 0xF3, size);
2475 pool.pagetable[pn] = B_FREE;
2477 //debug(PRINTF) printf("freeing entire page %d\n", pn);
2481 for (; p < ptop; p += size, biti += bitstride)
2483 if (!pool.mark.test(biti))
2485 sentinel_Invariant(sentinel_add(p));
2487 pool.freebits.set(biti);
2488 if (pool.finals.nbits && pool.finals.testClear(biti))
2489 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2490 clrBits(pool, biti, BlkAttr.ALL_BITS);
2492 List *list = cast(List *)p;
2493 debug(PRINTF) printf("\tcollecting %x\n", list);
2494 log_free(sentinel_add(list));
2496 debug (MEMSTOMP) libc.memset(p, 0xF3, size);
2502 else if (bin == B_PAGE)
2504 size_t biti = pn * (PAGESIZE / 16);
2505 if (!pool.mark.test(biti))
2507 byte *p = pool.baseAddr + pn * PAGESIZE;
2508 sentinel_Invariant(sentinel_add(p));
2509 if (pool.finals.nbits && pool.finals.testClear(biti))
2510 rt_finalize(sentinel_add(p), false/*noStack > 0*/);
2511 clrBits(pool, biti, BlkAttr.ALL_BITS);
2513 debug(COLLECT_PRINTF) printf("\tcollecting big %x\n", p);
2514 log_free(sentinel_add(p));
2515 pool.pagetable[pn] = B_FREE;
2517 debug (MEMSTOMP) libc.memset(p, 0xF3, PAGESIZE);
2518 while (pn + 1 < pool.npages && pool.pagetable[pn + 1] == B_PAGEPLUS)
2521 pool.pagetable[pn] = B_FREE;
2527 libc.memset(p, 0xF3, PAGESIZE);
2538 // Free complete pages, rebuild free list
2539 debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
2540 size_t recoveredpages = 0;
2541 for (n = 0; n < npools; n++)
2543 pool = pooltable[n];
2544 for (size_t pn = 0; pn < pool.npages; pn++)
2546 Bins bin = cast(Bins)pool.pagetable[pn];
2552 size_t size = binsize[bin];
2553 size_t bitstride = size / 16;
2554 size_t bitbase = pn * (PAGESIZE / 16);
2555 size_t bittop = bitbase + (PAGESIZE / 16);
2559 for (biti = bitbase; biti < bittop; biti += bitstride)
2561 if (!pool.freebits.test(biti))
2564 pool.pagetable[pn] = B_FREE;
2569 p = pool.baseAddr + pn * PAGESIZE;
2570 for (u = 0; u < PAGESIZE; u += size)
2572 biti = bitbase + u / 16;
2573 if (pool.freebits.test(biti))
2575 List *list = cast(List *)(p + u);
2576 if (list.next != bucket[bin]) // avoid unnecessary writes
2577 list.next = bucket[bin];
2585 debug(COLLECT_PRINTF) printf("recovered pages = %d\n", recoveredpages);
2586 debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, npools);
2588 return freedpages + recoveredpages;
/**
 * Read back the BlkAttr flag bits recorded for the block at bit index
 * biti in the given pool.
 *
 * NOTE(review): the declaration of the `bits` accumulator and the final
 * `return bits;` are elided in this excerpt — confirm against the full
 * source.
 */
2595 uint getBits(Pool* pool, size_t biti)
// finals is allocated lazily (see setBits), so nbits == 0 means
// "no finalizers recorded in this pool".
2604 if (pool.finals.nbits &&
2605 pool.finals.test(biti))
2606 bits |= BlkAttr.FINALIZE;
2607 if (pool.noscan.test(biti))
2608 bits |= BlkAttr.NO_SCAN;
// NO_MOVE support is stubbed out (no nomove bitmap is maintained).
2609 // if (pool.nomove.nbits &&
2610 // pool.nomove.test(biti))
2611 // bits |= BlkAttr.NO_MOVE;
/**
 * Record the BlkAttr flags given in `mask` for the block at bit index
 * biti in the given pool.
 */
2619 void setBits(Pool* pool, size_t biti, uint mask)
2626 if (mask & BlkAttr.FINALIZE)
// The finals bitmap is created on first use, sized to match the
// pool's mark bitmap (one bit per 16-byte granule).
2628 if (!pool.finals.nbits)
2629 pool.finals.alloc(pool.mark.nbits);
2630 pool.finals.set(biti);
2632 if (mask & BlkAttr.NO_SCAN)
2634 pool.noscan.set(biti);
// NO_MOVE support is stubbed out, mirroring getBits/clrBits.
2636 // if (mask & BlkAttr.NO_MOVE)
2638 // if (!pool.nomove.nbits)
2639 // pool.nomove.alloc(pool.mark.nbits);
2640 // pool.nomove.set(biti);
/**
 * Clear the BlkAttr flags given in `mask` for the block at bit index
 * biti; finals is only touched when it has actually been allocated.
 */
2648 void clrBits(Pool* pool, size_t biti, uint mask)
2655 if (mask & BlkAttr.FINALIZE && pool.finals.nbits)
2656 pool.finals.clear(biti);
2657 if (mask & BlkAttr.NO_SCAN)
2658 pool.noscan.clear(biti);
// NO_MOVE support is stubbed out, mirroring getBits/setBits.
2659 // if (mask & BlkAttr.NO_MOVE && pool.nomove.nbits)
2660 // pool.nomove.clear(biti);
2664 /***** Leak Detector ******/
2675 //debug(PRINTF) printf("+log_init()\n");
2676 current.reserve(1000);
2678 //debug(PRINTF) printf("-log_init()\n");
/**
 * Leak-detector hook: record an allocation of `size` bytes at p.
 * NOTE(review): the body that appends the entry to the `current` log is
 * elided in this excerpt; only the (disabled) trace printfs are visible.
 */
2682 void log_malloc(void *p, size_t size)
2684 //debug(PRINTF) printf("+log_malloc(p = %x, size = %d)\n", p, size);
2697 //debug(PRINTF) printf("-log_malloc()\n");
/**
 * Leak-detector hook: remove the log entry for p from `current`,
 * warning (under debug(PRINTF)) when p was never recorded, i.e. an
 * apparently unallocated address is being freed.
 */
2701 void log_free(void *p)
2703 //debug(PRINTF) printf("+log_free(%x)\n", p);
2706 i = current.find(p);
// NOTE(review): the hit/miss branch around this warning (and the
// entry removal on a hit) is elided in this excerpt.
2709 debug(PRINTF) printf("free'ing unallocated memory %x\n", p);
2713 //debug(PRINTF) printf("-log_free()\n");
2719 //debug(PRINTF) printf("+log_collect()\n");
2720 // Print everything in current that is not in prev
2722 debug(PRINTF) printf("New pointers this cycle: --------------------------------\n");
2724 for (size_t i = 0; i < current.dim; i++)
2728 j = prev.find(current.data[i].p);
2730 current.data[i].print();
2735 debug(PRINTF) printf("All roots this cycle: --------------------------------\n");
2736 for (size_t i = 0; i < current.dim; i++)
2741 p = current.data[i].p;
2742 if (!findPool(current.data[i].parent))
2744 j = prev.find(current.data[i].p);
2746 debug(PRINTF) printf("N");
2748 debug(PRINTF) printf(" ");;
2749 current.data[i].print();
2753 debug(PRINTF) printf("Used = %d-------------------------------------------------\n", used);
2754 prev.copy(&current);
2756 debug(PRINTF) printf("-log_collect()\n");
/**
 * Leak-detector hook: attach `parent` (the heap address that references
 * p) to p's entry in the `current` log; when p is not currently logged,
 * print bin/offset diagnostics about where p lives instead.
 */
2760 void log_parent(void *p, void *parent)
2762 //debug(PRINTF) printf("+log_parent()\n");
2765 i = current.find(p);
// p was not found in the allocation log: report its pool location.
2768 debug(PRINTF) printf("parent'ing unallocated memory %x, parent = %x\n", p, parent);
2772 size_t offset = cast(size_t)(p - pool.baseAddr);
2774 size_t pn = offset / PAGESIZE;
2775 Bins bin = cast(Bins)pool.pagetable[pn];
2776 biti = (offset & notbinsize[bin]);
2777 debug(PRINTF) printf("\tbin = %d, offset = x%x, biti = x%x\n", bin, offset, biti);
2781 current.data[i].parent = parent;
2783 //debug(PRINTF) printf("-log_parent()\n");
// No-op stand-ins compiled when the LOGGING debug switch is off: call
// sites stay unconditional while the logging cost disappears entirely.
2790 void log_malloc(void *p, size_t size) { }
2791 void log_free(void *p) { }
2792 void log_collect() { }
2793 void log_parent(void *p, void *parent) { }
2798 /* ============================ Pool =============================== */
2805 GCBits mark; // entries already scanned, or should not be scanned
2806 GCBits scan; // entries that need to be scanned
2807 GCBits freebits; // entries that are on the free list
2808 GCBits finals; // entries that need finalizer run on them
2809 GCBits noscan; // entries that should not be scanned
/**
 * Set up this pool: map npages pages of memory from the OS, allocate
 * the per-granule bitmaps, and mark every page B_FREE in the page
 * table.  NOTE(review): the mapping-failure branch is elided in this
 * excerpt; only its disabled diagnostics are visible.
 */
2815 void initialize(size_t npages)
2819 //debug(PRINTF) printf("Pool::Pool(%u)\n", npages);
2820 poolsize = npages * PAGESIZE;
2821 assert(poolsize >= POOLSIZE);
2822 baseAddr = cast(byte *) alloc.os_mem_map(poolsize);
2824 // Some of the code depends on page alignment of memory pools
2825 assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);
2829 //debug(PRINTF) printf("GC fail: poolsize = x%x, errno = %d\n", poolsize, errno);
2830 //debug(PRINTF) printf("message = '%s'\n", sys_errlist[errno]);
2836 topAddr = baseAddr + poolsize;
// One bit per 16-byte allocation granule in each bitmap; finals is
// not allocated here — setBits() creates it lazily on first use.
2838 mark.alloc(cast(size_t)poolsize / 16);
2839 scan.alloc(cast(size_t)poolsize / 16);
2840 freebits.alloc(cast(size_t)poolsize / 16);
2841 noscan.alloc(cast(size_t)poolsize / 16);
// One byte per page recording its Bins state; all pages start free.
2843 pagetable = cast(ubyte*) libc.malloc(npages);
2845 onOutOfMemoryError();
2846 libc.memset(pagetable, B_FREE, npages);
2848 this.npages = npages;
2860 result = alloc.os_mem_unmap(baseAddr, npages * PAGESIZE);
2869 libc.free(pagetable);
// Public invariant hook; intentionally a no-op (the consistency checks
// live in the struct's invariant block).
2879 void Invariant() { }
2886 //freebits.Invariant();
2887 //finals.Invariant();
2888 //noscan.Invariant();
2892 //if (baseAddr + npages * PAGESIZE != topAddr)
2893 //printf("baseAddr = %p, npages = %d, topAddr = %p\n", baseAddr, npages, topAddr);
2894 assert(baseAddr + npages * PAGESIZE == topAddr);
2897 for (size_t i = 0; i < npages; i++)
2899 Bins bin = cast(Bins)pagetable[i];
2900 assert(bin < B_MAX);
2906 * Allocate n pages from Pool.
2907 * Returns OPFAIL on failure.
2909 size_t allocPages(size_t n)
2914 //debug(PRINTF) printf("Pool::allocPages(n = %d)\n", n);
// Linear first-fit scan for a run of n consecutive B_FREE pages.
// NOTE(review): the run-counting logic and return statements are
// elided in this excerpt.
2916 for (i = 0; i < npages; i++)
2918 if (pagetable[i] == B_FREE)
2922 //debug(PRINTF) printf("\texisting pn = %d\n", i - n + 1);
2934 * Free npages pages starting with pagenum.
2936 void freePages(size_t pagenum, size_t npages)
// Only the page table is reset; this routine does not touch the
// per-granule bitmaps (mark, freebits, ...).
2938 libc.memset(&pagetable[pagenum], B_FREE, npages);
2943 * Used for sorting pooltable[]
// NOTE(review): the comparison-operator signature is elided in this
// excerpt.  Orders pools by ascending baseAddr: negative when this
// pool precedes p2, otherwise 0 or 1 from the boolean cast.
2947 if (baseAddr < p2.baseAddr)
2950 return cast(int)(baseAddr > p2.baseAddr);
2955 /* ============================ SENTINEL =============================== */
2960 const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
2961 const ubyte SENTINEL_POST = 0xF5; // 8 bits
2962 const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;
// Sentinel layout relative to the user pointer p:
//   p[-2 words] = requested size, p[-1 word] = SENTINEL_PRE canary,
//   p[size]     = SENTINEL_POST byte (overrun canary).
2965 size_t* sentinel_size(void *p) { return &(cast(size_t *)p)[-2]; }
2966 size_t* sentinel_pre(void *p) { return &(cast(size_t *)p)[-1]; }
2967 ubyte* sentinel_post(void *p) { return &(cast(ubyte *)p)[*sentinel_size(p)]; }
// Stamp the size word and both canary values around a fresh allocation.
2970 void sentinel_init(void *p, size_t size)
2972 *sentinel_size(p) = size;
2973 *sentinel_pre(p) = SENTINEL_PRE;
2974 *sentinel_post(p) = SENTINEL_POST;
// Assert both canaries are intact, catching buffer under- and overruns.
2978 void sentinel_Invariant(void *p)
2980 assert(*sentinel_pre(p) == SENTINEL_PRE);
2981 assert(*sentinel_post(p) == SENTINEL_POST);
// Raw block pointer -> user pointer: skip the two header words
// (size + SENTINEL_PRE).
2985 void *sentinel_add(void *p)
2987 return p + 2 * size_t.sizeof;
// User pointer -> raw block pointer: step back over the two header
// words (inverse of sentinel_add).
2991 void *sentinel_sub(void *p)
2993 return p - 2 * size_t.sizeof;
// Sentinel debugging disabled: no extra bytes per allocation and the
// helpers compile away to nothing (bodies elided in this excerpt).
2998 const uint SENTINEL_EXTRA = 0;
3001 void sentinel_init(void *p, size_t size)
3006 void sentinel_Invariant(void *p)
3011 void *sentinel_add(void *p)
3017 void *sentinel_sub(void *p)