2 * This module contains the garbage collector implementation.
4 * Copyright: Copyright (C) 2001-2007 Digital Mars, www.digitalmars.com.
7 * This software is provided 'as-is', without any express or implied
8 * warranty. In no event will the authors be held liable for any damages
9 * arising from the use of this software.
11 * Permission is granted to anyone to use this software for any purpose,
12 * including commercial applications, and to alter it and redistribute it
13 * freely, in both source and binary form, subject to the following
16 * o The origin of this software must not be misrepresented; you must not
17 * claim that you wrote the original software. If you use this software
18 * in a product, an acknowledgment in the product documentation would be
19 * appreciated but is not required.
20 * o Altered source versions must be plainly marked as such, and must not
21 * be misrepresented as being the original software.
22 * o This notice may not be removed or altered from any source
24 * Authors: Walter Bright, David Friedman, Sean Kelly
29 // D Programming Language Garbage Collector implementation
31 /************** Debugging ***************************/
33 //debug = PRINTF; // turn on printf's
34 //debug = COLLECT_PRINTF; // turn on printf's
35 //debug = LOGGING; // log allocations / frees
36 //debug = MEMSTOMP; // stomp on memory
37 //debug = SENTINEL; // add underrun/overrun protection
38 //debug = PTRCHECK; // more pointer checking
39 //debug = PTRCHECK2; // thorough but slow pointer checking
41 /*************** Configuration *********************/
43 version = STACKGROWSDOWN; // growing the stack means subtracting from the stack pointer
44 // (use for Intel X86 CPUs)
45 // else growing the stack means adding to the stack pointer
46 version = MULTI_THREADED; // produce multithreaded version
48 /***************************************************/
58 // BUG: The following import will likely not work, since the gcc
59 // subdirectory is elsewhere. Instead, perhaps the functions
60 // could be declared directly or some other resolution could
62 import gcc.builtins; // for __builtin_unwind_init
// Block-attribute bit flags (enum members; the enum header is elided in this listing).
77 FINALIZE = 0b0000_0001,
78 NO_SCAN = 0b0000_0010,
79 NO_MOVE = 0b0000_0100,
80 ALL_BITS = 0b1111_1111
// Hooks into the D runtime: stack bounds, object finalization, and static-data scanning.
83 extern (C) void* rt_stackBottom();
84 extern (C) void* rt_stackTop();
86 extern (C) void rt_finalize( void* p, bool det = true );
// Callback type used by the mark phase: receives (bottom, top) of a range to scan.
88 alias void delegate( void*, void* ) scanFn;
90 extern (C) void rt_scanStaticData( scanFn scan );
// Threading hooks; only compiled in when MULTI_THREADED is set (see Configuration above).
92 version (MULTI_THREADED)
94 extern (C) bool thread_needLock();
95 extern (C) void thread_suspendAll();
96 extern (C) void thread_resumeAll();
98 extern (C) void thread_scanAll( scanFn fn, void* curStackTop = null );
101 extern (C) void onOutOfMemoryError();
// Sentinel "operation failed" value: all bits set, i.e. size_t.max.
105 OPFAIL = ~cast(size_t)0
113 /* ======================= Leak Detector =========================== */
128 printf(" p = %x, size = %d, parent = %x ", p, size, parent);
131 printf("%s(%u)", file, line);
// Ensure capacity for at least `nentries` additional Log records, growing
// (and reallocating with the C heap) as needed. Lines are elided in this
// listing; the free/assign of the old buffer is not visible here.
151 void reserve(size_t nentries)
153 assert(dim <= allocdim);
154 if (allocdim - dim < nentries)
156 allocdim = (dim + nentries) * 2;
157 assert(dim + nentries <= allocdim);
160 data = cast(Log*) .malloc(allocdim * Log.sizeof);
161 if (!data && allocdim)
162 onOutOfMemoryError();
167 newdata = cast(Log*) .malloc(allocdim * Log.sizeof);
168 if (!newdata && allocdim)
169 onOutOfMemoryError();
170 .memcpy(newdata, data, dim * Log.sizeof);
// Remove entry i by shifting the tail down one slot.
// NOTE(review): the count (dim - i) copies one Log past the last live entry
// (only dim - i - 1 remain after index i) — reads uninitialized tail data;
// verify against the full source whether this is intentional slack.
184 void remove(size_t i)
186 .memmove(data + i, data + i + 1, (dim - i) * Log.sizeof);
// Linear search helper; returns OPFAIL when no entry matches.
193 for (size_t i = 0; i < dim; i++)
198 return OPFAIL; // not found
// Copy another LogArray's contents into this one, growing first if needed.
202 void copy(LogArray *from)
204 reserve(from.dim - dim);
205 assert(from.dim <= allocdim);
206 .memcpy(data, from.data, from.dim * Log.sizeof);
213 /* ============================ GC =============================== */
// GCLock exists only so its ClassInfo's monitor can serve as the global GC lock.
216 class GCLock { } // just a dummy so we can get a global lock
219 const uint GCVERSION = 1; // increment every time we change interface
224 // For passing to debug code
228 uint gcversion = GCVERSION;
230 Gcx *gcx; // implementation
231 static ClassInfo gcLock; // global lock
// GC initialization (enclosing function header elided): allocate the Gcx
// implementation struct zero-filled from the C heap and record the stack bottom.
236 gcLock = GCLock.classinfo;
237 gcx = cast(Gcx*) .calloc(1, Gcx.sizeof);
239 onOutOfMemoryError();
241 setStackBottom(rt_stackBottom());
// enable/disable fragments: gcx.disabled is a nesting counter; collections are
// suppressed while it is > 0. All public entry points below follow the same
// pattern: skip the lock when single-threaded, else synchronize on gcLock.
261 if (!thread_needLock())
263 assert(gcx.disabled > 0);
266 else synchronized (gcLock)
268 assert(gcx.disabled > 0);
279 if (!thread_needLock())
283 else synchronized (gcLock)
// getAttr: read the BlkAttr bits of the block containing p.
// biti is the block's bit index: pools track attributes per 16-byte granule.
293 uint getAttr(void* p)
302 Pool* pool = gcx.findPool(p);
307 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
309 oldb = gcx.getBits(pool, biti);
314 if (!thread_needLock())
318 else synchronized (gcLock)
// setAttr: OR `mask` into the block's attribute bits; returns the old bits.
328 uint setAttr(void* p, uint mask)
337 Pool* pool = gcx.findPool(p);
342 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
344 oldb = gcx.getBits(pool, biti);
345 gcx.setBits(pool, biti, mask);
350 if (!thread_needLock())
354 else synchronized (gcLock)
// clrAttr: clear `mask` from the block's attribute bits; returns the old bits.
364 uint clrAttr(void* p, uint mask)
373 Pool* pool = gcx.findPool(p);
378 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
380 oldb = gcx.getBits(pool, biti);
381 gcx.clrBits(pool, biti, mask);
386 if (!thread_needLock())
390 else synchronized (gcLock)
// Public allocator entry point: locks (if needed) and delegates to mallocNoSync.
400 void *malloc(size_t size, uint bits = 0)
407 if (!thread_needLock())
409 return mallocNoSync(size, bits);
411 else synchronized (gcLock)
413 return mallocNoSync(size, bits);
// Core allocation path; caller must already hold gcLock when required.
// Small requests come from per-bin free lists; large ones go to bigAlloc.
421 private void *mallocNoSync(size_t size, uint bits = 0)
428 //debug(PRINTF) printf("GC::malloc(size = %d, gcx = %p)\n", size, gcx);
// Sentinel padding (debug SENTINEL) is added here and subtracted again below.
431 size += SENTINEL_EXTRA;
434 // Cache previous binsize lookup - Dave Fladebo.
435 static size_t lastsize = -1;
437 if (size == lastsize)
441 bin = gcx.findBin(size);
// Free list empty: try to allocate a fresh page; if that fails and
// collections are enabled, collect, then retry (elided lines between).
451 if (!gcx.allocPage(bin) && !gcx.disabled) // try to find a new page
453 if (!thread_needLock())
455 /* Then we haven't locked it yet. Be sure
456 * and lock for a collection, since a finalizer
457 * may start a new thread.
459 synchronized (gcLock)
461 gcx.fullcollectshell();
464 else if (!gcx.fullcollectshell()) // collect to find a new page
// Last resort: grow the heap with a new pool before giving up.
469 if (!gcx.bucket[bin] && !gcx.allocPage(bin))
472 gcx.newPool(1); // allocate new pool to find a new page
473 result = gcx.allocPage(bin);
475 onOutOfMemoryError();
480 // Return next item from free list
481 gcx.bucket[bin] = (cast(List*)p).next;
// Zero the unused tail of the bin slot for scannable blocks so stale
// pointers there can't keep garbage alive.
482 if( !(bits & BlkAttr.NO_SCAN) )
483 .memset(p + size, 0, binsize[bin] - size);
484 //debug(PRINTF) printf("\tmalloc => %x\n", p);
485 debug (MEMSTOMP) .memset(p, 0xF0, size);
// Large-object path: delegate to bigAlloc.
489 p = gcx.bigAlloc(size);
491 onOutOfMemoryError();
493 size -= SENTINEL_EXTRA;
495 sentinel_init(p, size);
496 gcx.log_malloc(p, size);
// Record the caller-requested attribute bits for the new block.
500 Pool *pool = gcx.findPool(p);
503 gcx.setBits(pool, cast(size_t)(p - pool.baseAddr) / 16, bits);
// Zero-initialized allocation: same locking wrapper pattern as malloc.
512 void *calloc(size_t size, uint bits = 0)
519 if (!thread_needLock())
521 return callocNoSync(size, bits);
523 else synchronized (gcLock)
525 return callocNoSync(size, bits);
// Allocates via mallocNoSync; the zeroing memset is elided in this listing.
533 private void *callocNoSync(size_t size, uint bits = 0)
537 //debug(PRINTF) printf("calloc: %x len %d\n", p, len);
538 void *p = mallocNoSync(size, bits);
// Public realloc entry point: usual lock-or-not wrapper.
547 void *realloc(void *p, size_t size, uint bits = 0)
549 if (!thread_needLock())
551 return reallocNoSync(p, size, bits);
553 else synchronized (gcLock)
555 return reallocNoSync(p, size, bits);
// Core realloc. Null p degenerates to malloc; otherwise tries in-place
// shrink/grow for page-sized blocks before falling back to alloc+copy.
563 private void *reallocNoSync(void *p, size_t size, uint bits = 0)
573 p = mallocNoSync(size, bits);
579 //debug(PRINTF) printf("GC::realloc(p = %x, size = %u)\n", p, size);
// SENTINEL build: old size comes from the sentinel header.
582 sentinel_Invariant(p);
583 psize = *sentinel_size(p);
// Carry over (or override) attribute bits for the replacement block.
588 Pool *pool = gcx.findPool(p);
592 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
596 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
597 gcx.setBits(pool, biti, bits);
601 bits = gcx.getBits(pool, biti);
605 p2 = mallocNoSync(size, bits);
608 //debug(PRINTF) printf("\tcopying %d bytes\n",size);
609 .memcpy(p2, p, size);
// Non-sentinel path: both old and new sizes span whole pages, so the
// block may be resized in place via the pool's page table.
615 psize = gcx.findSize(p); // find allocated size
616 if (psize >= PAGESIZE && size >= PAGESIZE)
618 auto psz = psize / PAGESIZE;
619 auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
623 auto pool = gcx.findPool(p);
624 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
// Shrink in place: release the trailing pages.
628 synchronized (gcLock)
630 debug (MEMSTOMP) .memset(p + size, 0xF2, psize - size);
631 pool.freePages(pagenum + newsz, psz - newsz);
635 else if (pagenum + newsz <= pool.npages)
637 // Attempt to expand in place
638 synchronized (gcLock)
// Walk the pages after the block; succeed only if all needed pages are free
// (committing more pages on demand when the scan hits the commit boundary).
640 for (size_t i = pagenum + psz; 1;)
642 if (i == pagenum + newsz)
644 debug (MEMSTOMP) .memset(p + psize, 0xF0, size - psize);
645 .memset(&pool.pagetable[pagenum + psz], B_PAGEPLUS, newsz - psz);
648 if (i == pool.ncommitted)
650 auto u = pool.extendPages(pagenum + newsz - pool.ncommitted);
656 if (pool.pagetable[i] != B_FREE)
// Fallback: grow, or shrink below half — allocate fresh and copy.
663 if (psize < size || // if new size is bigger
664 psize > size * 2) // or less than half
668 Pool *pool = gcx.findPool(p);
672 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
676 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
677 gcx.setBits(pool, biti, bits);
681 bits = gcx.getBits(pool, biti);
685 p2 = mallocNoSync(size, bits);
688 //debug(PRINTF) printf("\tcopying %d bytes\n",size);
689 .memcpy(p2, p, size);
699 * Attempt to in-place enlarge the memory block pointed to by p by at least
700 * minbytes beyond its current capacity, up to a maximum of maxsize. This
701 * does not attempt to move the memory block (like realloc() does).
704 * 0 if could not extend p,
705 * total size of entire memory block if successful.
707 size_t extend(void* p, size_t minsize, size_t maxsize)
709 if (!thread_needLock())
711 return extendNoSync(p, minsize, maxsize);
713 else synchronized (gcLock)
715 return extendNoSync(p, minsize, maxsize);
// Core in-place extend; only works on page-granular (large) allocations.
723 private size_t extendNoSync(void* p, size_t minsize, size_t maxsize)
726 assert( minsize <= maxsize );
730 //debug(PRINTF) printf("GC::extend(p = %x, minsize = %u, maxsize = %u)\n", p, minsize, maxsize);
735 auto psize = gcx.findSize(p); // find allocated size
736 if (psize < PAGESIZE)
737 return 0; // cannot extend buckets
// Work in whole pages: current span, minimum and maximum extra pages.
739 auto psz = psize / PAGESIZE;
740 auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
741 auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
743 auto pool = gcx.findPool(p);
744 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
// Count free pages immediately after the block, up to maxsz; stop at the
// commit boundary or the first non-free page.
747 for (sz = 0; sz < maxsz; sz++)
749 auto i = pagenum + psz + sz;
750 if (i == pool.ncommitted)
752 if (pool.pagetable[i] != B_FREE)
// Hit the commit boundary before minsz pages were found: try committing more.
761 else if (pagenum + psz + sz == pool.ncommitted)
763 auto u = pool.extendPages(minsz - sz);
// Success: claim the pages as continuation (B_PAGEPLUS) and report new size.
770 debug (MEMSTOMP) .memset(p + psize, 0xF0, (psz + sz) * PAGESIZE - psize);
771 .memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
774 return (psz + sz) * PAGESIZE;
// Pre-reserve heap capacity; delegates to Gcx.reserve under the usual locking.
781 size_t reserve(size_t size)
788 if (!thread_needLock())
790 return reserveNoSync(size);
792 else synchronized (gcLock)
794 return reserveNoSync(size);
802 private size_t reserveNoSync(size_t size)
807 return gcx.reserve(size);
// free(p) wrapper (function header elided in this listing).
821 if (!thread_needLock())
823 return freeNoSync(p);
825 else synchronized (gcLock)
827 return freeNoSync(p);
// Explicitly release a GC-allocated block: large allocations return their
// pages to the pool; bin-sized ones go back on the free list.
835 private void freeNoSync(void *p)
844 // Find which page it is in
845 pool = gcx.findPool(p);
846 if (!pool) // if not one of ours
848 sentinel_Invariant(p);
850 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
851 biti = cast(size_t)(p - pool.baseAddr) / 16;
// Drop all attribute bits (FINALIZE etc.) so no stale finalizer runs later.
852 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
854 bin = cast(Bins)pool.pagetable[pagenum];
855 if (bin == B_PAGE) // if large alloc
// Count the B_PAGEPLUS continuation pages that belong to this block.
862 while (++n < pool.ncommitted && pool.pagetable[n] == B_PAGEPLUS)
864 debug (MEMSTOMP) .memset(p, 0xF2, npages * PAGESIZE);
865 pool.freePages(pagenum, npages);
868 { // Add to free list
869 List *list = cast(List*)p;
871 debug (MEMSTOMP) .memset(p, 0xF2, binsize[bin]);
873 list.next = gcx.bucket[bin];
874 gcx.bucket[bin] = list;
876 gcx.log_free(sentinel_add(p));
881 * Determine the base address of the block containing p. If p is not a gc
882 * allocated pointer, return null.
884 void* addrOf(void *p)
891 if (!thread_needLock())
893 return addrOfNoSync(p);
895 else synchronized (gcLock)
897 return addrOfNoSync(p);
905 void* addrOfNoSync(void *p)
912 return gcx.findBase(p);
917 * Determine the allocated size of pointer p. If p is an interior pointer
918 * or not a gc allocated pointer, return 0.
920 size_t sizeOf(void *p)
927 if (!thread_needLock())
929 return sizeOfNoSync(p);
931 else synchronized (gcLock)
933 return sizeOfNoSync(p);
// Two variants follow: the first branch appears to be the SENTINEL build
// (subtracts SENTINEL_EXTRA), the second the normal build with a one-entry
// (p, size) cache — elided conditional compilation lines separate them.
941 private size_t sizeOfNoSync(void *p)
948 size_t size = gcx.findSize(p);
950 // Check for interior pointer
952 // 1) size is a power of 2 for less than PAGESIZE values
953 // 2) base of memory pool is aligned on PAGESIZE boundary
954 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
956 return size ? size - SENTINEL_EXTRA : 0;
960 if (p == gcx.p_cache)
961 return gcx.size_cache;
963 size_t size = gcx.findSize(p);
965 // Check for interior pointer
967 // 1) size is a power of 2 for less than PAGESIZE values
968 // 2) base of memory pool is aligned on PAGESIZE boundary
969 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
974 gcx.size_cache = size;
983 * Determine the base address of the block containing p. If p is not a gc
984 * allocated pointer, return null.
986 BlkInfo query(void *p)
994 if (!thread_needLock())
996 return queryNoSync(p);
998 else synchronized (gcLock)
1000 return queryNoSync(p);
1008 BlkInfo queryNoSync(void *p)
1012 return gcx.getInfo(p);
1017 * Verify that pointer p:
1018 * 1) belongs to this memory pool
1019 * 2) points to the start of an allocated piece of memory
1020 * 3) is not on a free list
1029 if (!thread_needLock())
1033 else synchronized (gcLock)
// Debug-time validation of a GC pointer; asserts on any violation.
1043 private void checkNoSync(void *p)
1047 sentinel_Invariant(p);
1055 p = sentinel_sub(p);
1056 pool = gcx.findPool(p);
1058 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1059 bin = cast(Bins)pool.pagetable[pagenum];
1060 assert(bin <= B_PAGE);
1061 size = binsize[bin];
// Must be aligned to its bin size, i.e. point at the start of a slot.
1062 assert((cast(size_t)p & (size - 1)) == 0);
1068 // Check that p is not on a free list
1071 for (list = gcx.bucket[bin]; list; list = list.next)
1073 assert(cast(void*)list != p);
// Record the deepest (or highest, depending on stack growth direction)
// stack-bottom seen so collections scan the full main-thread stack.
1084 private void setStackBottom(void *p)
1086 version (STACKGROWSDOWN)
1088 //p = (void *)((uint *)p + 4);
// Stack grows down: bottom is the numerically largest address seen.
1089 if (p > gcx.stackBottom)
1091 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1092 gcx.stackBottom = p;
1097 //p = (void *)((uint *)p - 4);
// Stack grows up: bottom is the numerically smallest address seen.
1098 if (p < gcx.stackBottom)
1100 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1101 gcx.stackBottom = cast(char*)p;
1108 * add p to list of roots
1110 void addRoot(void *p)
1117 if (!thread_needLock())
1121 else synchronized (gcLock)
1129 * remove p from list of roots
1131 void removeRoot(void *p)
1138 if (!thread_needLock())
1142 else synchronized (gcLock)
1150 * add range to scan for roots
// Note the public interface takes (pointer, size); Gcx.addRange takes (bot, top).
1152 void addRange(void *p, size_t sz)
1159 //debug(PRINTF) printf("+GC.addRange(pbot = x%x, ptop = x%x)\n", pbot, ptop);
1160 if (!thread_needLock())
1162 gcx.addRange(p, p + sz);
1164 else synchronized (gcLock)
1166 gcx.addRange(p, p + sz);
1168 //debug(PRINTF) printf("-GC.addRange()\n");
1175 void removeRange(void *p)
1182 if (!thread_needLock())
1186 else synchronized (gcLock)
1194 * do full garbage collection
1198 debug(PRINTF) printf("GC.fullCollect()\n");
1200 if (!thread_needLock())
1202 gcx.fullcollectshell();
1204 else synchronized (gcLock)
1206 gcx.fullcollectshell();
1214 debug(PRINTF) printf("poolsize = %x, usedsize = %x, freelistsize = %x\n",
1215 stats.poolsize, stats.usedsize, stats.freelistsize);
1223 * do full garbage collection ignoring roots
// Presumably sets gcx.noStack around the collection (elided lines) so the
// stack is not scanned — TODO confirm against the full source.
1225 void fullCollectNoStack()
1227 if (!thread_needLock())
1230 gcx.fullcollectshell();
1233 else synchronized (gcLock)
1236 gcx.fullcollectshell();
1243 * minimize free space usage
1247 if (!thread_needLock())
1251 else synchronized (gcLock)
1259 * Retrieve statistics about garbage collection.
1260 * Useful for debugging and tuning.
1262 void getStats(out GCStats stats)
1264 if (!thread_needLock())
1266 getStatsNoSync(stats);
1268 else synchronized (gcLock)
1270 getStatsNoSync(stats);
// Walk every committed page of every pool, tallying pool size, used bytes,
// and free-list bytes into the caller's GCStats.
1278 private void getStatsNoSync(out GCStats stats)
1287 //debug(PRINTF) printf("getStats()\n");
1288 .memset(&stats, 0, GCStats.sizeof);
1290 for (n = 0; n < gcx.npools; n++)
1291 { Pool *pool = gcx.pooltable[n];
1293 psize += pool.ncommitted * PAGESIZE;
1294 for (size_t j = 0; j < pool.ncommitted; j++)
1296 Bins bin = cast(Bins)pool.pagetable[j];
1299 else if (bin == B_PAGE)
1301 else if (bin < B_PAGE)
// Sum the bytes sitting on the per-bin free lists.
1306 for (n = 0; n < B_PAGE; n++)
1308 //debug(PRINTF) printf("bin %d\n", n);
1309 for (List *list = gcx.bucket[n]; list; list = list.next)
1311 //debug(PRINTF) printf("\tlist %x\n", list);
1312 flsize += binsize[n];
1316 usize = bsize - flsize;
1318 stats.poolsize = psize;
1319 stats.usedsize = bsize - flsize;
1320 stats.freelistsize = flsize;
1325 /* ============================ Gcx =============================== */
// Commit granularity (64K) and pool granularity (1M), in bytes.
1329 COMMITSIZE = (4096*16),
1330 POOLSIZE = (4096*256),
// Page-table markers beyond the size-class bins.
1344 B_PAGE, // start of large alloc
1345 B_PAGEPLUS, // continuation of large alloc
1346 B_FREE, // free page
1347 B_UNCOMMITTED, // memory not committed for this page
// Bin sizes are powers of two from 16 to 4096; notbinsize[] holds the
// matching alignment masks used to round a pointer down to its slot base.
1368 const uint binsize[B_MAX] = [ 16,32,64,128,256,512,1024,2048,4096 ];
1369 const uint notbinsize[B_MAX] = [ ~(16u-1),~(32u-1),~(64u-1),~(128u-1),~(256u-1),
1370 ~(512u-1),~(1024u-1),~(2048u-1),~(4096u-1) ];
1372 /* ============================ Gcx =============================== */
// Gcx instance state (struct header elided in this listing).
1389 uint noStack; // !=0 means don't scan stack
1390 uint log; // turn on logging
1394 int disabled; // turn off collections if >0
1396 byte *minAddr; // min(baseAddr)
1397 byte *maxAddr; // max(topAddr)
1402 List *bucket[B_MAX]; // free list for each size
// initialize(): zero the whole struct, then take the current frame's
// address as an initial stack-bottom estimate.
1408 (cast(byte*)this)[0 .. Gcx.sizeof] = 0;
1409 stackBottom = cast(char*)&dummy;
1411 //printf("gcx = %p, self = %x\n", this, self);
1411 //printf("gcx = %p, self = %x\n", this, self);
// Destructor fragment: iterate pools for teardown (Dtor calls elided).
1420 for (size_t i = 0; i < npools; i++)
1421 { Pool *pool = pooltable[i];
1437 void Invariant() { }
// invariant(): pooltable must be sorted by address and bracketed by
// minAddr/maxAddr; root/range arrays must be consistent; free lists sane.
1444 //printf("Gcx.invariant(): this = %p\n", this);
1447 for (i = 0; i < npools; i++)
1448 { Pool *pool = pooltable[i];
1453 assert(minAddr == pool.baseAddr);
1457 assert(pool.opCmp(pooltable[i + 1]) < 0);
1459 else if (i + 1 == npools)
1461 assert(maxAddr == pool.topAddr);
1467 assert(rootdim != 0);
1468 assert(nroots <= rootdim);
1473 assert(rangedim != 0);
1474 assert(nranges <= rangedim);
1476 for (i = 0; i < nranges; i++)
1478 assert(ranges[i].pbot);
1479 assert(ranges[i].ptop);
1480 assert(ranges[i].pbot <= ranges[i].ptop);
1484 for (i = 0; i < B_PAGE; i++)
1486 for (List *list = bucket[i]; list; list = list.next)
// Append p to roots[], growing the array geometrically (2x + 16) with the
// C heap; old contents are copied over on growth.
1497 void addRoot(void *p)
1499 if (nroots == rootdim)
1501 size_t newdim = rootdim * 2 + 16;
1504 newroots = cast(void**) .malloc(newdim * newroots[0].sizeof);
1506 onOutOfMemoryError();
1508 { .memcpy(newroots, roots, nroots * newroots[0].sizeof);
// Remove p from roots[] by shifting the tail down (search loop body elided).
1522 void removeRoot(void *p)
1524 for (size_t i = nroots; i--;)
1529 .memmove(roots + i, roots + i + 1, (nroots - i) * roots[0].sizeof);
// Append a [pbot, ptop) scan range, growing ranges[] the same way as roots[].
1540 void addRange(void *pbot, void *ptop)
1542 debug (PRINTF) printf("%x.Gcx::addRange(%x, %x), nranges = %d\n", this,
1543 pbot, ptop, nranges);
1544 if (nranges == rangedim)
1546 size_t newdim = rangedim * 2 + 16;
1549 newranges = cast(Range*) .malloc(newdim * newranges[0].sizeof);
1551 onOutOfMemoryError();
1553 { .memcpy(newranges, ranges, nranges * newranges[0].sizeof);
1559 ranges[nranges].pbot = pbot;
1560 ranges[nranges].ptop = ptop;
// Remove the range whose pbot matches; a miss is deliberately tolerated
// (see the comment below about cross-thread Close() calls).
1568 void removeRange(void *pbot)
1570 debug (PRINTF) printf("%x.Gcx.removeRange(%x), nranges = %d\n", this,
1572 for (size_t i = nranges; i--;)
1574 if (ranges[i].pbot == pbot)
1577 .memmove(ranges + i, ranges + i + 1, (nranges - i) * ranges[0].sizeof);
1581 debug(PRINTF) printf("Wrong thread\n");
1583 // This is a fatal error, but ignore it.
1584 // The problem is that we can get a Close() call on a thread
1585 // other than the one the range was allocated on.
1591 * Find Pool that pointer is in.
1592 * Return null if not in a Pool.
1593 * Assume pooltable[] is sorted.
1595 Pool *findPool(void *p)
1597 if (p >= minAddr && p < maxAddr)
// Single-pool fast path.
1601 return pooltable[0];
// NOTE(review): linear scan despite the "sorted" precondition — a binary
// search would fit, but elided lines may already special-case small npools.
1604 for (size_t i = 0; i < npools; i++)
1607 pool = pooltable[i];
1608 if (p < pool.topAddr)
1609 { if (pool.baseAddr <= p)
1620 * Find base address of block containing pointer p.
1621 * Returns null if not a gc'd pointer
1623 void* findBase(void *p)
1630 size_t offset = cast(size_t)(p - pool.baseAddr);
1631 size_t pn = offset / PAGESIZE;
1632 Bins bin = cast(Bins)pool.pagetable[pn];
1634 // Adjust bit to be at start of allocated memory block
// Bin-sized block: round down to the slot boundary with the bin mask.
1637 return pool.baseAddr + (offset & notbinsize[bin]);
1639 else if (bin == B_PAGEPLUS)
// Interior page of a large block: walk back to its B_PAGE head.
1642 { --pn, offset -= PAGESIZE;
1643 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1645 return pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1649 // we are in a B_FREE or B_UNCOMMITTED page
1658 * Find size of pointer p.
1659 * Returns 0 if not a gc'd pointer
1661 size_t findSize(void *p)
1672 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1673 bin = cast(Bins)pool.pagetable[pagenum];
1674 size = binsize[bin];
// Large block: size = this page plus all following B_PAGEPLUS pages.
1676 { size_t npages = pool.ncommitted;
1680 pt = &pool.pagetable[0];
1681 for (i = pagenum + 1; i < npages; i++)
1683 if (pt[i] != B_PAGEPLUS)
1686 size = (i - pagenum) * PAGESIZE;
// Fill a BlkInfo (base, size, attr) for the block containing p.
// Mirrors findBase + findSize + getBits in one pass.
1696 BlkInfo getInfo(void* p)
1704 size_t offset = cast(size_t)(p - pool.baseAddr);
1705 size_t pn = offset / PAGESIZE;
1706 Bins bin = cast(Bins)pool.pagetable[pn];
1708 ////////////////////////////////////////////////////////////////////
1710 ////////////////////////////////////////////////////////////////////
// Base: round down by bin mask, or walk back over B_PAGEPLUS pages.
1714 info.base = pool.baseAddr + (offset & notbinsize[bin]);
1716 else if (bin == B_PAGEPLUS)
1719 { --pn, offset -= PAGESIZE;
1720 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1722 info.base = pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1724 // fix bin for use by size calc below
1725 bin = cast(Bins)pool.pagetable[pn];
1728 ////////////////////////////////////////////////////////////////////
1730 ////////////////////////////////////////////////////////////////////
1732 info.size = binsize[bin];
// Large block: count the continuation pages for the true size.
1734 { size_t npages = pool.ncommitted;
1738 pt = &pool.pagetable[0];
1739 for (i = pn + 1; i < npages; i++)
1741 if (pt[i] != B_PAGEPLUS)
1744 info.size = (i - pn) * PAGESIZE;
1747 ////////////////////////////////////////////////////////////////////
1749 ////////////////////////////////////////////////////////////////////
// Attributes are tracked per 16-byte granule of the (now block-base) offset.
1751 info.attr = getBits(pool, cast(size_t)(offset / 16));
1758 * Compute bin for size.
// Maps a byte size to its size-class bin (comparison ladder mostly elided).
1760 static Bins findBin(size_t size)
1769 else if (size <= 32)
1804 * Allocate a new pool of at least size bytes.
1805 * Sort it into pooltable[].
1806 * Mark all memory in the pool as B_FREE.
1807 * Return the actual number of bytes reserved or 0 on error.
1809 size_t reserve(size_t size)
1811 size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
1812 Pool* pool = newPool(npages);
1814 if (!pool || pool.extendPages(npages) == OPFAIL)
1816 return pool.ncommitted * PAGESIZE;
1821 * Minimizes physical memory usage by returning free pools to the OS.
// A pool can be released only if every committed page in it is B_FREE;
// the inner scan breaks out at the first in-use page.
1830 for (n = 0; n < npools; n++)
1832 pool = pooltable[n];
1833 ncommitted = pool.ncommitted;
1834 for (pn = 0; pn < ncommitted; pn++)
1836 if (cast(Bins)pool.pagetable[pn] != B_FREE)
1839 if (pn < ncommitted)
// Entirely free pool: remove it from pooltable and refresh the address bounds.
1846 .memmove(pooltable + n,
1848 (--npools - n) * (Pool*).sizeof);
1849 minAddr = pooltable[0].baseAddr;
1850 maxAddr = pooltable[npools - 1].topAddr;
1856 * Allocate a chunk of memory that is larger than a page.
1857 * Return null if out of memory.
1859 void *bigAlloc(size_t size)
1869 npages = (size + PAGESIZE - 1) / PAGESIZE;
// Retry state machine (control-flow lines elided): try existing pools,
// then collect, then minimize + new pool, as the comments below indicate.
1873 // This code could use some refinement when repeatedly
1874 // allocating very large arrays.
1876 for (n = 0; n < npools; n++)
1878 pool = pooltable[n];
1879 pn = pool.allocPages(npages);
// A collection that frees at least a quarter of total pages is deemed
// worth retrying the pool scan; otherwise grow the heap.
1893 freedpages = fullcollectshell();
1894 if (freedpages >= npools * ((POOLSIZE / PAGESIZE) / 4))
1898 // Release empty pools to prevent bloat
1900 // Allocate new pool
1901 pool = newPool(npages);
1906 pn = pool.allocPages(npages);
1907 assert(pn != OPFAIL);
1910 // Release empty pools to prevent bloat
1912 // Allocate new pool
1913 pool = newPool(npages);
1916 pn = pool.allocPages(npages);
1917 assert(pn != OPFAIL);
// Mark the span in the page table: head page B_PAGE, rest B_PAGEPLUS,
// and zero the unused tail of the last page.
1927 pool.pagetable[pn] = B_PAGE;
1929 .memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
1930 p = pool.baseAddr + pn * PAGESIZE;
1931 .memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
1932 debug (MEMSTOMP) .memset(p, 0xF1, size);
1933 //debug(PRINTF) printf("\tp = %x\n", p);
1937 return null; // let mallocNoSync handle the error
1942 * Allocate a new pool with at least npages in it.
1943 * Sort it into pooltable[].
1944 * Return null if failed.
1946 Pool *newPool(size_t npages)
1949 Pool** newpooltable;
1953 //debug(PRINTF) printf("************Gcx::newPool(npages = %d)****************\n", npages);
1955 // Round up to COMMITSIZE pages
1956 npages = (npages + (COMMITSIZE/PAGESIZE) - 1) & ~(COMMITSIZE/PAGESIZE - 1);
1958 // Minimum of POOLSIZE
1959 if (npages < POOLSIZE/PAGESIZE)
1960 npages = POOLSIZE/PAGESIZE;
1961 else if (npages > POOLSIZE/PAGESIZE)
1962 { // Give us 150% of requested size, so there's room to extend
1963 auto n = npages + (npages >> 1);
1964 if (n < size_t.max/PAGESIZE)
1968 // Allocate successively larger pools up to 8 megs
1974 n = 8; // cap pool size at 8 megs
1975 n *= (POOLSIZE / PAGESIZE);
// The Pool descriptor itself lives on the C heap, zero-initialized.
1980 pool = cast(Pool *) .calloc(1, Pool.sizeof);
1983 pool.initialize(npages);
// Grow pooltable by one and insert the pool in address-sorted position,
// keeping the sort invariant findPool relies on.
1987 newnpools = npools + 1;
1988 newpooltable = cast(Pool **) .realloc(pooltable, newnpools * (Pool *).sizeof);
1992 // Sort pool into newpooltable[]
1993 for (i = 0; i < npools; i++)
1995 if (pool.opCmp(newpooltable[i]) < 0)
1998 .memmove(newpooltable + i + 1, newpooltable + i, (npools - i) * (Pool *).sizeof);
1999 newpooltable[i] = pool;
2001 pooltable = newpooltable;
2004 minAddr = pooltable[0].baseAddr;
2005 maxAddr = pooltable[npools - 1].topAddr;
2017 * Allocate a page of bin's.
// Grab one free page from any pool, tag it with `bin`, and carve it into
// fixed-size slots threaded onto the bin's free list.
2021 int allocPage(Bins bin)
2029 //debug(PRINTF) printf("Gcx::allocPage(bin = %d)\n", bin);
2030 for (n = 0; n < npools; n++)
2032 pool = pooltable[n];
2033 pn = pool.allocPages(1);
2040 pool.pagetable[pn] = cast(ubyte)bin;
2042 // Convert page to free list
2043 size_t size = binsize[bin];
2044 List **b = &bucket[bin];
2046 p = pool.baseAddr + pn * PAGESIZE;
2047 ptop = p + PAGESIZE;
// Each slot's first word links to the previous head of the free list.
2048 for (; p < ptop; p += size)
2050 (cast(List *)p).next = *b;
2058 * Search a range of memory values and mark any pointers into the GC pool.
2060 void mark(void *pbot, void *ptop)
2062 void **p1 = cast(void **)pbot;
2063 void **p2 = cast(void **)ptop;
2067 //printf("marking range: %p -> %p\n", pbot, ptop);
// Conservative scan: treat every word in [pbot, ptop) as a potential pointer.
2068 for (; p1 < p2; p1++)
2071 byte *p = cast(byte *)(*p1);
2073 //if (log) debug(PRINTF) printf("\tmark %x\n", p);
2074 if (p >= minAddr && p < maxAddr)
// One-entry page cache: skip words landing on the same large page as the
// previous hit (only populated for B_PAGE/B_PAGEPLUS, see below).
2076 if ((cast(size_t)p & ~(PAGESIZE-1)) == pcache)
2082 size_t offset = cast(size_t)(p - pool.baseAddr);
2084 size_t pn = offset / PAGESIZE;
2085 Bins bin = cast(Bins)pool.pagetable[pn];
2087 //debug(PRINTF) printf("\t\tfound pool %x, base=%x, pn = %d, bin = %d, biti = x%x\n", pool, pool.baseAddr, pn, bin, biti);
2089 // Adjust bit to be at start of allocated memory block
2092 biti = (offset & notbinsize[bin]) >> 4;
2093 //debug(PRINTF) printf("\t\tbiti = x%x\n", biti);
2095 else if (bin == B_PAGEPLUS)
// Interior page of a large block: back up to the head page's bit.
2099 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
2100 biti = pn * (PAGESIZE / 16);
2104 // Don't mark bits in B_FREE or B_UNCOMMITTED pages
2108 if (bin >= B_PAGE) // Cache B_PAGE and B_PAGEPLUS lookups
2109 pcache = cast(size_t)p & ~(PAGESIZE-1);
2111 //debug(PRINTF) printf("\t\tmark(x%x) = %d\n", biti, pool.mark.test(biti));
// Newly reached block: mark it, and queue it for scanning unless NO_SCAN.
2112 if (!pool.mark.test(biti))
2114 //if (log) debug(PRINTF) printf("\t\tmarking %x\n", p);
2115 pool.mark.set(biti);
2116 if (!pool.noscan.test(biti))
2118 pool.scan.set(biti);
2121 log_parent(sentinel_add(pool.baseAddr + biti * 16), sentinel_add(pbot));
// Propagate "something newly queued" so fullcollect re-runs the scan pass.
2126 anychanges |= changes;
2131 * Return number of full pages free'd.
2133 size_t fullcollectshell()
2135 // The purpose of the 'shell' is to ensure all the registers
2136 // get put on the stack so they'll be scanned
// GDC path: the GCC builtin spills call-saved registers to the stack.
2141 __builtin_unwind_init();
// X86 path (inline asm elided): registers are pushed into these locals.
2148 uint eax,ecx,edx,ebx,ebp,esi,edi;
2161 else version (X86_64)
2163 ulong rax,rbx,rcx,rdx,rbp,rsi,rdi,rsp,r8,r9,r10,r11,r12,r13,r14,r15;
2166 movq rax[RBP], RAX ;
2167 movq rbx[RBP], RBX ;
2168 movq rcx[RBP], RCX ;
2169 movq rdx[RBP], RDX ;
2170 movq rbp[RBP], RBP ;
2171 movq rsi[RBP], RSI ;
2172 movq rdi[RBP], RDI ;
2173 movq rsp[RBP], RSP ;
2176 movq r10[RBP], R10 ;
2177 movq r11[RBP], R11 ;
2178 movq r12[RBP], R12 ;
2179 movq r13[RBP], R13 ;
2180 movq r14[RBP], R14 ;
2181 movq r15[RBP], R15 ;
2186 static assert( false, "Architecture not supported." );
// sp is the captured stack pointer (capture lines elided); the real
// collection runs with it as the stack-scan top.
2197 result = fullcollect(sp);
2220 size_t fullcollect(void *stackTop)
2225 debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
2227 thread_suspendAll();
2233 for (n = 0; n < npools; n++)
2235 pool = pooltable[n];
2238 pool.freebits.zero();
2241 // Mark each free entry, so it doesn't get scanned
2242 for (n = 0; n < B_PAGE; n++)
2244 for (List *list = bucket[n]; list; list = list.next)
2246 pool = findPool(list);
2248 pool.freebits.set(cast(size_t)(cast(byte*)list - pool.baseAddr) / 16);
2252 for (n = 0; n < npools; n++)
2254 pool = pooltable[n];
2255 pool.mark.copy(&pool.freebits);
2258 rt_scanStaticData( &mark );
2260 version (MULTI_THREADED)
2264 // Scan stacks and registers for each paused thread
2265 thread_scanAll( &mark, stackTop );
2272 // Scan stack for main thread
2273 debug(PRINTF) printf(" scan stack bot = %x, top = %x\n", stackTop, stackBottom);
2274 version (STACKGROWSDOWN)
2275 mark(stackTop, stackBottom);
2277 mark(stackBottom, stackTop);
2282 debug(COLLECT_PRINTF) printf("scan roots[]\n");
2283 mark(roots, roots + nroots);
2286 debug(COLLECT_PRINTF) printf("scan ranges[]\n");
2288 for (n = 0; n < nranges; n++)
2290 debug(COLLECT_PRINTF) printf("\t%x .. %x\n", ranges[n].pbot, ranges[n].ptop);
2291 mark(ranges[n].pbot, ranges[n].ptop);
2295 debug(COLLECT_PRINTF) printf("\tscan heap\n");
2299 for (n = 0; n < npools; n++)
2305 pool = pooltable[n];
2307 bbase = pool.scan.base();
2308 btop = bbase + pool.scan.nwords;
2309 for (b = bbase; b < btop;)
2323 o = pool.baseAddr + (b - bbase) * 32 * 16;
2324 if (!(bitm & 0xFFFF))
2329 for (; bitm; o += 16, bitm >>= 1)
2334 pn = cast(size_t)(o - pool.baseAddr) / PAGESIZE;
2335 bin = cast(Bins)pool.pagetable[pn];
2338 mark(o, o + binsize[bin]);
2340 else if (bin == B_PAGE || bin == B_PAGEPLUS)
2342 if (bin == B_PAGEPLUS)
2344 while (pool.pagetable[pn - 1] != B_PAGE)
2348 while (pn + u < pool.ncommitted && pool.pagetable[pn + u] == B_PAGEPLUS)
2350 mark(o, o + u * PAGESIZE);
// -- sweep phase: free every allocation whose mark bit was not set ---------
// Walks all committed pages of all pools. Small-bin pages are scanned object
// by object (one attribute bit per 16 bytes, so the stride is size/16); big
// B_PAGE allocations are freed together with their B_PAGEPLUS continuation
// pages. Unmarked objects get their finalizer run (if registered), their
// freebits set, and their attribute bits cleared.
2359 // Free up everything not marked
2360 debug(COLLECT_PRINTF) printf("\tfree'ing\n");
2361 size_t freedpages = 0;
2363 for (n = 0; n < npools; n++)
2368 pool = pooltable[n];
// bbase walks the mark bitset one page's worth of bits at a time:
// PAGESIZE/16 bits per page == PAGESIZE/(32*16) 32-bit words per page.
2369 bbase = pool.mark.base();
2370 ncommitted = pool.ncommitted;
2371 for (pn = 0; pn < ncommitted; pn++, bbase += PAGESIZE / (32 * 16))
2373 Bins bin = cast(Bins)pool.pagetable[pn];
2380 auto size = binsize[bin];
2382 p = pool.baseAddr + pn * PAGESIZE;
2383 ptop = p + PAGESIZE;
// biti = index of this page's first attribute bit (1 bit per 16 bytes).
2384 biti = pn * (PAGESIZE/16);
2385 bitstride = size / 16;
// Disabled fast path: if no object on the page is marked (8 zero words of
// mark bits), release the whole page at once. Left under version(none)
// because, per the original author, freebits must also be cleared for it
// to be correct.
2387 version(none) // BUG: doesn't work because freebits() must also be cleared
2389 // If free'd entire page
2390 if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 && bbase[3] == 0 &&
2391 bbase[4] == 0 && bbase[5] == 0 && bbase[6] == 0 && bbase[7] == 0)
2393 for (; p < ptop; p += size, biti += bitstride)
// finals is lazily allocated, hence the nbits guard before testing it.
2395 if (pool.finals.nbits && pool.finals.testClear(biti))
2396 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2397 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
2399 List *list = cast(List *)p;
2400 //debug(PRINTF) printf("\tcollecting %x\n", list);
2401 log_free(sentinel_add(list));
// MEMSTOMP: overwrite freed memory with 0xF3 to catch use-after-free.
2403 debug (MEMSTOMP) .memset(p, 0xF3, size);
2405 pool.pagetable[pn] = B_FREE;
2407 //debug(PRINTF) printf("freeing entire page %d\n", pn);
// Normal path: examine each object slot on the page individually.
2411 for (; p < ptop; p += size, biti += bitstride)
2413 if (!pool.mark.test(biti))
2415 sentinel_Invariant(sentinel_add(p));
// Record the slot as free; the recover phase below threads it onto
// the bin's free list.
2417 pool.freebits.set(biti);
2418 if (pool.finals.nbits && pool.finals.testClear(biti))
2419 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2420 clrBits(pool, biti, BlkAttr.ALL_BITS);
2422 List *list = cast(List *)p;
2423 debug(PRINTF) printf("\tcollecting %x\n", list);
2424 log_free(sentinel_add(list));
2426 debug (MEMSTOMP) .memset(p, 0xF3, size);
// Big allocations: one attribute bit for the whole block, kept on its
// head (B_PAGE) page.
2432 else if (bin == B_PAGE)
2433 { size_t biti = pn * (PAGESIZE / 16);
2435 if (!pool.mark.test(biti))
2436 { byte *p = pool.baseAddr + pn * PAGESIZE;
2438 sentinel_Invariant(sentinel_add(p));
2439 if (pool.finals.nbits && pool.finals.testClear(biti))
2440 rt_finalize(sentinel_add(p), false/*noStack > 0*/);
2441 clrBits(pool, biti, BlkAttr.ALL_BITS);
2443 debug(COLLECT_PRINTF) printf("\tcollecting big %x\n", p);
2444 log_free(sentinel_add(p));
2445 pool.pagetable[pn] = B_FREE;
2447 debug (MEMSTOMP) .memset(p, 0xF3, PAGESIZE);
// Also release every continuation page of this block.
2448 while (pn + 1 < ncommitted && pool.pagetable[pn + 1] == B_PAGEPLUS)
2451 pool.pagetable[pn] = B_FREE;
2456 .memset(p, 0xF3, PAGESIZE);
// -- recover phase: convert fully-free small-bin pages to B_FREE and thread
// the remaining free slots onto the per-bin free lists (bucket[]) ----------
2467 // Free complete pages, rebuild free list
2468 debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
2469 size_t recoveredpages = 0;
2470 for (n = 0; n < npools; n++)
2474 pool = pooltable[n];
2475 ncommitted = pool.ncommitted;
2476 for (pn = 0; pn < ncommitted; pn++)
2478 Bins bin = cast(Bins)pool.pagetable[pn];
2484 size_t size = binsize[bin];
2485 size_t bitstride = size / 16;
// [bitbase, bittop) = the attribute-bit range covering this page.
2486 size_t bitbase = pn * (PAGESIZE / 16);
2487 size_t bittop = bitbase + (PAGESIZE / 16);
// If every slot on the page has its freebit set, the whole page can be
// reclassified as B_FREE (elided lines bail out of this scan otherwise).
2491 for (biti = bitbase; biti < bittop; biti += bitstride)
2492 { if (!pool.freebits.test(biti))
2495 pool.pagetable[pn] = B_FREE;
// Page is still partially live: push each free slot onto bucket[bin].
2500 p = pool.baseAddr + pn * PAGESIZE;
2501 for (u = 0; u < PAGESIZE; u += size)
2502 { biti = bitbase + u / 16;
2503 if (pool.freebits.test(biti))
2506 list = cast(List *)(p + u);
// Skip the store when the slot already heads the list, so clean
// (untouched) pages are not dirtied needlessly.
2507 if (list.next != bucket[bin]) // avoid unnecessary writes
2508 list.next = bucket[bin];
2516 debug(COLLECT_PRINTF) printf("recovered pages = %d\n", recoveredpages);
2517 debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, npools);
// Collection result: total pages made available this cycle.
2519 return freedpages + recoveredpages;
// Return the BlkAttr flag mask (FINALIZE / NO_SCAN) recorded for the object
// whose attribute-bit index within `pool` is `biti`.
// (Listing fragment: the `bits` accumulator declaration and the final
// `return bits;` fall on elided lines.)
2526 uint getBits(Pool* pool, size_t biti)
// finals is lazily allocated (see setBits), so guard on nbits before testing.
2535 if (pool.finals.nbits &&
2536 pool.finals.test(biti))
2537 bits |= BlkAttr.FINALIZE;
2538 if (pool.noscan.test(biti))
2539 bits |= BlkAttr.NO_SCAN;
// NO_MOVE support is stubbed out — the nomove bitset is not maintained.
2540 // if (pool.nomove.nbits &&
2541 // pool.nomove.test(biti))
2542 // bits |= BlkAttr.NO_MOVE;
// Set the BlkAttr flags in `mask` for the object at attribute-bit index
// `biti` within `pool`. Counterpart of getBits/clrBits.
2550 void setBits(Pool* pool, size_t biti, uint mask)
2557 if (mask & BlkAttr.FINALIZE)
// The finals bitset is allocated lazily, on first FINALIZE request; it is
// sized to match the pool's mark bitset (one bit per 16 bytes).
2559 if (!pool.finals.nbits)
2560 pool.finals.alloc(pool.mark.nbits);
2561 pool.finals.set(biti);
2563 if (mask & BlkAttr.NO_SCAN)
2565 pool.noscan.set(biti);
// NO_MOVE support is stubbed out — kept for symmetry with getBits/clrBits.
2567 // if (mask & BlkAttr.NO_MOVE)
2569 // if (!pool.nomove.nbits)
2570 // pool.nomove.alloc(pool.mark.nbits);
2571 // pool.nomove.set(biti);
// Clear the BlkAttr flags in `mask` for the object at attribute-bit index
// `biti` within `pool`. Used by the sweep phase (with BlkAttr.ALL_BITS)
// when an object is collected.
2579 void clrBits(Pool* pool, size_t biti, uint mask)
// finals may never have been allocated; only clear if it exists.
2586 if (mask & BlkAttr.FINALIZE && pool.finals.nbits)
2587 pool.finals.clear(biti);
2588 if (mask & BlkAttr.NO_SCAN)
2589 pool.noscan.clear(biti);
// NO_MOVE support is stubbed out, matching getBits/setBits.
2590 // if (mask & BlkAttr.NO_MOVE && pool.nomove.nbits)
2591 // pool.nomove.clear(biti);
// Leak detector (active under debug(LOGGING)): records every allocation in
// `current` (a LogArray — presumably; declaration is on elided lines) so a
// collection can report pointers that appeared since the previous cycle.
2595 /***** Leak Detector ******/
// log_init: pre-reserve the allocation log to avoid early regrowth.
2606 //debug(PRINTF) printf("+log_init()\n");
2607 current.reserve(1000);
2609 //debug(PRINTF) printf("-log_init()\n");
// log_malloc: record allocation of `size` bytes at `p` (body largely elided).
2613 void log_malloc(void *p, size_t size)
2615 //debug(PRINTF) printf("+log_malloc(p = %x, size = %d)\n", p, size);
2628 //debug(PRINTF) printf("-log_malloc()\n");
// log_free: remove `p` from the allocation log; complains when asked to
// free a pointer it never saw allocated.
2632 void log_free(void *p)
2634 //debug(PRINTF) printf("+log_free(%x)\n", p);
2637 i = current.find(p);
2640 debug(PRINTF) printf("free'ing unallocated memory %x\n", p);
2644 //debug(PRINTF) printf("-log_free()\n");
// log_collect: after a collection, report (a) pointers allocated since the
// previous cycle (in `current` but not `prev`) and (b) all surviving roots,
// then snapshot current into prev for the next cycle.
2650 //debug(PRINTF) printf("+log_collect()\n");
2651 // Print everything in current that is not in prev
2653 debug(PRINTF) printf("New pointers this cycle: --------------------------------\n");
2655 for (size_t i = 0; i < current.dim; i++)
2659 j = prev.find(current.data[i].p);
2661 current.data[i].print();
2666 debug(PRINTF) printf("All roots this cycle: --------------------------------\n");
2667 for (size_t i = 0; i < current.dim; i++)
2672 p = current.data[i].p;
// Entries whose recorded parent is not inside any GC pool are roots.
2673 if (!findPool(current.data[i].parent))
// Prefix 'N' marks entries that are new since the previous cycle.
2675 j = prev.find(current.data[i].p);
2677 debug(PRINTF) printf("N");
// NOTE(review): stray double semicolon below — harmless, but tidy up
// when this source is restored.
2679 debug(PRINTF) printf(" ");;
2680 current.data[i].print();
2684 debug(PRINTF) printf("Used = %d-------------------------------------------------\n", used);
// NOTE(review): "¤t" is mojibake — "&current" was eaten by HTML-entity
// decoding ("&curren" -> U+00A4). Restore to prev.copy(&current).
2685 prev.copy(¤t);
2687 debug(PRINTF) printf("-log_collect()\n");
// log_parent: record, for the logged allocation `p`, the pointer `parent`
// through which it was reached — used by the leak report to distinguish
// roots from interior references. Warns if `p` was never logged, and dumps
// pool/bin diagnostics for it.
2691 void log_parent(void *p, void *parent)
2693 //debug(PRINTF) printf("+log_parent()\n");
2696 i = current.find(p);
2699 debug(PRINTF) printf("parent'ing unallocated memory %x, parent = %x\n", p, parent);
// Diagnostics: locate p's page and bin within its pool (pool lookup is on
// elided lines).
2703 size_t offset = cast(size_t)(p - pool.baseAddr);
2705 size_t pn = offset / PAGESIZE;
2706 Bins bin = cast(Bins)pool.pagetable[pn];
2707 biti = (offset & notbinsize[bin]);
2708 debug(PRINTF) printf("\tbin = %d, offset = x%x, biti = x%x\n", bin, offset, biti);
2712 current.data[i].parent = parent;
2714 //debug(PRINTF) printf("-log_parent()\n");
// No-op stubs used when debug(LOGGING) is disabled: the call sites stay in
// place and compile away to nothing.
2721 void log_malloc(void *p, size_t size) { }
2722 void log_free(void *p) { }
2723 void log_collect() { }
2724 void log_parent(void *p, void *parent) { }
2729 /* ============================ Pool =============================== */
// Per-pool bookkeeping bitsets. Each has one bit per 16 bytes of pool
// memory (they are alloc'd with poolsize/16 bits in initialize()).
2736 GCBits mark; // entries already scanned, or should not be scanned
2737 GCBits scan; // entries that need to be scanned
2738 GCBits freebits; // entries that are on the free list
2739 GCBits finals; // entries that need finalizer run on them
2740 GCBits noscan; // entries that should not be scanned
// Pages are committed lazily (see extendPages); only the first ncommitted
// entries of pagetable[] are usable.
2743 size_t ncommitted; // ncommitted <= npages
// initialize: reserve npages of address space for this pool, allocate the
// bookkeeping bitsets, and mark every page uncommitted. Memory is only
// reserved here; actual commit happens page-group-wise in extendPages.
2747 void initialize(size_t npages)
2751 //debug(PRINTF) printf("Pool::Pool(%u)\n", npages);
2752 poolsize = npages * PAGESIZE;
2753 assert(poolsize >= POOLSIZE);
2754 baseAddr = cast(byte *)os_mem_map(poolsize);
2756 // Some of the code depends on page alignment of memory pools
2757 assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);
2761 //debug(PRINTF) printf("GC fail: poolsize = x%x, errno = %d\n", poolsize, errno);
2762 //debug(PRINTF) printf("message = '%s'\n", sys_errlist[errno]);
2768 topAddr = baseAddr + poolsize;
// One bit per 16 bytes — matches the biti = offset/16 indexing used by the
// collector. (scan covers the whole pool; finals stays lazily allocated.)
2770 mark.alloc(cast(size_t)poolsize / 16);
2771 scan.alloc(cast(size_t)poolsize / 16);
2772 freebits.alloc(cast(size_t)poolsize / 16);
2773 noscan.alloc(cast(size_t)poolsize / 16);
// One Bins byte per page; all pages start out uncommitted.
2775 pagetable = cast(ubyte*) .malloc(npages);
2777 onOutOfMemoryError();
2778 .memset(pagetable, B_UNCOMMITTED, npages);
2780 this.npages = npages;
// -- destructor (Dtor) fragment: decommit the committed prefix, then unmap
// the whole reservation; both OS calls are expected to succeed.
2793 result = os_mem_decommit(baseAddr, 0, ncommitted * PAGESIZE);
2794 assert(result == 0);
2800 result = os_mem_unmap(baseAddr, npages * PAGESIZE);
2801 assert(result == 0);
// Empty hook so callers can invoke pool.Invariant() unconditionally; the
// real checks live in the (elided) `invariant` block below it.
2819 void Invariant() { }
// Sub-bitset invariants are disabled here.
2826 //freebits.Invariant();
2827 //finals.Invariant();
2828 //noscan.Invariant();
2832 //if (baseAddr + npages * PAGESIZE != topAddr)
2833 //printf("baseAddr = %p, npages = %d, topAddr = %p\n", baseAddr, npages, topAddr);
// Address-range consistency: topAddr must be exactly npages past baseAddr,
// and the committed prefix can never exceed the reservation.
2834 assert(baseAddr + npages * PAGESIZE == topAddr);
2835 assert(ncommitted <= npages);
// Every pagetable entry must hold a valid Bins value.
2838 for (size_t i = 0; i < npages; i++)
2839 { Bins bin = cast(Bins)pagetable[i];
2841 assert(bin < B_MAX);
2847 * Allocate n pages from Pool.
2848 * Returns OPFAIL on failure.
// Strategy: first-fit scan for a run of n consecutive B_FREE pages in the
// already-committed region; if none exists, fall back to committing more
// pages via extendPages. (Run-length tracking is on elided lines.)
2850 size_t allocPages(size_t n)
2855 //debug(PRINTF) printf("Pool::allocPages(n = %d)\n", n);
2857 for (i = 0; i < ncommitted; i++)
2859 if (pagetable[i] == B_FREE)
// i - n + 1 is the first page of the run that just reached length n.
2862 { //debug(PRINTF) printf("\texisting pn = %d\n", i - n + 1);
2869 return extendPages(n);
2873 * Extend Pool by n pages.
2874 * Returns OPFAIL on failure.
// Commits more of the reserved address range. The request is rounded up to
// a multiple of COMMITSIZE/PAGESIZE pages (but clamped to what remains of
// the reservation) so commits happen in large chunks.
2876 size_t extendPages(size_t n)
2878 //debug(PRINTF) printf("Pool::extendPages(n = %d)\n", n);
// Only possible while the reservation has room for n more pages.
2879 if (ncommitted + n <= npages)
// Round n up to the commit granularity (COMMITSIZE/PAGESIZE is a power of
// two for this mask trick to work).
2883 tocommit = (n + (COMMITSIZE/PAGESIZE) - 1) & ~(COMMITSIZE/PAGESIZE - 1);
2884 if (ncommitted + tocommit > npages)
2885 tocommit = npages - ncommitted;
2886 //debug(PRINTF) printf("\tlooking to commit %d more pages\n", tocommit);
2888 if (os_mem_commit(baseAddr, ncommitted * PAGESIZE, tocommit * PAGESIZE) == 0)
// Newly committed pages are free; fold any trailing B_FREE run that was
// already committed into the returned region (walk i backwards).
2890 .memset(pagetable + ncommitted, B_FREE, tocommit);
2891 auto i = ncommitted;
2892 ncommitted += tocommit;
2894 while (i && pagetable[i - 1] == B_FREE)
2899 //debug(PRINTF) printf("\tfailed to commit %d pages\n", tocommit);
2907 * Free npages pages starting with pagenum.
// Pages are freed by simply re-tagging them B_FREE in the pagetable; the
// memory itself stays committed for reuse.
2909 void freePages(size_t pagenum, size_t npages)
2911 .memset(&pagetable[pagenum], B_FREE, npages);
2916 * Used for sorting pooltable[]
// opCmp fragment (signature elided): orders pools by base address so
// pooltable[] can be binary-searched; negative/0/positive result.
2920 if (baseAddr < p2.baseAddr)
2923 return cast(int)(baseAddr > p2.baseAddr);
2928 /* ============================ SENTINEL =============================== */
// debug(SENTINEL) layout around each user allocation:
//   [size_t size][size_t SENTINEL_PRE][user data ...][ubyte SENTINEL_POST]
// so SENTINEL_EXTRA = 2 * size_t.sizeof + 1 bytes of overhead. The pre word
// catches underruns, the post byte catches overruns.
2933 const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
2934 const ubyte SENTINEL_POST = 0xF5; // 8 bits
2935 const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;
// Accessors: p is the USER pointer; size/pre live immediately before it,
// post lives immediately after the recorded user size.
2938 size_t* sentinel_size(void *p) { return &(cast(size_t *)p)[-2]; }
2939 size_t* sentinel_pre(void *p) { return &(cast(size_t *)p)[-1]; }
2940 ubyte* sentinel_post(void *p) { return &(cast(ubyte *)p)[*sentinel_size(p)]; }
// Stamp the guards around a fresh allocation of `size` user bytes.
2943 void sentinel_init(void *p, size_t size)
2945 *sentinel_size(p) = size;
2946 *sentinel_pre(p) = SENTINEL_PRE;
2947 *sentinel_post(p) = SENTINEL_POST;
// Assert both guards are intact (i.e. no underrun/overrun happened).
2951 void sentinel_Invariant(void *p)
2953 assert(*sentinel_pre(p) == SENTINEL_PRE);
2954 assert(*sentinel_post(p) == SENTINEL_POST);
// Convert a raw (block-start) pointer to the user pointer...
2958 void *sentinel_add(void *p)
2960 return p + 2 * size_t.sizeof;
// ...and back again.
2964 void *sentinel_sub(void *p)
2966 return p - 2 * size_t.sizeof;
// Non-SENTINEL build: zero overhead, and the helpers collapse to no-ops /
// identity functions (bodies are on elided lines past this fragment).
2971 const uint SENTINEL_EXTRA = 0;
2974 void sentinel_init(void *p, size_t size)
2979 void sentinel_Invariant(void *p)
2984 void *sentinel_add(void *p)
2990 void *sentinel_sub(void *p)