2 * This module contains the garbage collector implementation.
4 * Copyright: Copyright (C) 2001-2007 Digital Mars, www.digitalmars.com.
7 * This software is provided 'as-is', without any express or implied
8 * warranty. In no event will the authors be held liable for any damages
9 * arising from the use of this software.
11 * Permission is granted to anyone to use this software for any purpose,
12 * including commercial applications, and to alter it and redistribute it
13 * freely, in both source and binary form, subject to the following
16 * o The origin of this software must not be misrepresented; you must not
17 * claim that you wrote the original software. If you use this software
18 * in a product, an acknowledgment in the product documentation would be
19 * appreciated but is not required.
20 * o Altered source versions must be plainly marked as such, and must not
21 * be misrepresented as being the original software.
22 * o This notice may not be removed or altered from any source
24 * Authors: Walter Bright, David Friedman, Sean Kelly
27 // D Programming Language Garbage Collector implementation
29 /************** Debugging ***************************/
31 //debug = PRINTF; // turn on printf's
32 //debug = COLLECT_PRINTF; // turn on printf's
33 //debug = THREADINVARIANT; // check thread integrity
34 //debug = LOGGING; // log allocations / frees
35 //debug = MEMSTOMP; // stomp on memory
36 //debug = SENTINEL; // add underrun/overrun protection
37 //debug = PTRCHECK; // more pointer checking
38 //debug = PTRCHECK2; // thorough but slow pointer checking
40 /*************** Configuration *********************/
42 version = STACKGROWSDOWN; // growing the stack means subtracting from the stack pointer
43 // (use for Intel X86 CPUs)
44 // else growing the stack means adding to the stack pointer
45 version = MULTI_THREADED; // produce multithreaded version
47 /***************************************************/
49 private import gcbits;
50 private import gcstats;
51 private import gcalloc;
53 private import cstdlib = stdc.stdlib : calloc, free, malloc, realloc;
54 private import stdc.string;
56 debug private import stdc.stdio;
60 // BUG: The following import will likely not work, since the gcc
61 // subdirectory is elsewhere. Instead, perhaps the functions
62 // could be declared directly or some other resolution could
64 private import gcc.builtins; // for __builtin_unwind_init
// NOTE(review): this file is a fragmentary extraction of a D garbage-collector
// module; the leading number on each line is the original source line number,
// and many intervening lines are missing. Only comments are added below.
// Block attribute flags (bit-mask values) applied per allocation.
72 FINALIZE = 0b0000_0001,
73 NO_SCAN = 0b0000_0010,
74 NO_MOVE = 0b0000_0100,
75 ALL_BITS = 0b1111_1111
// Hooks supplied by the D runtime: stack bounds, finalization, and callbacks
// for scanning static data and thread stacks.
85 extern (C) void* rt_stackBottom();
86 extern (C) void* rt_stackTop();
88 extern (C) void rt_finalize( void* p, bool det = true );
// scanFn receives a (bottom, top) address pair to scan for pointers.
90 alias void delegate( void*, void* ) scanFn;
92 extern (C) void rt_scanStaticData( scanFn scan );
// Thread-coordination primitives, only present in the MULTI_THREADED build.
94 version (MULTI_THREADED)
96 extern (C) bool thread_needLock();
97 extern (C) void thread_suspendAll();
98 extern (C) void thread_resumeAll();
100 extern (C) void thread_scanAll( scanFn fn, void* curStackTop = null );
103 extern (C) void onOutOfMemoryError();
// Sentinel value meaning "operation failed / not found" (all bits set).
107 OPFAIL = ~cast(size_t)0
115 /* ======================= Leak Detector =========================== */
// Debug-only allocation log (active under debug(LOGGING)); prints one record.
130 printf(" p = %x, size = %d, parent = %x ", p, size, parent);
133 printf("%s(%u)", file, line);
// LogArray.reserve: grow the log buffer so at least nentries more fit.
// Growth policy visible below: new capacity = (dim + nentries) * 2.
153 void reserve(size_t nentries)
155 assert(dim <= allocdim);
156 if (allocdim - dim < nentries)
158 allocdim = (dim + nentries) * 2;
159 assert(dim + nentries <= allocdim);
// First allocation path: malloc fresh storage; abort on OOM.
162 data = cast(Log*)cstdlib.malloc(allocdim * Log.sizeof);
163 if (!data && allocdim)
164 onOutOfMemoryError();
// Reallocation path: malloc new storage and copy existing entries over.
169 newdata = cast(Log*)cstdlib.malloc(allocdim * Log.sizeof);
170 if (!newdata && allocdim)
171 onOutOfMemoryError();
172 memcpy(newdata, data, dim * Log.sizeof);
// LogArray.remove: delete entry i by shifting the tail down one slot.
186 void remove(size_t i)
188 memmove(data + i, data + i + 1, (dim - i) * Log.sizeof);
// Linear search over entries; OPFAIL signals "not found".
195 for (size_t i = 0; i < dim; i++)
200 return OPFAIL; // not found
// LogArray.copy: bulk-copy another log's entries into this one.
204 void copy(LogArray *from)
206 reserve(from.dim - dim);
207 assert(from.dim <= allocdim);
208 memcpy(data, from.data, from.dim * Log.sizeof);
215 /* ============================ GC =============================== */
// GCLock exists only so its ClassInfo can serve as the global monitor object.
218 class GCLock { } // just a dummy so we can get a global lock
221 const uint GCVERSION = 1; // increment every time we change interface
226 // For passing to debug code
230 uint gcversion = GCVERSION;
232 Gcx *gcx; // implementation
233 static ClassInfo gcLock; // global lock
// GC initialization: zero-allocate the Gcx implementation struct and record
// the stack bottom from the runtime; abort on OOM.
238 gcLock = GCLock.classinfo;
239 gcx = cast(Gcx*)cstdlib.calloc(1, Gcx.sizeof);
241 onOutOfMemoryError();
243 setStackBottom(rt_stackBottom());
251 //debug(PRINTF) printf("Thread %x ", pthread_self());
252 //debug(PRINTF) printf("GC.Dtor()\n");
268 gcx.thread_Invariant();
// enable(): decrements gcx.disabled under the global lock when other threads
// exist; collections run only while disabled == 0.
278 if (!thread_needLock())
280 assert(gcx.disabled > 0);
283 else synchronized (gcLock)
285 assert(gcx.disabled > 0);
// disable(): same locking pattern (body lines not visible in this extract).
296 if (!thread_needLock())
300 else synchronized (gcLock)
// getAttr: read the per-block attribute bits for p. biti is the bit index;
// "/ 16" shows attribute bits are kept at 16-byte granularity within a pool.
310 uint getAttr(void* p)
319 Pool* pool = gcx.findPool(p);
324 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
326 oldb = gcx.getBits(pool, biti);
331 if (!thread_needLock())
335 else synchronized (gcLock)
// setAttr: OR mask into p's attribute bits, returning the previous bits.
345 uint setAttr(void* p, uint mask)
354 Pool* pool = gcx.findPool(p);
359 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
361 oldb = gcx.getBits(pool, biti);
362 gcx.setBits(pool, biti, mask);
367 if (!thread_needLock())
371 else synchronized (gcLock)
// clrAttr: clear mask from p's attribute bits, returning the previous bits.
381 uint clrAttr(void* p, uint mask)
390 Pool* pool = gcx.findPool(p);
395 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
397 oldb = gcx.getBits(pool, biti);
398 gcx.clrBits(pool, biti, mask);
403 if (!thread_needLock())
407 else synchronized (gcLock)
// malloc: public entry point; takes the global lock only when more than one
// thread exists, then delegates to mallocNoSync.
417 void *malloc(size_t size, uint bits = 0)
424 if (!thread_needLock())
426 return mallocNoSync(size, bits);
428 else synchronized (gcLock)
430 return mallocNoSync(size, bits);
// mallocNoSync: the unlocked allocation path. Small requests come from
// per-size-class free lists ("buckets"); large ones go to bigAlloc.
// SENTINEL_EXTRA pads the request when sentinel debugging is compiled in.
438 private void *mallocNoSync(size_t size, uint bits = 0)
445 //debug(PRINTF) printf("GC::malloc(size = %d, gcx = %p)\n", size, gcx);
447 //debug(PRINTF) printf("gcx.self = %x, pthread_self() = %x\n", gcx.self, pthread_self());
449 size += SENTINEL_EXTRA;
452 // Cache previous binsize lookup - Dave Fladebo.
453 static size_t lastsize = -1;
455 if (size == lastsize)
459 bin = gcx.findBin(size);
// Free list empty: try to grab a page, else collect (unless collections are
// disabled) to recover memory.
469 if (!gcx.allocPage(bin) && !gcx.disabled) // try to find a new page
471 if (!thread_needLock())
473 /* Then we haven't locked it yet. Be sure
474 * and lock for a collection, since a finalizer
475 * may start a new thread.
477 synchronized (gcLock)
479 gcx.fullcollectshell();
482 else if (!gcx.fullcollectshell()) // collect to find a new page
// Collection didn't help: allocate a fresh pool; OOM if that also fails.
487 if (!gcx.bucket[bin] && !gcx.allocPage(bin))
490 gcx.newPool(1); // allocate new pool to find a new page
491 result = gcx.allocPage(bin);
493 onOutOfMemoryError();
498 // Return next item from free list
499 gcx.bucket[bin] = (cast(List*)p).next;
// Scannable blocks get their unused tail zeroed so stale pointers in the
// slack space can't keep garbage alive.
500 if( !(bits & BlkAttr.NO_SCAN) )
501 memset(p + size, 0, binsize[bin] - size);
502 //debug(PRINTF) printf("\tmalloc => %x\n", p);
503 debug (MEMSTOMP) memset(p, 0xF0, size);
// Large-allocation path: delegate to bigAlloc.
507 p = gcx.bigAlloc(size);
509 onOutOfMemoryError();
511 size -= SENTINEL_EXTRA;
513 sentinel_init(p, size);
514 gcx.log_malloc(p, size);
// Record the requested attribute bits for the new block.
518 Pool *pool = gcx.findPool(p);
521 gcx.setBits(pool, cast(size_t)(p - pool.baseAddr) / 16, bits);
// calloc: locked wrapper over callocNoSync (same pattern as malloc).
530 void *calloc(size_t size, uint bits = 0)
537 if (!thread_needLock())
539 return callocNoSync(size, bits);
541 else synchronized (gcLock)
543 return callocNoSync(size, bits);
// callocNoSync: allocate then (in lines not visible here) zero the block.
551 private void *callocNoSync(size_t size, uint bits = 0)
555 //debug(PRINTF) printf("calloc: %x len %d\n", p, len);
556 void *p = mallocNoSync(size, bits);
// realloc: locked wrapper over reallocNoSync.
565 void *realloc(void *p, size_t size, uint bits = 0)
567 if (!thread_needLock())
569 return reallocNoSync(p, size, bits);
571 else synchronized (gcLock)
573 return reallocNoSync(p, size, bits);
// reallocNoSync: resize in place when possible, otherwise allocate+copy.
581 private void *reallocNoSync(void *p, size_t size, uint bits = 0)
// Null p degenerates to a plain allocation.
591 p = mallocNoSync(size, bits);
597 //debug(PRINTF) printf("GC::realloc(p = %x, size = %u)\n", p, size);
// SENTINEL build: true size is stored beside the block.
600 sentinel_Invariant(p);
601 psize = *sentinel_size(p);
// Caller supplied new attribute bits: replace the old ones wholesale;
// otherwise carry the existing bits over to the replacement block.
606 Pool *pool = gcx.findPool(p);
610 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
614 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
615 gcx.setBits(pool, biti, bits);
619 bits = gcx.getBits(pool, biti);
623 p2 = mallocNoSync(size, bits);
626 //debug(PRINTF) printf("\tcopying %d bytes\n",size);
// Non-sentinel path: page-granular blocks can shrink or grow in place.
633 psize = gcx.findSize(p); // find allocated size
634 if (psize >= PAGESIZE && size >= PAGESIZE)
636 auto psz = psize / PAGESIZE;
637 auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
641 auto pool = gcx.findPool(p);
642 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
// Shrink: release the trailing pages back to the pool.
646 synchronized (gcLock)
648 debug (MEMSTOMP) memset(p + size, 0xF2, psize - size);
649 pool.freePages(pagenum + newsz, psz - newsz);
653 else if (pagenum + newsz <= pool.npages)
655 // Attempt to expand in place
656 synchronized (gcLock)
658 for (size_t i = pagenum + psz; 1;)
660 if (i == pagenum + newsz)
662 debug (MEMSTOMP) memset(p + psize, 0xF0, size - psize);
// Claim the following free pages as B_PAGEPLUS continuations.
663 memset(&pool.pagetable[pagenum + psz], B_PAGEPLUS, newsz - psz);
666 if (i == pool.ncommitted)
668 auto u = pool.extendPages(pagenum + newsz - pool.ncommitted);
674 if (pool.pagetable[i] != B_FREE)
// Fall-back: reallocate when the new size is larger, or under half the old
// size (to reclaim the waste).
681 if (psize < size || // if new size is bigger
682 psize > size * 2) // or less than half
686 Pool *pool = gcx.findPool(p);
690 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
694 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
695 gcx.setBits(pool, biti, bits);
699 bits = gcx.getBits(pool, biti);
703 p2 = mallocNoSync(size, bits);
706 //debug(PRINTF) printf("\tcopying %d bytes\n",size);
717 * Attempt to in-place enlarge the memory block pointed to by p by at least
718 * minbytes beyond its current capacity, up to a maximum of maxsize. This
719 * does not attempt to move the memory block (like realloc() does).
722 * 0 if could not extend p,
723 * total size of entire memory block if successful.
// extend: locked wrapper over extendNoSync.
725 size_t extend(void* p, size_t minsize, size_t maxsize)
727 if (!thread_needLock())
729 return extendNoSync(p, minsize, maxsize);
731 else synchronized (gcLock)
733 return extendNoSync(p, minsize, maxsize);
// extendNoSync: only page-granular (>= PAGESIZE) blocks can be extended;
// bucket allocations always return 0.
741 private size_t extendNoSync(void* p, size_t minsize, size_t maxsize)
744 assert( minsize <= maxsize );
748 //debug(PRINTF) printf("GC::extend(p = %x, minsize = %u, maxsize = %u)\n", p, minsize, maxsize);
753 auto psize = gcx.findSize(p); // find allocated size
754 if (psize < PAGESIZE)
755 return 0; // cannot extend buckets
// Convert byte sizes to page counts (rounding requests up).
757 auto psz = psize / PAGESIZE;
758 auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
759 auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
761 auto pool = gcx.findPool(p);
762 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
// Count free pages immediately after the block, up to maxsz.
765 for (sz = 0; sz < maxsz; sz++)
767 auto i = pagenum + psz + sz;
768 if (i == pool.ncommitted)
770 if (pool.pagetable[i] != B_FREE)
// Not enough free pages committed: try committing more from the pool.
779 else if (pagenum + psz + sz == pool.ncommitted)
781 auto u = pool.extendPages(minsz - sz);
// Success: mark the newly claimed pages as continuations of this block.
788 debug (MEMSTOMP) memset(p + psize, 0xF0, (psz + sz) * PAGESIZE - psize);
789 memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
792 return (psz + sz) * PAGESIZE;
// reserve: locked wrapper pre-allocating memory for future use.
799 size_t reserve(size_t size)
806 if (!thread_needLock())
808 return reserveNoSync(size);
810 else synchronized (gcLock)
812 return reserveNoSync(size);
820 private size_t reserveNoSync(size_t size)
825 return gcx.reserve(size);
// free: locked wrapper over freeNoSync.
839 if (!thread_needLock())
841 return freeNoSync(p);
843 else synchronized (gcLock)
845 return freeNoSync(p);
// freeNoSync: return a block to the GC. Foreign pointers (no owning pool)
// are ignored.
853 private void freeNoSync(void *p)
862 // Find which page it is in
863 pool = gcx.findPool(p);
864 if (!pool) // if not one of ours
866 sentinel_Invariant(p);
868 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
869 biti = cast(size_t)(p - pool.baseAddr) / 16;
// Drop all attribute bits so the slot starts clean on reuse.
870 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
872 bin = cast(Bins)pool.pagetable[pagenum];
873 if (bin == B_PAGE) // if large alloc
// Count the B_PAGEPLUS continuation pages, then free the whole run.
880 while (++n < pool.ncommitted && pool.pagetable[n] == B_PAGEPLUS)
882 debug (MEMSTOMP) memset(p, 0xF2, npages * PAGESIZE);
883 pool.freePages(pagenum, npages);
// Small alloc: push the block onto its size-class free list.
886 { // Add to free list
887 List *list = cast(List*)p;
889 debug (MEMSTOMP) memset(p, 0xF2, binsize[bin]);
891 list.next = gcx.bucket[bin];
892 gcx.bucket[bin] = list;
894 gcx.log_free(sentinel_add(p));
899 * Determine the base address of the block containing p. If p is not a gc
900 * allocated pointer, return null.
// addrOf: locked wrapper over addrOfNoSync.
902 void* addrOf(void *p)
909 if (!thread_needLock())
911 return addrOfNoSync(p);
913 else synchronized (gcLock)
915 return addrOfNoSync(p);
923 void* addrOfNoSync(void *p)
930 return gcx.findBase(p);
935 * Determine the allocated size of pointer p. If p is an interior pointer
936 * or not a gc allocated pointer, return 0.
// sizeOf: locked wrapper over sizeOfNoSync.
938 size_t sizeOf(void *p)
945 if (!thread_needLock())
947 return sizeOfNoSync(p);
949 else synchronized (gcLock)
951 return sizeOfNoSync(p);
// sizeOfNoSync: two variants exist in the original (SENTINEL and normal);
// both reject interior pointers via the alignment test below.
959 private size_t sizeOfNoSync(void *p)
966 size_t size = gcx.findSize(p);
968 // Check for interior pointer
970 // 1) size is a power of 2 for less than PAGESIZE values
971 // 2) base of memory pool is aligned on PAGESIZE boundary
972 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
// SENTINEL build subtracts the sentinel padding from the reported size.
974 return size ? size - SENTINEL_EXTRA : 0;
// Non-sentinel build keeps a one-entry (pointer, size) cache in gcx.
978 if (p == gcx.p_cache)
979 return gcx.size_cache;
981 size_t size = gcx.findSize(p);
983 // Check for interior pointer
985 // 1) size is a power of 2 for less than PAGESIZE values
986 // 2) base of memory pool is aligned on PAGESIZE boundary
987 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
992 gcx.size_cache = size;
1001 * Determine the base address of the block containing p. If p is not a gc
1002 * allocated pointer, return null.
// query: locked wrapper returning full block info (base, size, attr).
1004 BlkInfo query(void *p)
1012 if (!thread_needLock())
1014 return queryNoSync(p);
1016 else synchronized (gcLock)
1018 return queryNoSync(p);
1026 BlkInfo queryNoSync(void *p)
1030 return gcx.getInfo(p);
1035 * Verify that pointer p:
1036 * 1) belongs to this memory pool
1037 * 2) points to the start of an allocated piece of memory
1038 * 3) is not on a free list
// check: locked wrapper over checkNoSync (assert-based validation).
1047 if (!thread_needLock())
1051 else synchronized (gcLock)
1061 private void checkNoSync(void *p)
1065 sentinel_Invariant(p);
1073 p = sentinel_sub(p);
1074 pool = gcx.findPool(p);
1076 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1077 bin = cast(Bins)pool.pagetable[pagenum];
1078 assert(bin <= B_PAGE);
1079 size = binsize[bin];
// p must be aligned to the start of a bin slot (binsize is a power of 2).
1080 assert((cast(size_t)p & (size - 1)) == 0);
1086 // Check that p is not on a free list
1089 for (list = gcx.bucket[bin]; list; list = list.next)
1091 assert(cast(void*)list != p);
// setStackBottom: record the deepest-known stack bottom; the comparison
// direction depends on stack growth direction.
1102 private void setStackBottom(void *p)
1104 version (STACKGROWSDOWN)
1106 //p = (void *)((uint *)p + 4);
1107 if (p > gcx.stackBottom)
1109 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
// NOTE(review): this branch assigns without the cast(char*) used in the
// non-STACKGROWSDOWN branch below — likely relies on an implicit conversion;
// confirm against the full source.
1110 gcx.stackBottom = p;
1115 //p = (void *)((uint *)p - 4);
1116 if (p < gcx.stackBottom)
1118 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1119 gcx.stackBottom = cast(char*)p;
1126 * add p to list of roots
// addRoot/removeRoot: locked wrappers over the Gcx root list operations.
1128 void addRoot(void *p)
1135 if (!thread_needLock())
1139 else synchronized (gcLock)
1147 * remove p from list of roots
1149 void removeRoot(void *p)
1156 if (!thread_needLock())
1160 else synchronized (gcLock)
1168 * add range to scan for roots
// addRange: register [p, p+sz) as a conservative root range.
1170 void addRange(void *p, size_t sz)
1177 //debug(PRINTF) printf("+GC.addRange(pbot = x%x, ptop = x%x)\n", pbot, ptop);
1178 if (!thread_needLock())
1180 gcx.addRange(p, p + sz);
1182 else synchronized (gcLock)
1184 gcx.addRange(p, p + sz);
1186 //debug(PRINTF) printf("-GC.addRange()\n");
1193 void removeRange(void *p)
1200 if (!thread_needLock())
1204 else synchronized (gcLock)
1212 * do full garbage collection
// fullCollect: locked entry to the mark/sweep cycle via fullcollectshell.
1216 debug(PRINTF) printf("GC.fullCollect()\n");
1218 if (!thread_needLock())
1220 gcx.fullcollectshell();
1222 else synchronized (gcLock)
1224 gcx.fullcollectshell();
1232 debug(PRINTF) printf("poolsize = %x, usedsize = %x, freelistsize = %x\n",
1233 stats.poolsize, stats.usedsize, stats.freelistsize);
1241 * do full garbage collection ignoring roots
// fullCollectNoStack: collects without treating stacks as roots (the lines
// setting gcx.noStack around the call are not visible in this extract).
1243 void fullCollectNoStack()
1245 if (!thread_needLock())
1248 gcx.fullcollectshell();
1251 else synchronized (gcLock)
1254 gcx.fullcollectshell();
1261 * minimize free space usage
// minimize: locked wrapper (delegation target not visible in this extract).
1265 if (!thread_needLock())
1269 else synchronized (gcLock)
1277 * Retrieve statistics about garbage collection.
1278 * Useful for debugging and tuning.
1280 void getStats(out GCStats stats)
1282 if (!thread_needLock())
1284 getStatsNoSync(stats);
1286 else synchronized (gcLock)
1288 getStatsNoSync(stats);
// getStatsNoSync: walk every pool's page table to total pool size (psize)
// and per-bin usage, then walk free lists to total free-list bytes (flsize).
1296 private void getStatsNoSync(out GCStats stats)
1305 //debug(PRINTF) printf("getStats()\n");
1306 memset(&stats, 0, GCStats.sizeof);
1308 for (n = 0; n < gcx.npools; n++)
1309 { Pool *pool = gcx.pooltable[n];
1311 psize += pool.ncommitted * PAGESIZE;
1312 for (size_t j = 0; j < pool.ncommitted; j++)
1314 Bins bin = cast(Bins)pool.pagetable[j];
1317 else if (bin == B_PAGE)
1319 else if (bin < B_PAGE)
// Sum the bytes sitting on each size-class free list.
1324 for (n = 0; n < B_PAGE; n++)
1326 //debug(PRINTF) printf("bin %d\n", n);
1327 for (List *list = gcx.bucket[n]; list; list = list.next)
1329 //debug(PRINTF) printf("\tlist %x\n", list);
1330 flsize += binsize[n];
// used = allocated-bin bytes minus what is back on the free lists.
1334 usize = bsize - flsize;
1336 stats.poolsize = psize;
1337 stats.usedsize = bsize - flsize;
1338 stats.freelistsize = flsize;
1343 /* ============================ Gcx =============================== */
// Commit granularity (64 KiB) and pool granularity (1 MiB), in bytes.
1347 COMMITSIZE = (4096*16),
1348 POOLSIZE = (4096*256),
// Tail of the Bins enum: page-level states beyond the small size classes.
1362 B_PAGE, // start of large alloc
1363 B_PAGEPLUS, // continuation of large alloc
1364 B_FREE, // free page
1365 B_UNCOMMITTED, // memory not committed for this page
// Size of each small bin, and its complement mask used to round a pointer
// down to the start of its bin slot.
1386 const uint binsize[B_MAX] = [ 16,32,64,128,256,512,1024,2048,4096 ];
1387 const uint notbinsize[B_MAX] = [ ~(16u-1),~(32u-1),~(64u-1),~(128u-1),~(256u-1),
1388 ~(512u-1),~(1024u-1),~(2048u-1),~(4096u-1) ];
1390 /* ============================ Gcx =============================== */
// Debug-only check that the Gcx is used from the thread that created it.
1395 debug (THREADINVARIANT)
1398 void thread_Invariant()
1400 if (self != pthread_self())
1401 printf("thread_Invariant(): gcx = %x, self = %x, pthread_self() = %x\n", this, self, pthread_self());
1402 assert(self == pthread_self());
// Release build: the check compiles to a no-op.
1407 void thread_Invariant() { }
// Gcx state (fragment): flags, address bounds of all pools, free lists.
1421 uint noStack; // !=0 means don't scan stack
1422 uint log; // turn on logging
1426 int disabled; // turn off collections if >0
1428 byte *minAddr; // min(baseAddr)
1429 byte *maxAddr; // max(topAddr)
1434 List *bucket[B_MAX]; // free list for each size
// Gcx initialization: zero the whole struct, then seed stackBottom with a
// local's address.
1440 (cast(byte*)this)[0 .. Gcx.sizeof] = 0;
1441 stackBottom = cast(char*)&dummy;
1443 debug (THREADINVARIANT)
1444 self = pthread_self();
1445 //printf("gcx = %p, self = %x\n", this, self);
// Gcx teardown: release every pool, then the tracking arrays.
1454 for (size_t i = 0; i < npools; i++)
1455 { Pool *pool = pooltable[i];
1461 cstdlib.free(pooltable);
1464 cstdlib.free(roots);
1467 cstdlib.free(ranges);
1471 void Invariant() { }
// Gcx invariant: pooltable must stay sorted with minAddr/maxAddr matching
// the first/last pool; root and range bookkeeping must be consistent.
1478 //printf("Gcx.invariant(): this = %p\n", this);
1481 // Assure we're called on the right thread
1482 debug (THREADINVARIANT) assert(self == pthread_self());
1484 for (i = 0; i < npools; i++)
1485 { Pool *pool = pooltable[i];
1490 assert(minAddr == pool.baseAddr);
1494 assert(pool.opCmp(pooltable[i + 1]) < 0);
1496 else if (i + 1 == npools)
1498 assert(maxAddr == pool.topAddr);
1504 assert(rootdim != 0);
1505 assert(nroots <= rootdim);
1510 assert(rangedim != 0);
1511 assert(nranges <= rangedim);
// Every registered range must be non-null and well-ordered.
1513 for (i = 0; i < nranges; i++)
1515 assert(ranges[i].pbot);
1516 assert(ranges[i].ptop);
1517 assert(ranges[i].pbot <= ranges[i].ptop);
// Walk every free list (validation body not visible in this extract).
1521 for (i = 0; i < B_PAGE; i++)
1523 for (List *list = bucket[i]; list; list = list.next)
// Gcx.addRoot: append p to roots[], doubling capacity (×2 + 16) when full.
1534 void addRoot(void *p)
1536 if (nroots == rootdim)
1538 size_t newdim = rootdim * 2 + 16;
1541 newroots = cast(void**)cstdlib.malloc(newdim * newroots[0].sizeof);
1543 onOutOfMemoryError();
1545 { memcpy(newroots, roots, nroots * newroots[0].sizeof);
1546 cstdlib.free(roots);
// Gcx.removeRoot: reverse linear search, then compact the array.
1559 void removeRoot(void *p)
1561 for (size_t i = nroots; i--;)
1566 memmove(roots + i, roots + i + 1, (nroots - i) * roots[0].sizeof);
// Gcx.addRange: register [pbot, ptop) as a scan range; grows ranges[] with
// the same ×2 + 16 policy as roots[].
1577 void addRange(void *pbot, void *ptop)
1579 debug(PRINTF) printf("Thread %x ", pthread_self());
1580 debug(PRINTF) printf("%x.Gcx::addRange(%x, %x), nranges = %d\n", this, pbot, ptop, nranges);
1581 if (nranges == rangedim)
1583 size_t newdim = rangedim * 2 + 16;
1586 newranges = cast(Range*)cstdlib.malloc(newdim * newranges[0].sizeof);
1588 onOutOfMemoryError();
1590 { memcpy(newranges, ranges, nranges * newranges[0].sizeof);
1591 cstdlib.free(ranges);
1596 ranges[nranges].pbot = pbot;
1597 ranges[nranges].ptop = ptop;
// Gcx.removeRange: find by pbot (reverse scan), compact, and deliberately
// tolerate a missing entry — see the comment at the bottom.
1605 void removeRange(void *pbot)
1607 debug(PRINTF) printf("Thread %x ", pthread_self());
1608 debug(PRINTF) printf("%x.Gcx.removeRange(%x), nranges = %d\n", this, pbot, nranges);
1609 for (size_t i = nranges; i--;)
1611 if (ranges[i].pbot == pbot)
1614 memmove(ranges + i, ranges + i + 1, (nranges - i) * ranges[0].sizeof);
1618 debug(PRINTF) printf("Wrong thread\n");
1620 // This is a fatal error, but ignore it.
1621 // The problem is that we can get a Close() call on a thread
1622 // other than the one the range was allocated on.
1628 * Find Pool that pointer is in.
1629 * Return null if not in a Pool.
1630 * Assume pooltable[] is sorted.
1632 Pool *findPool(void *p)
// Fast range check against the global [minAddr, maxAddr) bounds first.
1634 if (p >= minAddr && p < maxAddr)
// Single-pool fast path: the bounds check already proved membership.
1638 return pooltable[0];
// Otherwise linear scan of the sorted table (original may have more logic
// in the missing lines).
1641 for (size_t i = 0; i < npools; i++)
1644 pool = pooltable[i];
1645 if (p < pool.topAddr)
1646 { if (pool.baseAddr <= p)
1657 * Find base address of block containing pointer p.
1658 * Returns null if not a gc'd pointer
1660 void* findBase(void *p)
1667 size_t offset = cast(size_t)(p - pool.baseAddr);
1668 size_t pn = offset / PAGESIZE;
1669 Bins bin = cast(Bins)pool.pagetable[pn];
1671 // Adjust bit to be at start of allocated memory block
// Small bins: round down to the slot start with the complement mask.
1674 return pool.baseAddr + (offset & notbinsize[bin]);
// Continuation page: walk back to the owning B_PAGE, then round to a page.
1676 else if (bin == B_PAGEPLUS)
1679 { --pn, offset -= PAGESIZE;
1680 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1682 return pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1686 // we are in a B_FREE or B_UNCOMMITTED page
1695 * Find size of pointer p.
1696 * Returns 0 if not a gc'd pointer
1698 size_t findSize(void *p)
1709 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1710 bin = cast(Bins)pool.pagetable[pagenum];
1711 size = binsize[bin];
// B_PAGE blocks: add up the trailing B_PAGEPLUS continuation pages.
1713 { size_t npages = pool.ncommitted;
1717 pt = &pool.pagetable[0];
1718 for (i = pagenum + 1; i < npages; i++)
1720 if (pt[i] != B_PAGEPLUS)
1723 size = (i - pagenum) * PAGESIZE;
// getInfo: fill a BlkInfo (base, size, attr) for p — combines the logic of
// findBase and findSize, then reads the attribute bits.
1733 BlkInfo getInfo(void* p)
1741 size_t offset = cast(size_t)(p - pool.baseAddr);
1742 size_t pn = offset / PAGESIZE;
1743 Bins bin = cast(Bins)pool.pagetable[pn];
1745 ////////////////////////////////////////////////////////////////////
1747 ////////////////////////////////////////////////////////////////////
// Base: small bins round down within the page via notbinsize.
1751 info.base = pool.baseAddr + (offset & notbinsize[bin]);
1753 else if (bin == B_PAGEPLUS)
// Walk back over continuation pages to the owning B_PAGE.
1756 { --pn, offset -= PAGESIZE;
1757 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1759 info.base = pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1761 // fix bin for use by size calc below
1762 bin = cast(Bins)pool.pagetable[pn];
1765 ////////////////////////////////////////////////////////////////////
1767 ////////////////////////////////////////////////////////////////////
// Size: small bins report binsize; B_PAGE runs add the continuation pages.
1769 info.size = binsize[bin];
1771 { size_t npages = pool.ncommitted;
1775 pt = &pool.pagetable[0];
1776 for (i = pn + 1; i < npages; i++)
1778 if (pt[i] != B_PAGEPLUS)
1781 info.size = (i - pn) * PAGESIZE;
1784 ////////////////////////////////////////////////////////////////////
1786 ////////////////////////////////////////////////////////////////////
// Attr: attribute bits are stored at 16-byte granularity.
1788 info.attr = getBits(pool, cast(size_t)(offset / 16));
1795 * Compute bin for size.
// findBin: map a byte size to a Bins value (full if/else ladder mostly
// missing from this extract).
1797 static Bins findBin(size_t size)
1806 else if (size <= 32)
1841 * Allocate a new pool of at least size bytes.
1842 * Sort it into pooltable[].
1843 * Mark all memory in the pool as B_FREE.
1844 * Return the actual number of bytes reserved or 0 on error.
1846 size_t reserve(size_t size)
1848 size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
1849 Pool* pool = newPool(npages);
// Failure of either pool creation or committing the pages reports 0.
1851 if (!pool || pool.extendPages(npages) == OPFAIL)
1853 return pool.ncommitted * PAGESIZE;
1858 * Minimizes physical memory usage by returning free pools to the OS.
// For each pool: if every committed page is B_FREE, drop the pool from the
// table (compacting with memmove) and recompute the global address bounds.
1867 for (n = 0; n < npools; n++)
1869 pool = pooltable[n];
1870 ncommitted = pool.ncommitted;
1871 for (pn = 0; pn < ncommitted; pn++)
1873 if (cast(Bins)pool.pagetable[pn] != B_FREE)
// A non-free page was found before the end: this pool stays.
1876 if (pn < ncommitted)
1883 memmove(pooltable + n,
1885 (--npools - n) * (Pool*).sizeof);
1886 minAddr = pooltable[0].baseAddr;
1887 maxAddr = pooltable[npools - 1].topAddr;
1893 * Allocate a chunk of memory that is larger than a page.
1894 * Return null if out of memory.
1896 void *bigAlloc(size_t size)
1906 npages = (size + PAGESIZE - 1) / PAGESIZE;
1910 // This code could use some refinement when repeatedly
1911 // allocating very large arrays.
// Pass 1: try to satisfy the request from an existing pool.
1913 for (n = 0; n < npools; n++)
1915 pool = pooltable[n];
1916 pn = pool.allocPages(npages);
// No pool had room: collect, and if that recovered a meaningful fraction
// (a quarter of a pool's pages per pool), retry before growing the heap.
1930 freedpages = fullcollectshell();
1931 if (freedpages >= npools * ((POOLSIZE / PAGESIZE) / 4))
1935 // Release empty pools to prevent bloat
1937 // Allocate new pool
1938 pool = newPool(npages);
1943 pn = pool.allocPages(npages);
1944 assert(pn != OPFAIL);
1947 // Release empty pools to prevent bloat
1949 // Allocate new pool
1950 pool = newPool(npages);
1953 pn = pool.allocPages(npages);
1954 assert(pn != OPFAIL);
// Success: first page is B_PAGE, the rest of the run B_PAGEPLUS; zero the
// slack past the requested size so stale data is not conservatively scanned.
1964 pool.pagetable[pn] = B_PAGE;
1966 memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
1967 p = pool.baseAddr + pn * PAGESIZE;
1968 memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
1969 debug (MEMSTOMP) memset(p, 0xF1, size);
1970 //debug(PRINTF) printf("\tp = %x\n", p);
1974 return null; // let mallocNoSync handle the error
1979 * Allocate a new pool with at least npages in it.
1980 * Sort it into pooltable[].
1981 * Return null if failed.
1983 Pool *newPool(size_t npages)
1986 Pool** newpooltable;
1990 //debug(PRINTF) printf("************Gcx::newPool(npages = %d)****************\n", npages);
1992 // Round up to COMMITSIZE pages
1993 npages = (npages + (COMMITSIZE/PAGESIZE) - 1) & ~(COMMITSIZE/PAGESIZE - 1);
1995 // Minimum of POOLSIZE
1996 if (npages < POOLSIZE/PAGESIZE)
1997 npages = POOLSIZE/PAGESIZE;
1998 else if (npages > POOLSIZE/PAGESIZE)
1999 { // Give us 150% of requested size, so there's room to extend
2000 auto n = npages + (npages >> 1);
// Guard against overflowing the byte count when converting back to bytes.
2001 if (n < size_t.max/PAGESIZE)
2005 // Allocate successively larger pools up to 8 megs
2011 n = 8; // cap pool size at 8 megs
2012 n *= (POOLSIZE / PAGESIZE);
// Pool object comes from calloc; initialize reserves the address range.
2017 pool = cast(Pool *)cstdlib.calloc(1, Pool.sizeof);
2020 pool.initialize(npages);
2024 newnpools = npools + 1;
2025 newpooltable = cast(Pool **)cstdlib.realloc(pooltable, newnpools * (Pool *).sizeof);
2029 // Sort pool into newpooltable[]
// Insertion keeps pooltable[] sorted by address so findPool can rely on it.
2030 for (i = 0; i < npools; i++)
2032 if (pool.opCmp(newpooltable[i]) < 0)
2035 memmove(newpooltable + i + 1, newpooltable + i, (npools - i) * (Pool *).sizeof);
2036 newpooltable[i] = pool;
2038 pooltable = newpooltable;
2041 minAddr = pooltable[0].baseAddr;
2042 maxAddr = pooltable[npools - 1].topAddr;
2054 * Allocate a page of bin's.
// allocPage: grab one page from any pool, tag it with the bin, and carve it
// into fixed-size slots threaded onto the bucket free list.
2058 int allocPage(Bins bin)
2066 //debug(PRINTF) printf("Gcx::allocPage(bin = %d)\n", bin);
2067 for (n = 0; n < npools; n++)
2069 pool = pooltable[n];
2070 pn = pool.allocPages(1);
2077 pool.pagetable[pn] = cast(ubyte)bin;
2079 // Convert page to free list
2080 size_t size = binsize[bin];
2081 List **b = &bucket[bin];
2083 p = pool.baseAddr + pn * PAGESIZE;
2084 ptop = p + PAGESIZE;
2085 for (; p < ptop; p += size)
2087 (cast(List *)p).next = *b;
2095 * Search a range of memory values and mark any pointers into the GC pool.
// mark: conservative scan — every word in [pbot, ptop) is treated as a
// potential pointer.
2097 void mark(void *pbot, void *ptop)
2099 void **p1 = cast(void **)pbot;
2100 void **p2 = cast(void **)ptop;
2104 //printf("marking range: %p -> %p\n", pbot, ptop);
2105 for (; p1 < p2; p1++)
2108 byte *p = cast(byte *)(*p1);
2110 //if (log) debug(PRINTF) printf("\tmark %x\n", p);
// Cheap global bounds test before any pool lookup.
2111 if (p >= minAddr && p < maxAddr)
// One-entry page cache: skip repeated lookups into the same large block.
2113 if ((cast(size_t)p & ~(PAGESIZE-1)) == pcache)
2119 size_t offset = cast(size_t)(p - pool.baseAddr);
2121 size_t pn = offset / PAGESIZE;
2122 Bins bin = cast(Bins)pool.pagetable[pn];
2124 //debug(PRINTF) printf("\t\tfound pool %x, base=%x, pn = %d, bin = %d, biti = x%x\n", pool, pool.baseAddr, pn, bin, biti);
2126 // Adjust bit to be at start of allocated memory block
// Small bins: bit index of the slot start (16-byte units, hence >> 4).
2129 biti = (offset & notbinsize[bin]) >> 4;
2130 //debug(PRINTF) printf("\t\tbiti = x%x\n", biti);
2132 else if (bin == B_PAGEPLUS)
// Walk back to the owning B_PAGE before computing the bit index.
2136 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
2137 biti = pn * (PAGESIZE / 16);
2141 // Don't mark bits in B_FREE or B_UNCOMMITTED pages
2145 if (bin >= B_PAGE) // Cache B_PAGE and B_PAGEPLUS lookups
2146 pcache = cast(size_t)p & ~(PAGESIZE-1);
2148 //debug(PRINTF) printf("\t\tmark(x%x) = %d\n", biti, pool.mark.test(biti));
// Newly reached block: mark it, and queue it for scanning unless NO_SCAN.
2149 if (!pool.mark.test(biti))
2151 //if (log) debug(PRINTF) printf("\t\tmarking %x\n", p);
2152 pool.mark.set(biti);
2153 if (!pool.noscan.test(biti))
2155 pool.scan.set(biti);
2158 log_parent(sentinel_add(pool.baseAddr + biti * 16), sentinel_add(pbot));
// Propagate "something new was marked" so the scan loop iterates again.
2163 anychanges |= changes;
2168 * Return number of full pages free'd.
2170 size_t fullcollectshell()
2172 // The purpose of the 'shell' is to ensure all the registers
2173 // get put on the stack so they'll be scanned
// __builtin_unwind_init (GCC builtin) forces callee-saved registers to
// memory so the stack scan sees any pointers held in registers.
2178 __builtin_unwind_init();
2189 result = fullcollect(sp);
2208 size_t fullcollect(void *stackTop)
2213 debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
2215 thread_suspendAll();
2221 for (n = 0; n < npools; n++)
2223 pool = pooltable[n];
2226 pool.freebits.zero();
2229 // Mark each free entry, so it doesn't get scanned
2230 for (n = 0; n < B_PAGE; n++)
2232 for (List *list = bucket[n]; list; list = list.next)
2234 pool = findPool(list);
2236 pool.freebits.set(cast(size_t)(cast(byte*)list - pool.baseAddr) / 16);
2240 for (n = 0; n < npools; n++)
2242 pool = pooltable[n];
2243 pool.mark.copy(&pool.freebits);
2246 rt_scanStaticData( &mark );
2248 version (MULTI_THREADED)
2252 // Scan stacks and registers for each paused thread
2253 thread_scanAll( &mark, stackTop );
2260 // Scan stack for main thread
2261 debug(PRINTF) printf(" scan stack bot = %x, top = %x\n", stackTop, stackBottom);
2262 version (STACKGROWSDOWN)
2263 mark(stackTop, stackBottom);
2265 mark(stackBottom, stackTop);
2270 debug(COLLECT_PRINTF) printf("scan roots[]\n");
2271 mark(roots, roots + nroots);
2274 debug(COLLECT_PRINTF) printf("scan ranges[]\n");
2276 for (n = 0; n < nranges; n++)
2278 debug(COLLECT_PRINTF) printf("\t%x .. %x\n", ranges[n].pbot, ranges[n].ptop);
2279 mark(ranges[n].pbot, ranges[n].ptop);
2283 debug(COLLECT_PRINTF) printf("\tscan heap\n");
2287 for (n = 0; n < npools; n++)
2293 pool = pooltable[n];
2295 bbase = pool.scan.base();
2296 btop = bbase + pool.scan.nwords;
2297 for (b = bbase; b < btop;)
2311 o = pool.baseAddr + (b - bbase) * 32 * 16;
2312 if (!(bitm & 0xFFFF))
2317 for (; bitm; o += 16, bitm >>= 1)
2322 pn = cast(size_t)(o - pool.baseAddr) / PAGESIZE;
2323 bin = cast(Bins)pool.pagetable[pn];
2326 mark(o, o + binsize[bin]);
2328 else if (bin == B_PAGE || bin == B_PAGEPLUS)
2330 if (bin == B_PAGEPLUS)
2332 while (pool.pagetable[pn - 1] != B_PAGE)
2336 while (pn + u < pool.ncommitted && pool.pagetable[pn + u] == B_PAGEPLUS)
2338 mark(o, o + u * PAGESIZE);
2347 // Free up everything not marked
2348 debug(COLLECT_PRINTF) printf("\tfree'ing\n");
2349 size_t freedpages = 0;
2351 for (n = 0; n < npools; n++)
2356 pool = pooltable[n];
2357 bbase = pool.mark.base();
2358 ncommitted = pool.ncommitted;
// Advance bbase by one page's worth of mark-bit words per page:
// PAGESIZE/16 bits per page == PAGESIZE/(32*16) 32-bit words.
2359 for (pn = 0; pn < ncommitted; pn++, bbase += PAGESIZE / (32 * 16))
2361 Bins bin = cast(Bins)pool.pagetable[pn];
2368 auto size = binsize[bin];
2370 p = pool.baseAddr + pn * PAGESIZE;
2371 ptop = p + PAGESIZE;
// One bit per 16-byte granule; stride the bit index by the object size.
2372 biti = pn * (PAGESIZE/16);
2373 bitstride = size / 16;
// Disabled whole-page fast path — see the BUG note on the version line.
2375 version(none) // BUG: doesn't work because freebits() must also be cleared
2377 // If free'd entire page
2378 if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 && bbase[3] == 0 &&
2379 bbase[4] == 0 && bbase[5] == 0 && bbase[6] == 0 && bbase[7] == 0)
2381 for (; p < ptop; p += size, biti += bitstride)
// Run any pending finalizer before releasing the slot.
2383 if (pool.finals.nbits && pool.finals.testClear(biti))
2384 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2385 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
2387 List *list = cast(List *)p;
2388 //debug(PRINTF) printf("\tcollecting %x\n", list);
2389 log_free(sentinel_add(list));
2391 debug (MEMSTOMP) memset(p, 0xF3, size);
2393 pool.pagetable[pn] = B_FREE;
2395 //debug(PRINTF) printf("freeing entire page %d\n", pn);
// Normal path: free each unmarked small object on the page individually.
2399 for (; p < ptop; p += size, biti += bitstride)
2401 if (!pool.mark.test(biti))
2403 sentinel_Invariant(sentinel_add(p));
2405 pool.freebits.set(biti);
2406 if (pool.finals.nbits && pool.finals.testClear(biti))
2407 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2408 clrBits(pool, biti, BlkAttr.ALL_BITS);
2410 List *list = cast(List *)p;
2411 debug(PRINTF) printf("\tcollecting %x\n", list);
2412 log_free(sentinel_add(list));
2414 debug (MEMSTOMP) memset(p, 0xF3, size);
// Large objects: a B_PAGE head page carries the mark bit for the
// whole allocation; free it and its continuation pages together.
2420 else if (bin == B_PAGE)
2421 { size_t biti = pn * (PAGESIZE / 16);
2423 if (!pool.mark.test(biti))
2424 { byte *p = pool.baseAddr + pn * PAGESIZE;
2426 sentinel_Invariant(sentinel_add(p));
2427 if (pool.finals.nbits && pool.finals.testClear(biti))
2428 rt_finalize(sentinel_add(p), false/*noStack > 0*/);
2429 clrBits(pool, biti, BlkAttr.ALL_BITS);
2431 debug(COLLECT_PRINTF) printf("\tcollecting big %x\n", p);
2432 log_free(sentinel_add(p));
2433 pool.pagetable[pn] = B_FREE;
2435 debug (MEMSTOMP) memset(p, 0xF3, PAGESIZE);
// Also release every following B_PAGEPLUS continuation page.
2436 while (pn + 1 < ncommitted && pool.pagetable[pn + 1] == B_PAGEPLUS)
2439 pool.pagetable[pn] = B_FREE;
2444 memset(p, 0xF3, PAGESIZE);
2455 // Free complete pages, rebuild free list
2456 debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
2457 size_t recoveredpages = 0;
2458 for (n = 0; n < npools; n++)
2462 pool = pooltable[n];
2463 ncommitted = pool.ncommitted;
2464 for (pn = 0; pn < ncommitted; pn++)
2466 Bins bin = cast(Bins)pool.pagetable[pn];
2472 size_t size = binsize[bin];
2473 size_t bitstride = size / 16;
2474 size_t bitbase = pn * (PAGESIZE / 16);
2475 size_t bittop = bitbase + (PAGESIZE / 16);
// A small-object page is recoverable only if every slot on it is free.
2479 for (biti = bitbase; biti < bittop; biti += bitstride)
2480 { if (!pool.freebits.test(biti))
2483 pool.pagetable[pn] = B_FREE;
// Thread each slot of the recovered page back onto its size-class
// free list (bucket[bin]).
2488 p = pool.baseAddr + pn * PAGESIZE;
2489 for (u = 0; u < PAGESIZE; u += size)
2490 { biti = bitbase + u / 16;
2491 if (pool.freebits.test(biti))
2494 list = cast(List *)(p + u);
2495 if (list.next != bucket[bin]) // avoid unnecessary writes
2496 list.next = bucket[bin];
2504 debug(COLLECT_PRINTF) printf("recovered pages = %d\n", recoveredpages);
2505 debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, npools);
// Collection result: total pages made available this cycle.
2507 return freedpages + recoveredpages;
// Returns the BlkAttr flag bits (FINALIZE, NO_SCAN, ...) recorded for the
// block at bit index biti within pool. (The declaration of the result
// accumulator and the return statement are in elided lines.)
2514 uint getBits(Pool* pool, size_t biti)
// finals is allocated lazily, so check nbits before testing it.
2523 if (pool.finals.nbits &&
2524 pool.finals.test(biti))
2525 bits |= BlkAttr.FINALIZE;
2526 if (pool.noscan.test(biti))
2527 bits |= BlkAttr.NO_SCAN;
// NO_MOVE support is stubbed out (the nomove bitmap is unused).
2528 // if (pool.nomove.nbits &&
2529 // pool.nomove.test(biti))
2530 // bits |= BlkAttr.NO_MOVE;
// Sets the BlkAttr bits given in mask for the block at bit index biti.
2538 void setBits(Pool* pool, size_t biti, uint mask)
2545 if (mask & BlkAttr.FINALIZE)
// Lazily allocate the finals bitmap on first use, sized like mark's.
2547 if (!pool.finals.nbits)
2548 pool.finals.alloc(pool.mark.nbits);
2549 pool.finals.set(biti);
2551 if (mask & BlkAttr.NO_SCAN)
2553 pool.noscan.set(biti);
// NO_MOVE support is stubbed out (the nomove bitmap is unused).
2555 // if (mask & BlkAttr.NO_MOVE)
2557 // if (!pool.nomove.nbits)
2558 // pool.nomove.alloc(pool.mark.nbits);
2559 // pool.nomove.set(biti);
// Clears the BlkAttr bits given in mask for the block at bit index biti.
2567 void clrBits(Pool* pool, size_t biti, uint mask)
// finals is allocated lazily; only clear when the bitmap exists.
2574 if (mask & BlkAttr.FINALIZE && pool.finals.nbits)
2575 pool.finals.clear(biti);
2576 if (mask & BlkAttr.NO_SCAN)
2577 pool.noscan.clear(biti);
// NO_MOVE support is stubbed out (the nomove bitmap is unused).
2578 // if (mask & BlkAttr.NO_MOVE && pool.nomove.nbits)
2579 // pool.nomove.clear(biti);
2583 /***** Leak Detector ******/
// log_init body (signature elided): pre-size the allocation log so the
// first ~1000 entries need no regrowth.
2594 //debug(PRINTF) printf("+log_init()\n");
2595 current.reserve(1000);
2597 //debug(PRINTF) printf("-log_init()\n");
// Records an allocation of `size` bytes at p in the leak-detector log.
// (The actual log append happens in elided lines.)
2601 void log_malloc(void *p, size_t size)
2603 //debug(PRINTF) printf("+log_malloc(p = %x, size = %d)\n", p, size);
2616 //debug(PRINTF) printf("-log_malloc()\n");
// Removes p from the leak-detector log; reports when p was never logged.
2620 void log_free(void *p)
2622 //debug(PRINTF) printf("+log_free(%x)\n", p);
2625 i = current.find(p);
// Presumably the not-found (OPFAIL) branch — the comparison is elided.
2628 debug(PRINTF) printf("free'ing unallocated memory %x\n", p);
2632 //debug(PRINTF) printf("-log_free()\n");
// log_collect body (signature elided): leak-detector collection report.
// Diffs this cycle's allocation log against the previous cycle's, prints
// new pointers and root entries, then snapshots current into prev.
// FIX(review): `prev.copy(¤t)` was HTML-entity mojibake — the source
// text `&current` had been corrupted into `&curren;` + `t` (the ¤ sign).
// Restored to take the address of `current`.
2638 //debug(PRINTF) printf("+log_collect()\n");
2639 // Print everything in current that is not in prev
2641 debug(PRINTF) printf("New pointers this cycle: --------------------------------\n");
2643 for (size_t i = 0; i < current.dim; i++)
2647 j = prev.find(current.data[i].p);
2649 current.data[i].print();
2654 debug(PRINTF) printf("All roots this cycle: --------------------------------\n");
2655 for (size_t i = 0; i < current.dim; i++)
2660 p = current.data[i].p;
// Entries whose parent lies outside any pool are treated as roots.
2661 if (!findPool(current.data[i].parent))
2663 j = prev.find(current.data[i].p);
// 'N' flags a root that was not present in the previous cycle.
2665 debug(PRINTF) printf("N");
2667 debug(PRINTF) printf(" ");;
2668 current.data[i].print();
2672 debug(PRINTF) printf("Used = %d-------------------------------------------------\n", used);
// Remember this cycle's log for the next diff.
2673 prev.copy(&current);
2675 debug(PRINTF) printf("-log_collect()\n");
// Records `parent` as the referrer of the logged pointer p; complains
// when p is not in the allocation log.
2679 void log_parent(void *p, void *parent)
2681 //debug(PRINTF) printf("+log_parent()\n");
2684 i = current.find(p);
2687 debug(PRINTF) printf("parent'ing unallocated memory %x, parent = %x\n", p, parent);
// Diagnostic: locate p's pool, page, and bin to aid debugging.
2691 size_t offset = cast(size_t)(p - pool.baseAddr);
2693 size_t pn = offset / PAGESIZE;
2694 Bins bin = cast(Bins)pool.pagetable[pn];
2695 biti = (offset & notbinsize[bin]);
2696 debug(PRINTF) printf("\tbin = %d, offset = x%x, biti = x%x\n", bin, offset, biti);
2700 current.data[i].parent = parent;
2702 //debug(PRINTF) printf("-log_parent()\n");
// No-op stubs compiled in when the LOGGING debug switch is disabled,
// so call sites need no conditional compilation.
2709 void log_malloc(void *p, size_t size) { }
2710 void log_free(void *p) { }
2711 void log_collect() { }
2712 void log_parent(void *p, void *parent) { }
2717 /* ============================ Pool =============================== */
// Per-pool bitmaps, one bit per 16-byte granule (see initialize()).
2724 GCBits mark; // entries already scanned, or should not be scanned
2725 GCBits scan; // entries that need to be scanned
2726 GCBits freebits; // entries that are on the free list
2727 GCBits finals; // entries that need finalizer run on them
2728 GCBits noscan; // entries that should not be scanned
// Pages are committed lazily; only the first ncommitted are usable.
2731 size_t ncommitted; // ncommitted <= npages
// Maps npages pages of address space for this pool and sets up its
// bitmaps and page table. The allocation-failure branch is partially
// elided; pagetable allocation failure calls onOutOfMemoryError.
2735 void initialize(size_t npages)
2739 //debug(PRINTF) printf("Pool::Pool(%u)\n", npages);
2740 poolsize = npages * PAGESIZE;
2741 assert(poolsize >= POOLSIZE);
2742 baseAddr = cast(byte *)os_mem_map(poolsize);
2744 // Some of the code depends on page alignment of memory pools
2745 assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);
2749 //debug(PRINTF) printf("GC fail: poolsize = x%x, errno = %d\n", poolsize, errno);
2750 //debug(PRINTF) printf("message = '%s'\n", sys_errlist[errno]);
2756 topAddr = baseAddr + poolsize;
// One bit per 16-byte granule for each bitmap.
// (finals is allocated lazily by setBits, not here.)
2758 mark.alloc(cast(size_t)poolsize / 16);
2759 scan.alloc(cast(size_t)poolsize / 16);
2760 freebits.alloc(cast(size_t)poolsize / 16);
2761 noscan.alloc(cast(size_t)poolsize / 16);
// One Bins byte per page; every page starts out uncommitted.
2763 pagetable = cast(ubyte*)cstdlib.malloc(npages);
2765 onOutOfMemoryError();
2766 memset(pagetable, B_UNCOMMITTED, npages);
2768 this.npages = npages;
// Pool teardown (the enclosing Dtor's opening lines are elided):
// decommit then unmap the pool's memory, and free the page table.
2781 result = os_mem_decommit(baseAddr, 0, ncommitted * PAGESIZE);
2782 assert(result == 0);
2788 result = os_mem_unmap(baseAddr, npages * PAGESIZE);
2789 assert(result == 0);
2797 cstdlib.free(pagetable);
// Manual invariant hook — intentionally empty; the real checks are in
// the invariant block below (its opening lines are elided).
2807 void Invariant() { }
2814 //freebits.Invariant();
2815 //finals.Invariant();
2816 //noscan.Invariant();
2820 //if (baseAddr + npages * PAGESIZE != topAddr)
2821 //printf("baseAddr = %p, npages = %d, topAddr = %p\n", baseAddr, npages, topAddr);
2822 assert(baseAddr + npages * PAGESIZE == topAddr);
2823 assert(ncommitted <= npages);
// Every page-table entry must hold a valid Bins value.
2826 for (size_t i = 0; i < npages; i++)
2827 { Bins bin = cast(Bins)pagetable[i];
2829 assert(bin < B_MAX);
2835 * Allocate n pages from Pool.
2836 * Returns OPFAIL on failure.
2838 size_t allocPages(size_t n)
2843 //debug(PRINTF) printf("Pool::allocPages(n = %d)\n", n);
// First-fit scan over committed pages for a run of n B_FREE pages
// (the run-counting lines are elided).
2845 for (i = 0; i < ncommitted; i++)
2847 if (pagetable[i] == B_FREE)
2850 { //debug(PRINTF) printf("\texisting pn = %d\n", i - n + 1);
// No suitable run among committed pages: try committing more.
2857 return extendPages(n);
2861 * Extend Pool by n pages.
2862 * Returns OPFAIL on failure.
2864 size_t extendPages(size_t n)
2866 //debug(PRINTF) printf("Pool::extendPages(n = %d)\n", n);
2867 if (ncommitted + n <= npages)
// Round the request up to a whole number of COMMITSIZE units of pages
// (COMMITSIZE/PAGESIZE is assumed to be a power of two), clamped to
// what remains uncommitted in the pool.
2871 tocommit = (n + (COMMITSIZE/PAGESIZE) - 1) & ~(COMMITSIZE/PAGESIZE - 1);
2872 if (ncommitted + tocommit > npages)
2873 tocommit = npages - ncommitted;
2874 //debug(PRINTF) printf("\tlooking to commit %d more pages\n", tocommit);
2876 if (os_mem_commit(baseAddr, ncommitted * PAGESIZE, tocommit * PAGESIZE) == 0)
// Newly committed pages start out free.
2878 memset(pagetable + ncommitted, B_FREE, tocommit);
2879 auto i = ncommitted;
2880 ncommitted += tocommit;
// Back i up over any already-free trailing pages so they merge into
// the run returned to the caller.
2882 while (i && pagetable[i - 1] == B_FREE)
2887 //debug(PRINTF) printf("\tfailed to commit %d pages\n", tocommit);
2895 * Free npages pages starting with pagenum.
// Only flips the page-table entries to B_FREE; the memory itself stays
// committed for reuse by later allocations.
2897 void freePages(size_t pagenum, size_t npages)
2899 memset(&pagetable[pagenum], B_FREE, npages);
2904 * Used for sorting pooltable[]
// Orders pools by base address (the enclosing opCmp signature and the
// less-than return are in elided lines).
2908 if (baseAddr < p2.baseAddr)
2911 return cast(int)(baseAddr > p2.baseAddr);
2916 /* ============================ SENTINEL =============================== */
// Sentinel layout (debug=SENTINEL): each user block is wrapped as
// [size_t size][size_t SENTINEL_PRE][user data ...][ubyte SENTINEL_POST]
// so under- and overruns can be detected by sentinel_Invariant.
2921 const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
2922 const ubyte SENTINEL_POST = 0xF5; // 8 bits
2923 const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;
// Accessors for the hidden header/footer around a user pointer p.
2926 size_t* sentinel_size(void *p) { return &(cast(size_t *)p)[-2]; }
2927 size_t* sentinel_pre(void *p) { return &(cast(size_t *)p)[-1]; }
2928 ubyte* sentinel_post(void *p) { return &(cast(ubyte *)p)[*sentinel_size(p)]; }
// Stamps the sentinel header/footer for a fresh allocation of `size` bytes.
2931 void sentinel_init(void *p, size_t size)
2933 *sentinel_size(p) = size;
2934 *sentinel_pre(p) = SENTINEL_PRE;
2935 *sentinel_post(p) = SENTINEL_POST;
// Asserts on underrun (pre) or overrun (post) corruption.
2939 void sentinel_Invariant(void *p)
2941 assert(*sentinel_pre(p) == SENTINEL_PRE);
2942 assert(*sentinel_post(p) == SENTINEL_POST);
// Converts a raw block pointer to the user-visible pointer.
2946 void *sentinel_add(void *p)
2948 return p + 2 * size_t.sizeof;
// Converts a user-visible pointer back to the raw block pointer.
2952 void *sentinel_sub(void *p)
2954 return p - 2 * size_t.sizeof;
// Non-SENTINEL build: sentinels add no space overhead and the helpers
// do nothing (bodies are elided; sentinel_add presumably returns p
// unchanged — TODO confirm against the elided else-branch bodies).
2959 const uint SENTINEL_EXTRA = 0;
2962 void sentinel_init(void *p, size_t size)
2967 void sentinel_Invariant(void *p)
2972 void *sentinel_add(void *p)
2978 void *sentinel_sub(void *p)