2 * This module contains the garbage collector implementation.
4 * Copyright: Copyright (C) 2001-2007 Digital Mars, www.digitalmars.com.
7 * This software is provided 'as-is', without any express or implied
8 * warranty. In no event will the authors be held liable for any damages
9 * arising from the use of this software.
11 * Permission is granted to anyone to use this software for any purpose,
12 * including commercial applications, and to alter it and redistribute it
13 * freely, in both source and binary form, subject to the following
16 * o The origin of this software must not be misrepresented; you must not
17 * claim that you wrote the original software. If you use this software
18 * in a product, an acknowledgment in the product documentation would be
19 * appreciated but is not required.
20 * o Altered source versions must be plainly marked as such, and must not
21 * be misrepresented as being the original software.
22 * o This notice may not be removed or altered from any source
24 * Authors: Walter Bright, David Friedman, Sean Kelly
29 // D Programming Language Garbage Collector implementation
31 /************** Debugging ***************************/
33 //debug = PRINTF; // turn on printf's
34 //debug = COLLECT_PRINTF; // turn on printf's
35 //debug = THREADINVARIANT; // check thread integrity
36 //debug = LOGGING; // log allocations / frees
37 //debug = MEMSTOMP; // stomp on memory
38 //debug = SENTINEL; // add underrun/overrun protection
39 //debug = PTRCHECK; // more pointer checking
40 //debug = PTRCHECK2; // thorough but slow pointer checking
42 /*************** Configuration *********************/
44 version = STACKGROWSDOWN; // growing the stack means subtracting from the stack pointer
45 // (use for Intel X86 CPUs)
46 // else growing the stack means adding to the stack pointer
47 version = MULTI_THREADED; // produce multithreaded version
49 /***************************************************/
51 private import gcbits;
52 private import gcstats;
53 private import gcalloc;
55 private import cstdlib = core.stdc.stdlib : calloc, free, malloc, realloc;
56 private import core.stdc.string;
58 debug private import core.stdc.stdio;
62 // BUG: The following import will likely not work, since the gcc
63 // subdirectory is elsewhere. Instead, perhaps the functions
64 // could be declared directly or some other resolution could
66 private import gcc.builtins; // for __builtin_unwind_init
74 FINALIZE = 0b0000_0001,
75 NO_SCAN = 0b0000_0010,
76 NO_MOVE = 0b0000_0100,
77 ALL_BITS = 0b1111_1111
87 extern (C) void* rt_stackBottom();
88 extern (C) void* rt_stackTop();
90 extern (C) void rt_finalize( void* p, bool det = true );
92 version (MULTI_THREADED)
94 extern (C) bool thread_needLock();
95 extern (C) void thread_suspendAll();
96 extern (C) void thread_resumeAll();
98 alias void delegate( void*, void* ) scanFn;
99 extern (C) void thread_scanAll( scanFn fn, void* curStackTop = null );
102 extern (C) void onOutOfMemoryError();
106 OPFAIL = ~cast(size_t)0
114 /* ======================= Leak Detector =========================== */
129 printf(" p = %x, size = %d, parent = %x ", p, size, parent);
132 printf("%s(%u)", file, line);
152 void reserve(size_t nentries)
154 assert(dim <= allocdim);
155 if (allocdim - dim < nentries)
157 allocdim = (dim + nentries) * 2;
158 assert(dim + nentries <= allocdim);
161 data = cast(Log*)cstdlib.malloc(allocdim * Log.sizeof);
162 if (!data && allocdim)
163 onOutOfMemoryError();
168 newdata = cast(Log*)cstdlib.malloc(allocdim * Log.sizeof);
169 if (!newdata && allocdim)
170 onOutOfMemoryError();
171 memcpy(newdata, data, dim * Log.sizeof);
185 void remove(size_t i)
187 memmove(data + i, data + i + 1, (dim - i) * Log.sizeof);
194 for (size_t i = 0; i < dim; i++)
199 return OPFAIL; // not found
203 void copy(LogArray *from)
205 reserve(from.dim - dim);
206 assert(from.dim <= allocdim);
207 memcpy(data, from.data, from.dim * Log.sizeof);
214 /* ============================ GC =============================== */
217 class GCLock { } // just a dummy so we can get a global lock
220 const uint GCVERSION = 1; // increment every time we change interface
225 // For passing to debug code
229 uint gcversion = GCVERSION;
231 Gcx *gcx; // implementation
232 static ClassInfo gcLock; // global lock
237 gcLock = GCLock.classinfo;
238 gcx = cast(Gcx*)cstdlib.calloc(1, Gcx.sizeof);
240 onOutOfMemoryError();
242 setStackBottom(rt_stackBottom());
250 //debug(PRINTF) printf("Thread %x ", pthread_self());
251 //debug(PRINTF) printf("GC.Dtor()\n");
267 gcx.thread_Invariant();
277 if (!thread_needLock())
279 assert(gcx.disabled > 0);
282 else synchronized (gcLock)
284 assert(gcx.disabled > 0);
295 if (!thread_needLock())
299 else synchronized (gcLock)
309 uint getAttr(void* p)
318 Pool* pool = gcx.findPool(p);
323 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
325 oldb = gcx.getBits(pool, biti);
330 if (!thread_needLock())
334 else synchronized (gcLock)
344 uint setAttr(void* p, uint mask)
353 Pool* pool = gcx.findPool(p);
358 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
360 oldb = gcx.getBits(pool, biti);
361 gcx.setBits(pool, biti, mask);
366 if (!thread_needLock())
370 else synchronized (gcLock)
380 uint clrAttr(void* p, uint mask)
389 Pool* pool = gcx.findPool(p);
394 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
396 oldb = gcx.getBits(pool, biti);
397 gcx.clrBits(pool, biti, mask);
402 if (!thread_needLock())
406 else synchronized (gcLock)
416 void *malloc(size_t size, uint bits = 0)
423 if (!thread_needLock())
425 return mallocNoSync(size, bits);
427 else synchronized (gcLock)
429 return mallocNoSync(size, bits);
437 private void *mallocNoSync(size_t size, uint bits = 0)
444 //debug(PRINTF) printf("GC::malloc(size = %d, gcx = %p)\n", size, gcx);
446 //debug(PRINTF) printf("gcx.self = %x, pthread_self() = %x\n", gcx.self, pthread_self());
448 size += SENTINEL_EXTRA;
451 // Cache previous binsize lookup - Dave Fladebo.
452 static size_t lastsize = -1;
454 if (size == lastsize)
458 bin = gcx.findBin(size);
468 if (!gcx.allocPage(bin) && !gcx.disabled) // try to find a new page
470 if (!thread_needLock())
472 /* Then we haven't locked it yet. Be sure
473 * and lock for a collection, since a finalizer
474 * may start a new thread.
476 synchronized (gcLock)
478 gcx.fullcollectshell();
481 else if (!gcx.fullcollectshell()) // collect to find a new page
486 if (!gcx.bucket[bin] && !gcx.allocPage(bin))
489 gcx.newPool(1); // allocate new pool to find a new page
490 result = gcx.allocPage(bin);
492 onOutOfMemoryError();
497 // Return next item from free list
498 gcx.bucket[bin] = (cast(List*)p).next;
499 if( !(bits & BlkAttr.NO_SCAN) )
500 memset(p + size, 0, binsize[bin] - size);
501 //debug(PRINTF) printf("\tmalloc => %x\n", p);
502 debug (MEMSTOMP) memset(p, 0xF0, size);
506 p = gcx.bigAlloc(size);
508 onOutOfMemoryError();
510 size -= SENTINEL_EXTRA;
512 sentinel_init(p, size);
513 gcx.log_malloc(p, size);
517 Pool *pool = gcx.findPool(p);
520 gcx.setBits(pool, cast(size_t)(p - pool.baseAddr) / 16, bits);
529 void *calloc(size_t size, uint bits = 0)
536 if (!thread_needLock())
538 return callocNoSync(size, bits);
540 else synchronized (gcLock)
542 return callocNoSync(size, bits);
550 private void *callocNoSync(size_t size, uint bits = 0)
554 //debug(PRINTF) printf("calloc: %x len %d\n", p, len);
555 void *p = mallocNoSync(size, bits);
564 void *realloc(void *p, size_t size, uint bits = 0)
566 if (!thread_needLock())
568 return reallocNoSync(p, size, bits);
570 else synchronized (gcLock)
572 return reallocNoSync(p, size, bits);
580 private void *reallocNoSync(void *p, size_t size, uint bits = 0)
590 p = mallocNoSync(size, bits);
596 //debug(PRINTF) printf("GC::realloc(p = %x, size = %u)\n", p, size);
599 sentinel_Invariant(p);
600 psize = *sentinel_size(p);
605 Pool *pool = gcx.findPool(p);
609 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
613 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
614 gcx.setBits(pool, biti, bits);
618 bits = gcx.getBits(pool, biti);
622 p2 = mallocNoSync(size, bits);
625 //debug(PRINTF) printf("\tcopying %d bytes\n",size);
632 psize = gcx.findSize(p); // find allocated size
633 if (psize >= PAGESIZE && size >= PAGESIZE)
635 auto psz = psize / PAGESIZE;
636 auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
640 auto pool = gcx.findPool(p);
641 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
645 synchronized (gcLock)
647 debug (MEMSTOMP) memset(p + size, 0xF2, psize - size);
648 pool.freePages(pagenum + newsz, psz - newsz);
652 else if (pagenum + newsz <= pool.npages)
654 // Attempt to expand in place
655 synchronized (gcLock)
657 for (size_t i = pagenum + psz; 1;)
659 if (i == pagenum + newsz)
661 debug (MEMSTOMP) memset(p + psize, 0xF0, size - psize);
662 memset(&pool.pagetable[pagenum + psz], B_PAGEPLUS, newsz - psz);
665 if (i == pool.ncommitted)
667 auto u = pool.extendPages(pagenum + newsz - pool.ncommitted);
673 if (pool.pagetable[i] != B_FREE)
680 if (psize < size || // if new size is bigger
681 psize > size * 2) // or less than half
685 Pool *pool = gcx.findPool(p);
689 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
693 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
694 gcx.setBits(pool, biti, bits);
698 bits = gcx.getBits(pool, biti);
702 p2 = mallocNoSync(size, bits);
705 //debug(PRINTF) printf("\tcopying %d bytes\n",size);
716 * Attempt to in-place enlarge the memory block pointed to by p by at least
717 * minbytes beyond its current capacity, up to a maximum of maxsize. This
718 * does not attempt to move the memory block (like realloc() does).
721 * 0 if could not extend p,
722 * total size of entire memory block if successful.
724 size_t extend(void* p, size_t minsize, size_t maxsize)
726 if (!thread_needLock())
728 return extendNoSync(p, minsize, maxsize);
730 else synchronized (gcLock)
732 return extendNoSync(p, minsize, maxsize);
740 private size_t extendNoSync(void* p, size_t minsize, size_t maxsize)
743 assert( minsize <= maxsize );
747 //debug(PRINTF) printf("GC::extend(p = %x, minsize = %u, maxsize = %u)\n", p, minsize, maxsize);
752 auto psize = gcx.findSize(p); // find allocated size
753 if (psize < PAGESIZE)
754 return 0; // cannot extend buckets
756 auto psz = psize / PAGESIZE;
757 auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
758 auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
760 auto pool = gcx.findPool(p);
761 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
764 for (sz = 0; sz < maxsz; sz++)
766 auto i = pagenum + psz + sz;
767 if (i == pool.ncommitted)
769 if (pool.pagetable[i] != B_FREE)
778 else if (pagenum + psz + sz == pool.ncommitted)
780 auto u = pool.extendPages(minsz - sz);
787 debug (MEMSTOMP) memset(p + psize, 0xF0, (psz + sz) * PAGESIZE - psize);
788 memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
791 return (psz + sz) * PAGESIZE;
798 size_t reserve(size_t size)
805 if (!thread_needLock())
807 return reserveNoSync(size);
809 else synchronized (gcLock)
811 return reserveNoSync(size);
819 private size_t reserveNoSync(size_t size)
824 return gcx.reserve(size);
838 if (!thread_needLock())
840 return freeNoSync(p);
842 else synchronized (gcLock)
844 return freeNoSync(p);
852 private void freeNoSync(void *p)
861 // Find which page it is in
862 pool = gcx.findPool(p);
863 if (!pool) // if not one of ours
865 sentinel_Invariant(p);
867 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
868 biti = cast(size_t)(p - pool.baseAddr) / 16;
869 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
871 bin = cast(Bins)pool.pagetable[pagenum];
872 if (bin == B_PAGE) // if large alloc
879 while (++n < pool.ncommitted && pool.pagetable[n] == B_PAGEPLUS)
881 debug (MEMSTOMP) memset(p, 0xF2, npages * PAGESIZE);
882 pool.freePages(pagenum, npages);
885 { // Add to free list
886 List *list = cast(List*)p;
888 debug (MEMSTOMP) memset(p, 0xF2, binsize[bin]);
890 list.next = gcx.bucket[bin];
891 gcx.bucket[bin] = list;
893 gcx.log_free(sentinel_add(p));
898 * Determine the base address of the block containing p. If p is not a gc
899 * allocated pointer, return null.
901 void* addrOf(void *p)
908 if (!thread_needLock())
910 return addrOfNoSync(p);
912 else synchronized (gcLock)
914 return addrOfNoSync(p);
922 void* addrOfNoSync(void *p)
929 return gcx.findBase(p);
934 * Determine the allocated size of pointer p. If p is an interior pointer
935 * or not a gc allocated pointer, return 0.
937 size_t sizeOf(void *p)
944 if (!thread_needLock())
946 return sizeOfNoSync(p);
948 else synchronized (gcLock)
950 return sizeOfNoSync(p);
958 private size_t sizeOfNoSync(void *p)
965 size_t size = gcx.findSize(p);
967 // Check for interior pointer
969 // 1) size is a power of 2 for less than PAGESIZE values
970 // 2) base of memory pool is aligned on PAGESIZE boundary
971 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
973 return size ? size - SENTINEL_EXTRA : 0;
977 if (p == gcx.p_cache)
978 return gcx.size_cache;
980 size_t size = gcx.findSize(p);
982 // Check for interior pointer
984 // 1) size is a power of 2 for less than PAGESIZE values
985 // 2) base of memory pool is aligned on PAGESIZE boundary
986 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
991 gcx.size_cache = size;
1000 * Determine the base address of the block containing p. If p is not a gc
1001 * allocated pointer, return null.
1003 BlkInfo query(void *p)
1011 if (!thread_needLock())
1013 return queryNoSync(p);
1015 else synchronized (gcLock)
1017 return queryNoSync(p);
1025 BlkInfo queryNoSync(void *p)
1029 return gcx.getInfo(p);
1034 * Verify that pointer p:
1035 * 1) belongs to this memory pool
1036 * 2) points to the start of an allocated piece of memory
1037 * 3) is not on a free list
1046 if (!thread_needLock())
1050 else synchronized (gcLock)
1060 private void checkNoSync(void *p)
1064 sentinel_Invariant(p);
1072 p = sentinel_sub(p);
1073 pool = gcx.findPool(p);
1075 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1076 bin = cast(Bins)pool.pagetable[pagenum];
1077 assert(bin <= B_PAGE);
1078 size = binsize[bin];
1079 assert((cast(size_t)p & (size - 1)) == 0);
1085 // Check that p is not on a free list
1088 for (list = gcx.bucket[bin]; list; list = list.next)
1090 assert(cast(void*)list != p);
1101 private void setStackBottom(void *p)
1103 version (STACKGROWSDOWN)
1105 //p = (void *)((uint *)p + 4);
1106 if (p > gcx.stackBottom)
1108 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1109 gcx.stackBottom = p;
1114 //p = (void *)((uint *)p - 4);
1115 if (p < gcx.stackBottom)
1117 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1118 gcx.stackBottom = cast(char*)p;
1125 * add p to list of roots
1127 void addRoot(void *p)
1134 if (!thread_needLock())
1138 else synchronized (gcLock)
1146 * remove p from list of roots
1148 void removeRoot(void *p)
1155 if (!thread_needLock())
1159 else synchronized (gcLock)
1169 int delegate(int delegate(inout void*)) rootIter()
1171 if (!thread_needLock())
1173 return &gcx.rootIter;
1175 else synchronized (gcLock)
1177 return &gcx.rootIter;
1183 * add range to scan for roots
1185 void addRange(void *p, size_t sz)
1192 //debug(PRINTF) printf("+GC.addRange(pbot = x%x, ptop = x%x)\n", pbot, ptop);
1193 if (!thread_needLock())
1195 gcx.addRange(p, p + sz);
1197 else synchronized (gcLock)
1199 gcx.addRange(p, p + sz);
1201 //debug(PRINTF) printf("-GC.addRange()\n");
1208 void removeRange(void *p)
1215 if (!thread_needLock())
1219 else synchronized (gcLock)
1229 int delegate(int delegate(inout Range)) rangeIter()
1231 if (!thread_needLock())
1233 return &gcx.rangeIter;
1235 else synchronized (gcLock)
1237 return &gcx.rangeIter;
1243 * do full garbage collection
1247 debug(PRINTF) printf("GC.fullCollect()\n");
1249 if (!thread_needLock())
1251 gcx.fullcollectshell();
1253 else synchronized (gcLock)
1255 gcx.fullcollectshell();
1263 debug(PRINTF) printf("poolsize = %x, usedsize = %x, freelistsize = %x\n",
1264 stats.poolsize, stats.usedsize, stats.freelistsize);
1272 * do full garbage collection ignoring roots
1274 void fullCollectNoStack()
1276 if (!thread_needLock())
1279 gcx.fullcollectshell();
1282 else synchronized (gcLock)
1285 gcx.fullcollectshell();
1292 * minimize free space usage
1296 if (!thread_needLock())
1300 else synchronized (gcLock)
1308 * Retrieve statistics about garbage collection.
1309 * Useful for debugging and tuning.
1311 void getStats(out GCStats stats)
1313 if (!thread_needLock())
1315 getStatsNoSync(stats);
1317 else synchronized (gcLock)
1319 getStatsNoSync(stats);
1327 private void getStatsNoSync(out GCStats stats)
1336 //debug(PRINTF) printf("getStats()\n");
1337 memset(&stats, 0, GCStats.sizeof);
1339 for (n = 0; n < gcx.npools; n++)
1340 { Pool *pool = gcx.pooltable[n];
1342 psize += pool.ncommitted * PAGESIZE;
1343 for (size_t j = 0; j < pool.ncommitted; j++)
1345 Bins bin = cast(Bins)pool.pagetable[j];
1348 else if (bin == B_PAGE)
1350 else if (bin < B_PAGE)
1355 for (n = 0; n < B_PAGE; n++)
1357 //debug(PRINTF) printf("bin %d\n", n);
1358 for (List *list = gcx.bucket[n]; list; list = list.next)
1360 //debug(PRINTF) printf("\tlist %x\n", list);
1361 flsize += binsize[n];
1365 usize = bsize - flsize;
1367 stats.poolsize = psize;
1368 stats.usedsize = bsize - flsize;
1369 stats.freelistsize = flsize;
1374 /* ============================ Gcx =============================== */
1378 COMMITSIZE = (4096*16),
1379 POOLSIZE = (4096*256),
1393 B_PAGE, // start of large alloc
1394 B_PAGEPLUS, // continuation of large alloc
1395 B_FREE, // free page
1396 B_UNCOMMITTED, // memory not committed for this page
1417 const uint binsize[B_MAX] = [ 16,32,64,128,256,512,1024,2048,4096 ];
1418 const uint notbinsize[B_MAX] = [ ~(16u-1),~(32u-1),~(64u-1),~(128u-1),~(256u-1),
1419 ~(512u-1),~(1024u-1),~(2048u-1),~(4096u-1) ];
1421 /* ============================ Gcx =============================== */
1426 debug (THREADINVARIANT)
1429 void thread_Invariant()
1431 if (self != pthread_self())
1432 printf("thread_Invariant(): gcx = %x, self = %x, pthread_self() = %x\n", this, self, pthread_self());
1433 assert(self == pthread_self());
1438 void thread_Invariant() { }
1452 uint noStack; // !=0 means don't scan stack
1453 uint log; // turn on logging
1457 int disabled; // turn off collections if >0
1459 byte *minAddr; // min(baseAddr)
1460 byte *maxAddr; // max(topAddr)
1465 List *bucket[B_MAX]; // free list for each size
1471 (cast(byte*)&this)[0 .. Gcx.sizeof] = 0;
1472 stackBottom = cast(char*)&dummy;
1474 debug (THREADINVARIANT)
1475 self = pthread_self();
1476 //printf("gcx = %p, self = %x\n", this, self);
1485 for (size_t i = 0; i < npools; i++)
1486 { Pool *pool = pooltable[i];
1492 cstdlib.free(pooltable);
1495 cstdlib.free(roots);
1498 cstdlib.free(ranges);
1502 void Invariant() { }
1509 //printf("Gcx.invariant(): this = %p\n", this);
1512 // Assure we're called on the right thread
1513 debug (THREADINVARIANT) assert(self == pthread_self());
1515 for (i = 0; i < npools; i++)
1516 { Pool *pool = pooltable[i];
1521 assert(minAddr == pool.baseAddr);
1525 assert(pool.opCmp(pooltable[i + 1]) < 0);
1527 else if (i + 1 == npools)
1529 assert(maxAddr == pool.topAddr);
1535 assert(rootdim != 0);
1536 assert(nroots <= rootdim);
1541 assert(rangedim != 0);
1542 assert(nranges <= rangedim);
1544 for (i = 0; i < nranges; i++)
1546 assert(ranges[i].pbot);
1547 assert(ranges[i].ptop);
1548 assert(ranges[i].pbot <= ranges[i].ptop);
1552 for (i = 0; i < B_PAGE; i++)
1554 for (List *list = bucket[i]; list; list = list.next)
1565 void addRoot(void *p)
1567 if (nroots == rootdim)
1569 size_t newdim = rootdim * 2 + 16;
1572 newroots = cast(void**)cstdlib.malloc(newdim * newroots[0].sizeof);
1574 onOutOfMemoryError();
1576 { memcpy(newroots, roots, nroots * newroots[0].sizeof);
1577 cstdlib.free(roots);
1590 void removeRoot(void *p)
1592 for (size_t i = nroots; i--;)
1597 memmove(roots + i, roots + i + 1, (nroots - i) * roots[0].sizeof);
1608 int rootIter(int delegate(inout void*) dg)
1611 for( size_t i = 0; i < nroots; ++i )
1613 result = dg(roots[i]);
1624 void addRange(void *pbot, void *ptop)
1626 debug(PRINTF) printf("Thread %x ", pthread_self());
1627 debug(PRINTF) printf("%x.Gcx::addRange(%x, %x), nranges = %d\n", this, pbot, ptop, nranges);
1628 if (nranges == rangedim)
1630 size_t newdim = rangedim * 2 + 16;
1633 newranges = cast(Range*)cstdlib.malloc(newdim * newranges[0].sizeof);
1635 onOutOfMemoryError();
1637 { memcpy(newranges, ranges, nranges * newranges[0].sizeof);
1638 cstdlib.free(ranges);
1643 ranges[nranges].pbot = pbot;
1644 ranges[nranges].ptop = ptop;
1652 void removeRange(void *pbot)
1654 debug(PRINTF) printf("Thread %x ", pthread_self());
1655 debug(PRINTF) printf("%x.Gcx.removeRange(%x), nranges = %d\n", this, pbot, nranges);
1656 for (size_t i = nranges; i--;)
1658 if (ranges[i].pbot == pbot)
1661 memmove(ranges + i, ranges + i + 1, (nranges - i) * ranges[0].sizeof);
1665 debug(PRINTF) printf("Wrong thread\n");
1667 // This is a fatal error, but ignore it.
1668 // The problem is that we can get a Close() call on a thread
1669 // other than the one the range was allocated on.
1677 int rangeIter(int delegate(inout Range) dg)
1680 for( size_t i = 0; i < nranges; ++i )
1682 result = dg(ranges[i]);
1691 * Find Pool that pointer is in.
1692 * Return null if not in a Pool.
1693 * Assume pooltable[] is sorted.
1695 Pool *findPool(void *p)
1697 if (p >= minAddr && p < maxAddr)
1701 return pooltable[0];
1704 for (size_t i = 0; i < npools; i++)
1707 pool = pooltable[i];
1708 if (p < pool.topAddr)
1709 { if (pool.baseAddr <= p)
1720 * Find base address of block containing pointer p.
1721 * Returns null if not a gc'd pointer
1723 void* findBase(void *p)
1730 size_t offset = cast(size_t)(p - pool.baseAddr);
1731 size_t pn = offset / PAGESIZE;
1732 Bins bin = cast(Bins)pool.pagetable[pn];
1734 // Adjust bit to be at start of allocated memory block
1737 return pool.baseAddr + (offset & notbinsize[bin]);
1739 else if (bin == B_PAGEPLUS)
1742 { --pn, offset -= PAGESIZE;
1743 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1745 return pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1749 // we are in a B_FREE or B_UNCOMMITTED page
1758 * Find size of pointer p.
1759 * Returns 0 if not a gc'd pointer
1761 size_t findSize(void *p)
1772 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1773 bin = cast(Bins)pool.pagetable[pagenum];
1774 size = binsize[bin];
1776 { size_t npages = pool.ncommitted;
1780 pt = &pool.pagetable[0];
1781 for (i = pagenum + 1; i < npages; i++)
1783 if (pt[i] != B_PAGEPLUS)
1786 size = (i - pagenum) * PAGESIZE;
1796 BlkInfo getInfo(void* p)
1804 size_t offset = cast(size_t)(p - pool.baseAddr);
1805 size_t pn = offset / PAGESIZE;
1806 Bins bin = cast(Bins)pool.pagetable[pn];
1808 ////////////////////////////////////////////////////////////////////
1810 ////////////////////////////////////////////////////////////////////
1814 info.base = pool.baseAddr + (offset & notbinsize[bin]);
1816 else if (bin == B_PAGEPLUS)
1819 { --pn, offset -= PAGESIZE;
1820 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1822 info.base = pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1824 // fix bin for use by size calc below
1825 bin = cast(Bins)pool.pagetable[pn];
1828 ////////////////////////////////////////////////////////////////////
1830 ////////////////////////////////////////////////////////////////////
1832 info.size = binsize[bin];
1834 { size_t npages = pool.ncommitted;
1838 pt = &pool.pagetable[0];
1839 for (i = pn + 1; i < npages; i++)
1841 if (pt[i] != B_PAGEPLUS)
1844 info.size = (i - pn) * PAGESIZE;
1847 ////////////////////////////////////////////////////////////////////
1849 ////////////////////////////////////////////////////////////////////
1851 info.attr = getBits(pool, cast(size_t)(offset / 16));
1858 * Compute bin for size.
1860 static Bins findBin(size_t size)
1869 else if (size <= 32)
1904 * Allocate a new pool of at least size bytes.
1905 * Sort it into pooltable[].
1906 * Mark all memory in the pool as B_FREE.
1907 * Return the actual number of bytes reserved or 0 on error.
1909 size_t reserve(size_t size)
1911 size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
1912 Pool* pool = newPool(npages);
1914 if (!pool || pool.extendPages(npages) == OPFAIL)
1916 return pool.ncommitted * PAGESIZE;
1921 * Minimizes physical memory usage by returning free pools to the OS.
1930 for (n = 0; n < npools; n++)
1932 pool = pooltable[n];
1933 ncommitted = pool.ncommitted;
1934 for (pn = 0; pn < ncommitted; pn++)
1936 if (cast(Bins)pool.pagetable[pn] != B_FREE)
1939 if (pn < ncommitted)
1946 memmove(pooltable + n,
1948 (--npools - n) * (Pool*).sizeof);
1949 minAddr = pooltable[0].baseAddr;
1950 maxAddr = pooltable[npools - 1].topAddr;
1956 * Allocate a chunk of memory that is larger than a page.
1957 * Return null if out of memory.
1959 void *bigAlloc(size_t size)
1969 npages = (size + PAGESIZE - 1) / PAGESIZE;
1973 // This code could use some refinement when repeatedly
1974 // allocating very large arrays.
1976 for (n = 0; n < npools; n++)
1978 pool = pooltable[n];
1979 pn = pool.allocPages(npages);
1993 freedpages = fullcollectshell();
1994 if (freedpages >= npools * ((POOLSIZE / PAGESIZE) / 4))
1998 // Release empty pools to prevent bloat
2000 // Allocate new pool
2001 pool = newPool(npages);
2006 pn = pool.allocPages(npages);
2007 assert(pn != OPFAIL);
2010 // Release empty pools to prevent bloat
2012 // Allocate new pool
2013 pool = newPool(npages);
2016 pn = pool.allocPages(npages);
2017 assert(pn != OPFAIL);
2027 pool.pagetable[pn] = B_PAGE;
2029 memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
2030 p = pool.baseAddr + pn * PAGESIZE;
2031 memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
2032 debug (MEMSTOMP) memset(p, 0xF1, size);
2033 //debug(PRINTF) printf("\tp = %x\n", p);
2037 return null; // let mallocNoSync handle the error
2042 * Allocate a new pool with at least npages in it.
2043 * Sort it into pooltable[].
2044 * Return null if failed.
2046 Pool *newPool(size_t npages)
2049 Pool** newpooltable;
2053 //debug(PRINTF) printf("************Gcx::newPool(npages = %d)****************\n", npages);
2055 // Round up to COMMITSIZE pages
2056 npages = (npages + (COMMITSIZE/PAGESIZE) - 1) & ~(COMMITSIZE/PAGESIZE - 1);
2058 // Minimum of POOLSIZE
2059 if (npages < POOLSIZE/PAGESIZE)
2060 npages = POOLSIZE/PAGESIZE;
2061 else if (npages > POOLSIZE/PAGESIZE)
2062 { // Give us 150% of requested size, so there's room to extend
2063 auto n = npages + (npages >> 1);
2064 if (n < size_t.max/PAGESIZE)
2068 // Allocate successively larger pools up to 8 megs
2074 n = 8; // cap pool size at 8 megs
2075 n *= (POOLSIZE / PAGESIZE);
2080 pool = cast(Pool *)cstdlib.calloc(1, Pool.sizeof);
2083 pool.initialize(npages);
2087 newnpools = npools + 1;
2088 newpooltable = cast(Pool **)cstdlib.realloc(pooltable, newnpools * (Pool *).sizeof);
2092 // Sort pool into newpooltable[]
2093 for (i = 0; i < npools; i++)
2095 if (pool.opCmp(newpooltable[i]) < 0)
2098 memmove(newpooltable + i + 1, newpooltable + i, (npools - i) * (Pool *).sizeof);
2099 newpooltable[i] = pool;
2101 pooltable = newpooltable;
2104 minAddr = pooltable[0].baseAddr;
2105 maxAddr = pooltable[npools - 1].topAddr;
2117 * Allocate a page of bin's.
2121 int allocPage(Bins bin)
2129 //debug(PRINTF) printf("Gcx::allocPage(bin = %d)\n", bin);
2130 for (n = 0; n < npools; n++)
2132 pool = pooltable[n];
2133 pn = pool.allocPages(1);
2140 pool.pagetable[pn] = cast(ubyte)bin;
2142 // Convert page to free list
2143 size_t size = binsize[bin];
2144 List **b = &bucket[bin];
2146 p = pool.baseAddr + pn * PAGESIZE;
2147 ptop = p + PAGESIZE;
2148 for (; p < ptop; p += size)
2150 (cast(List *)p).next = *b;
2158 * Search a range of memory values and mark any pointers into the GC pool.
2160 void mark(void *pbot, void *ptop)
2162 void **p1 = cast(void **)pbot;
2163 void **p2 = cast(void **)ptop;
2167 //printf("marking range: %p -> %p\n", pbot, ptop);
2168 for (; p1 < p2; p1++)
2171 byte *p = cast(byte *)(*p1);
2173 //if (log) debug(PRINTF) printf("\tmark %x\n", p);
2174 if (p >= minAddr && p < maxAddr)
2176 if ((cast(size_t)p & ~(PAGESIZE-1)) == pcache)
2182 size_t offset = cast(size_t)(p - pool.baseAddr);
2184 size_t pn = offset / PAGESIZE;
2185 Bins bin = cast(Bins)pool.pagetable[pn];
2187 //debug(PRINTF) printf("\t\tfound pool %x, base=%x, pn = %d, bin = %d, biti = x%x\n", pool, pool.baseAddr, pn, bin, biti);
2189 // Adjust bit to be at start of allocated memory block
2192 biti = (offset & notbinsize[bin]) >> 4;
2193 //debug(PRINTF) printf("\t\tbiti = x%x\n", biti);
2195 else if (bin == B_PAGEPLUS)
2199 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
2200 biti = pn * (PAGESIZE / 16);
2204 // Don't mark bits in B_FREE or B_UNCOMMITTED pages
2208 if (bin >= B_PAGE) // Cache B_PAGE and B_PAGEPLUS lookups
2209 pcache = cast(size_t)p & ~(PAGESIZE-1);
2211 //debug(PRINTF) printf("\t\tmark(x%x) = %d\n", biti, pool.mark.test(biti));
2212 if (!pool.mark.test(biti))
2214 //if (log) debug(PRINTF) printf("\t\tmarking %x\n", p);
2215 pool.mark.set(biti);
2216 if (!pool.noscan.test(biti))
2218 pool.scan.set(biti);
2221 log_parent(sentinel_add(pool.baseAddr + biti * 16), sentinel_add(pbot));
2226 anychanges |= changes;
2231 * Return number of full pages free'd.
2233 size_t fullcollectshell()
2235 // The purpose of the 'shell' is to ensure all the registers
2236 // get put on the stack so they'll be scanned
2241 __builtin_unwind_init();
2252 result = fullcollect(sp);
2271 size_t fullcollect(void *stackTop)
2276 debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
2278 thread_suspendAll();
2284 for (n = 0; n < npools; n++)
2286 pool = pooltable[n];
2289 pool.freebits.zero();
2292 // Mark each free entry, so it doesn't get scanned
2293 for (n = 0; n < B_PAGE; n++)
2295 for (List *list = bucket[n]; list; list = list.next)
2297 pool = findPool(list);
2299 pool.freebits.set(cast(size_t)(cast(byte*)list - pool.baseAddr) / 16);
2303 for (n = 0; n < npools; n++)
2305 pool = pooltable[n];
2306 pool.mark.copy(&pool.freebits);
2309 version (MULTI_THREADED)
2313 // Scan stacks and registers for each paused thread
2314 thread_scanAll( &mark, stackTop );
2321 // Scan stack for main thread
2322 debug(PRINTF) printf(" scan stack bot = %x, top = %x\n", stackTop, stackBottom);
2323 version (STACKGROWSDOWN)
2324 mark(stackTop, stackBottom);
2326 mark(stackBottom, stackTop);
2331 debug(COLLECT_PRINTF) printf("scan roots[]\n");
2332 mark(roots, roots + nroots);
2335 debug(COLLECT_PRINTF) printf("scan ranges[]\n");
2337 for (n = 0; n < nranges; n++)
2339 debug(COLLECT_PRINTF) printf("\t%x .. %x\n", ranges[n].pbot, ranges[n].ptop);
2340 mark(ranges[n].pbot, ranges[n].ptop);
2344 debug(COLLECT_PRINTF) printf("\tscan heap\n");
2348 for (n = 0; n < npools; n++)
2354 pool = pooltable[n];
2356 bbase = pool.scan.base();
2357 btop = bbase + pool.scan.nwords;
2358 for (b = bbase; b < btop;)
2372 o = pool.baseAddr + (b - bbase) * 32 * 16;
2373 if (!(bitm & 0xFFFF))
2378 for (; bitm; o += 16, bitm >>= 1)
2383 pn = cast(size_t)(o - pool.baseAddr) / PAGESIZE;
2384 bin = cast(Bins)pool.pagetable[pn];
2387 mark(o, o + binsize[bin]);
2389 else if (bin == B_PAGE || bin == B_PAGEPLUS)
2391 if (bin == B_PAGEPLUS)
2393 while (pool.pagetable[pn - 1] != B_PAGE)
2397 while (pn + u < pool.ncommitted && pool.pagetable[pn + u] == B_PAGEPLUS)
2399 mark(o, o + u * PAGESIZE);
2408 // Free up everything not marked
2409 debug(COLLECT_PRINTF) printf("\tfree'ing\n");
2410 size_t freedpages = 0;
2412 for (n = 0; n < npools; n++)
2417 pool = pooltable[n];
2418 bbase = pool.mark.base();
2419 ncommitted = pool.ncommitted;
2420 for (pn = 0; pn < ncommitted; pn++, bbase += PAGESIZE / (32 * 16))
2422 Bins bin = cast(Bins)pool.pagetable[pn];
2429 auto size = binsize[bin];
2431 p = pool.baseAddr + pn * PAGESIZE;
2432 ptop = p + PAGESIZE;
2433 biti = pn * (PAGESIZE/16);
2434 bitstride = size / 16;
2436 version(none) // BUG: doesn't work because freebits() must also be cleared
2438 // If free'd entire page
2439 if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 && bbase[3] == 0 &&
2440 bbase[4] == 0 && bbase[5] == 0 && bbase[6] == 0 && bbase[7] == 0)
2442 for (; p < ptop; p += size, biti += bitstride)
2444 if (pool.finals.nbits && pool.finals.testClear(biti))
2445 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2446 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
2448 List *list = cast(List *)p;
2449 //debug(PRINTF) printf("\tcollecting %x\n", list);
2450 log_free(sentinel_add(list));
2452 debug (MEMSTOMP) memset(p, 0xF3, size);
2454 pool.pagetable[pn] = B_FREE;
2456 //debug(PRINTF) printf("freeing entire page %d\n", pn);
2460 for (; p < ptop; p += size, biti += bitstride)
2462 if (!pool.mark.test(biti))
2464 sentinel_Invariant(sentinel_add(p));
2466 pool.freebits.set(biti);
2467 if (pool.finals.nbits && pool.finals.testClear(biti))
2468 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2469 clrBits(pool, biti, BlkAttr.ALL_BITS);
2471 List *list = cast(List *)p;
2472 debug(PRINTF) printf("\tcollecting %x\n", list);
2473 log_free(sentinel_add(list));
2475 debug (MEMSTOMP) memset(p, 0xF3, size);
2481 else if (bin == B_PAGE)
2482 { size_t biti = pn * (PAGESIZE / 16);
2484 if (!pool.mark.test(biti))
2485 { byte *p = pool.baseAddr + pn * PAGESIZE;
2487 sentinel_Invariant(sentinel_add(p));
2488 if (pool.finals.nbits && pool.finals.testClear(biti))
2489 rt_finalize(sentinel_add(p), false/*noStack > 0*/);
2490 clrBits(pool, biti, BlkAttr.ALL_BITS);
2492 debug(COLLECT_PRINTF) printf("\tcollecting big %x\n", p);
2493 log_free(sentinel_add(p));
2494 pool.pagetable[pn] = B_FREE;
2496 debug (MEMSTOMP) memset(p, 0xF3, PAGESIZE);
2497 while (pn + 1 < ncommitted && pool.pagetable[pn + 1] == B_PAGEPLUS)
2500 pool.pagetable[pn] = B_FREE;
2505 memset(p, 0xF3, PAGESIZE);
2516 // Free complete pages, rebuild free list
2517 debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
2518 size_t recoveredpages = 0;
2519 for (n = 0; n < npools; n++)
2523 pool = pooltable[n];
2524 ncommitted = pool.ncommitted;
2525 for (pn = 0; pn < ncommitted; pn++)
2527 Bins bin = cast(Bins)pool.pagetable[pn];
2533 size_t size = binsize[bin];
2534 size_t bitstride = size / 16;
2535 size_t bitbase = pn * (PAGESIZE / 16);
2536 size_t bittop = bitbase + (PAGESIZE / 16);
2540 for (biti = bitbase; biti < bittop; biti += bitstride)
2541 { if (!pool.freebits.test(biti))
2544 pool.pagetable[pn] = B_FREE;
2549 p = pool.baseAddr + pn * PAGESIZE;
2550 for (u = 0; u < PAGESIZE; u += size)
2551 { biti = bitbase + u / 16;
2552 if (pool.freebits.test(biti))
2555 list = cast(List *)(p + u);
2556 if (list.next != bucket[bin]) // avoid unnecessary writes
2557 list.next = bucket[bin];
2565 debug(COLLECT_PRINTF) printf("recovered pages = %d\n", recoveredpages);
2566 debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, npools);
2568 return freedpages + recoveredpages;
// Read back the BlkAttr flags recorded for the block at bit index 'biti'
// in 'pool'.  The finals bitmap is allocated lazily (see setBits), so its
// nbits must be checked before testing; noscan always exists.
2575 uint getBits(Pool* pool, size_t biti)
2584 if (pool.finals.nbits &&
2585 pool.finals.test(biti))
2586 bits |= BlkAttr.FINALIZE;
2587 if (pool.noscan.test(biti))
2588 bits |= BlkAttr.NO_SCAN;
// NO_MOVE support is stubbed out (no nomove bitmap is maintained):
2589 // if (pool.nomove.nbits &&
2590 // pool.nomove.test(biti))
2591 // bits |= BlkAttr.NO_MOVE;
// Set the BlkAttr flags named in 'mask' for the block at bit index 'biti'.
// finals is allocated on first use, sized to match the mark bitmap.
2599 void setBits(Pool* pool, size_t biti, uint mask)
2606 if (mask & BlkAttr.FINALIZE)
2608 if (!pool.finals.nbits)
2609 pool.finals.alloc(pool.mark.nbits);
2610 pool.finals.set(biti);
2612 if (mask & BlkAttr.NO_SCAN)
2614 pool.noscan.set(biti);
// NO_MOVE is not implemented; kept for reference:
2616 // if (mask & BlkAttr.NO_MOVE)
2618 // if (!pool.nomove.nbits)
2619 // pool.nomove.alloc(pool.mark.nbits);
2620 // pool.nomove.set(biti);
// Clear the BlkAttr flags named in 'mask' for the block at bit index
// 'biti'.  finals is only touched if it was ever allocated (nbits != 0).
2628 void clrBits(Pool* pool, size_t biti, uint mask)
2635 if (mask & BlkAttr.FINALIZE && pool.finals.nbits)
2636 pool.finals.clear(biti);
2637 if (mask & BlkAttr.NO_SCAN)
2638 pool.noscan.clear(biti);
2639 // if (mask & BlkAttr.NO_MOVE && pool.nomove.nbits)
2640 // pool.nomove.clear(biti);
2644 /***** Leak Detector ******/
2655 //debug(PRINTF) printf("+log_init()\n");
2656 current.reserve(1000);
2658 //debug(PRINTF) printf("-log_init()\n");
// Leak-detector hook: record an allocation of 'size' bytes at address 'p'
// in the current-cycle log.  (Interior statements elided in this excerpt.)
2662 void log_malloc(void *p, size_t size)
2664 //debug(PRINTF) printf("+log_malloc(p = %x, size = %d)\n", p, size);
2677 //debug(PRINTF) printf("-log_malloc()\n");
// Leak-detector hook: drop p's entry from the current-cycle log.
// Warns when p was never recorded as allocated.
2681 void log_free(void *p)
2683 //debug(PRINTF) printf("+log_free(%x)\n", p);
2686 i = current.find(p);
2689 debug(PRINTF) printf("free'ing unallocated memory %x\n", p);
2693 //debug(PRINTF) printf("-log_free()\n");
2699 //debug(PRINTF) printf("+log_collect()\n");
2700 // Print everything in current that is not in prev
2702 debug(PRINTF) printf("New pointers this cycle: --------------------------------\n");
2704 for (size_t i = 0; i < current.dim; i++)
2708 j = prev.find(current.data[i].p);
2710 current.data[i].print();
2715 debug(PRINTF) printf("All roots this cycle: --------------------------------\n");
2716 for (size_t i = 0; i < current.dim; i++)
2721 p = current.data[i].p;
2722 if (!findPool(current.data[i].parent))
2724 j = prev.find(current.data[i].p);
2726 debug(PRINTF) printf("N");
2728 debug(PRINTF) printf(" ");;
2729 current.data[i].print();
2733 debug(PRINTF) printf("Used = %d-------------------------------------------------\n", used);
2734 prev.copy(&current);
2736 debug(PRINTF) printf("-log_collect()\n");
// Leak-detector hook: remember which object ('parent') referenced 'p'
// during the last mark phase, for later reporting by log_collect.
2740 void log_parent(void *p, void *parent)
2742 //debug(PRINTF) printf("+log_parent()\n");
2745 i = current.find(p);
2748 debug(PRINTF) printf("parent'ing unallocated memory %x, parent = %x\n", p, parent);
// p wasn't in the log: print diagnostics locating it inside its pool.
2752 size_t offset = cast(size_t)(p - pool.baseAddr);
2754 size_t pn = offset / PAGESIZE;
2755 Bins bin = cast(Bins)pool.pagetable[pn];
2756 biti = (offset & notbinsize[bin]);
2757 debug(PRINTF) printf("\tbin = %d, offset = x%x, biti = x%x\n", bin, offset, biti);
// Normal path: attach the parent pointer to p's log entry.
2761 current.data[i].parent = parent;
2763 //debug(PRINTF) printf("-log_parent()\n");
// No-op stubs used when leak-detector logging is compiled out, so call
// sites need no conditional compilation.
2770 void log_malloc(void *p, size_t size) { }
2771 void log_free(void *p) { }
2772 void log_collect() { }
2773 void log_parent(void *p, void *parent) { }
2778 /* ============================ Pool =============================== */
2785 GCBits mark; // entries already scanned, or should not be scanned
2786 GCBits scan; // entries that need to be scanned
2787 GCBits freebits; // entries that are on the free list
2788 GCBits finals; // entries that need finalizer run on them
2789 GCBits noscan; // entries that should not be scanned
2792 size_t ncommitted; // ncommitted <= npages
// Map and set up a new pool of 'npages' pages: reserve the address range,
// allocate the per-16-byte-unit bitmaps, and mark every page
// B_UNCOMMITTED (pages are committed on demand by extendPages).
// Note: finals is NOT allocated here — setBits allocates it lazily.
2796 void initialize(size_t npages)
2800 //debug(PRINTF) printf("Pool::Pool(%u)\n", npages);
2801 poolsize = npages * PAGESIZE;
2802 assert(poolsize >= POOLSIZE);
2803 baseAddr = cast(byte *)os_mem_map(poolsize);
2805 // Some of the code depends on page alignment of memory pools
2806 assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);
2810 //debug(PRINTF) printf("GC fail: poolsize = x%x, errno = %d\n", poolsize, errno);
2811 //debug(PRINTF) printf("message = '%s'\n", sys_errlist[errno]);
2817 topAddr = baseAddr + poolsize;
// One bit per 16-byte unit of the pool:
2819 mark.alloc(cast(size_t)poolsize / 16);
2820 scan.alloc(cast(size_t)poolsize / 16);
2821 freebits.alloc(cast(size_t)poolsize / 16);
2822 noscan.alloc(cast(size_t)poolsize / 16);
// One descriptor byte (a Bins value) per page:
2824 pagetable = cast(ubyte*)cstdlib.malloc(npages);
2826 onOutOfMemoryError();
2827 memset(pagetable, B_UNCOMMITTED, npages);
2829 this.npages = npages;
2842 result = os_mem_decommit(baseAddr, 0, ncommitted * PAGESIZE);
2843 assert(result == 0);
2849 result = os_mem_unmap(baseAddr, npages * PAGESIZE);
2850 assert(result == 0);
2858 cstdlib.free(pagetable);
// Intentionally empty explicit-invariant hook.
2868 void Invariant() { }
2875 //freebits.Invariant();
2876 //finals.Invariant();
2877 //noscan.Invariant();
2881 //if (baseAddr + npages * PAGESIZE != topAddr)
2882 //printf("baseAddr = %p, npages = %d, topAddr = %p\n", baseAddr, npages, topAddr);
2883 assert(baseAddr + npages * PAGESIZE == topAddr);
2884 assert(ncommitted <= npages);
2887 for (size_t i = 0; i < npages; i++)
2888 { Bins bin = cast(Bins)pagetable[i];
2890 assert(bin < B_MAX);
2896 * Allocate n pages from Pool.
2897 * Returns OPFAIL on failure.
2899 size_t allocPages(size_t n)
2904 //debug(PRINTF) printf("Pool::allocPages(n = %d)\n", n);
// First-fit scan over the committed pages for a run of n B_FREE pages.
2906 for (i = 0; i < ncommitted; i++)
2908 if (pagetable[i] == B_FREE)
2911 { //debug(PRINTF) printf("\texisting pn = %d\n", i - n + 1);
// No free run among committed pages: commit more from the reservation.
2918 return extendPages(n);
2922 * Extend Pool by n pages.
2923 * Returns OPFAIL on failure.
2925 size_t extendPages(size_t n)
2927 //debug(PRINTF) printf("Pool::extendPages(n = %d)\n", n);
// Only possible while the reservation still has uncommitted pages left.
2928 if (ncommitted + n <= npages)
// Round the request up to a whole COMMITSIZE worth of pages (assumes
// COMMITSIZE/PAGESIZE is a power of two), clamped to what remains.
2932 tocommit = (n + (COMMITSIZE/PAGESIZE) - 1) & ~(COMMITSIZE/PAGESIZE - 1);
2933 if (ncommitted + tocommit > npages)
2934 tocommit = npages - ncommitted;
2935 //debug(PRINTF) printf("\tlooking to commit %d more pages\n", tocommit);
2937 if (os_mem_commit(baseAddr, ncommitted * PAGESIZE, tocommit * PAGESIZE) == 0)
2939 memset(pagetable + ncommitted, B_FREE, tocommit);
2940 auto i = ncommitted;
2941 ncommitted += tocommit;
// Step back over trailing B_FREE pages so the returned run can begin
// inside the previously committed region.
2943 while (i && pagetable[i - 1] == B_FREE)
2948 //debug(PRINTF) printf("\tfailed to commit %d pages\n", tocommit);
2956 * Free npages pages starting with pagenum.
2958 void freePages(size_t pagenum, size_t npages)
// Only the page descriptors are reset to B_FREE; the memory itself
// stays committed for reuse.
2960 memset(&pagetable[pagenum], B_FREE, npages);
2965 * Used for sorting pooltable[]
2969 if (baseAddr < p2.baseAddr)
2972 return cast(int)(baseAddr > p2.baseAddr);
2977 /* ============================ SENTINEL =============================== */
2982 const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
2983 const ubyte SENTINEL_POST = 0xF5; // 8 bits
2984 const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;
// Address of the stashed allocation size, two words before the user pointer.
2987 size_t* sentinel_size(void *p) { return &(cast(size_t *)p)[-2]; }
// Address of the underrun guard word, immediately before the user pointer.
2988 size_t* sentinel_pre(void *p) { return &(cast(size_t *)p)[-1]; }
// Address of the overrun guard byte, just past the *sentinel_size(p) user bytes.
2989 ubyte* sentinel_post(void *p) { return &(cast(ubyte *)p)[*sentinel_size(p)]; }
// Stamp the size field and both guard values around a fresh allocation of
// 'size' user bytes at user pointer 'p'.
2992 void sentinel_init(void *p, size_t size)
2994 *sentinel_size(p) = size;
2995 *sentinel_pre(p) = SENTINEL_PRE;
2996 *sentinel_post(p) = SENTINEL_POST;
// Assert that neither guard value has been overwritten, i.e. detect a
// buffer underrun or overrun on the block at user pointer 'p'.
3000 void sentinel_Invariant(void *p)
3002 assert(*sentinel_pre(p) == SENTINEL_PRE);
3003 assert(*sentinel_post(p) == SENTINEL_POST);
// Convert a raw block pointer to the user pointer by skipping the size
// field and the pre-guard word (two words total).
3007 void *sentinel_add(void *p)
3009 return p + 2 * size_t.sizeof;
// Inverse of sentinel_add: map a user pointer back to the raw block pointer.
3013 void *sentinel_sub(void *p)
3015 return p - 2 * size_t.sizeof;
3020 const uint SENTINEL_EXTRA = 0;
3023 void sentinel_init(void *p, size_t size)
3028 void sentinel_Invariant(void *p)
3033 void *sentinel_add(void *p)
3039 void *sentinel_sub(void *p)