2 * This module contains the garbage collector implementation.
4 * Copyright: Copyright (C) 2001-2007 Digital Mars, www.digitalmars.com.
7 * This software is provided 'as-is', without any express or implied
8 * warranty. In no event will the authors be held liable for any damages
9 * arising from the use of this software.
11 * Permission is granted to anyone to use this software for any purpose,
12 * including commercial applications, and to alter it and redistribute it
13 * freely, in both source and binary form, subject to the following
16 * o The origin of this software must not be misrepresented; you must not
17 * claim that you wrote the original software. If you use this software
18 * in a product, an acknowledgment in the product documentation would be
19 * appreciated but is not required.
20 * o Altered source versions must be plainly marked as such, and must not
21 * be misrepresented as being the original software.
22 * o This notice may not be removed or altered from any source
24 * Authors: Walter Bright, David Friedman, Sean Kelly
29 // D Programming Language Garbage Collector implementation
31 /************** Debugging ***************************/
33 //debug = PRINTF; // turn on printf's
34 //debug = COLLECT_PRINTF; // turn on printf's
35 //debug = THREADINVARIANT; // check thread integrity
36 //debug = LOGGING; // log allocations / frees
37 //debug = MEMSTOMP; // stomp on memory
38 //debug = SENTINEL; // add underrun/overrun protection
39 //debug = PTRCHECK; // more pointer checking
40 //debug = PTRCHECK2; // thorough but slow pointer checking
42 /*************** Configuration *********************/
44 version = STACKGROWSDOWN; // growing the stack means subtracting from the stack pointer
45 // (use for Intel X86 CPUs)
46 // else growing the stack means adding to the stack pointer
47 version = MULTI_THREADED; // produce multithreaded version
49 /***************************************************/
51 private import gcbits;
52 private import gcstats;
53 private import gcalloc;
55 private import cstdlib = stdc.stdlib : calloc, free, malloc, realloc;
56 private import stdc.string;
58 debug private import stdc.stdio;
62 // BUG: The following import will likely not work, since the gcc
63 // subdirectory is elsewhere. Instead, perhaps the functions
64 // could be declared directly or some other resolution could
66 private import gcc.builtins; // for __builtin_unwind_init
74 FINALIZE = 0b0000_0001,
75 NO_SCAN = 0b0000_0010,
76 NO_MOVE = 0b0000_0100,
77 ALL_BITS = 0b1111_1111
87 extern (C) void* rt_stackBottom();
88 extern (C) void* rt_stackTop();
89 extern (C) void* rt_staticDataBottom();
90 extern (C) void* rt_staticDataTop();
92 extern (C) void rt_finalize( void* p, bool det = true );
94 alias void delegate( void*, void* ) scanFn;
96 extern (C) void rt_scanStaticData( scanFn scan );
98 version (MULTI_THREADED)
100 extern (C) bool thread_needLock();
101 extern (C) void thread_suspendAll();
102 extern (C) void thread_resumeAll();
104 extern (C) void thread_scanAll( scanFn fn, void* curStackTop = null );
107 extern (C) void onOutOfMemoryError();
111 OPFAIL = ~cast(size_t)0
119 /* ======================= Leak Detector =========================== */
134 printf(" p = %x, size = %d, parent = %x ", p, size, parent);
137 printf("%s(%u)", file, line);
// Grow the log array so that at least `nentries` more entries fit.
// Doubles capacity (based on dim + nentries); aborts via onOutOfMemoryError
// on allocation failure. NOTE(review): listing is elided here — the branch
// between the fresh-malloc path and the malloc+memcpy reallocation path is
// only partially visible.
157 void reserve(size_t nentries)
159 assert(dim <= allocdim);
160 if (allocdim - dim < nentries)
162 allocdim = (dim + nentries) * 2;
163 assert(dim + nentries <= allocdim);
// Fresh allocation path (no prior data).
166 data = cast(Log*)cstdlib.malloc(allocdim * Log.sizeof);
167 if (!data && allocdim)
168 onOutOfMemoryError();
// Reallocation path: allocate new buffer, then copy existing entries over.
173 newdata = cast(Log*)cstdlib.malloc(allocdim * Log.sizeof);
174 if (!newdata && allocdim)
175 onOutOfMemoryError();
176 memcpy(newdata, data, dim * Log.sizeof);
// Remove entry i by shifting the tail of the array down one slot
// (memmove handles the overlapping ranges).
190 void remove(size_t i)
192 memmove(data + i, data + i + 1, (dim - i) * Log.sizeof);
// Linear search over entries; returns OPFAIL when no match is found.
199 for (size_t i = 0; i < dim; i++)
204 return OPFAIL; // not found
// Copy all entries from another LogArray into this one, growing first.
208 void copy(LogArray *from)
210 reserve(from.dim - dim);
211 assert(from.dim <= allocdim);
212 memcpy(data, from.data, from.dim * Log.sizeof);
219 /* ============================ GC =============================== */
222 class GCLock { } // just a dummy so we can get a global lock
225 const uint GCVERSION = 1; // increment every time we change interface
230 // For passing to debug code
234 uint gcversion = GCVERSION;
236 Gcx *gcx; // implementation
237 static ClassInfo gcLock; // global lock
242 gcLock = GCLock.classinfo;
243 gcx = cast(Gcx*)cstdlib.calloc(1, Gcx.sizeof);
245 onOutOfMemoryError();
247 setStackBottom(rt_stackBottom());
255 //debug(PRINTF) printf("Thread %x ", pthread_self());
256 //debug(PRINTF) printf("GC.Dtor()\n");
272 gcx.thread_Invariant();
282 if (!thread_needLock())
284 assert(gcx.disabled > 0);
287 else synchronized (gcLock)
289 assert(gcx.disabled > 0);
300 if (!thread_needLock())
304 else synchronized (gcLock)
// Query the block attribute bits (BlkAttr) for the allocation containing p.
// biti is the bit index: pool offsets are tracked at 16-byte granularity.
// The lock-free path is taken when only one thread exists (thread_needLock).
314 uint getAttr(void* p)
323 Pool* pool = gcx.findPool(p);
328 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
330 oldb = gcx.getBits(pool, biti);
335 if (!thread_needLock())
339 else synchronized (gcLock)
// Set attribute bits given by mask on the allocation containing p;
// returns the previous bits (oldb is read before setBits).
349 uint setAttr(void* p, uint mask)
358 Pool* pool = gcx.findPool(p);
363 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
365 oldb = gcx.getBits(pool, biti);
366 gcx.setBits(pool, biti, mask);
371 if (!thread_needLock())
375 else synchronized (gcLock)
// Clear attribute bits given by mask on the allocation containing p;
// returns the previous bits.
385 uint clrAttr(void* p, uint mask)
394 Pool* pool = gcx.findPool(p);
399 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
401 oldb = gcx.getBits(pool, biti);
402 gcx.clrBits(pool, biti, mask);
407 if (!thread_needLock())
411 else synchronized (gcLock)
// Public allocation entry point: dispatches to mallocNoSync, taking the
// global gcLock only when more than one thread may be running.
421 void *malloc(size_t size, uint bits = 0)
428 if (!thread_needLock())
430 return mallocNoSync(size, bits);
432 else synchronized (gcLock)
434 return mallocNoSync(size, bits);
// Allocation worker (caller holds the lock or is single-threaded).
// Small requests come from per-size-class free lists (bucket[bin]);
// large requests go through bigAlloc. `bits` are BlkAttr flags to set
// on the new block.
442 private void *mallocNoSync(size_t size, uint bits = 0)
449 //debug(PRINTF) printf("GC::malloc(size = %d, gcx = %p)\n", size, gcx);
451 //debug(PRINTF) printf("gcx.self = %x, pthread_self() = %x\n", gcx.self, pthread_self());
// SENTINEL_EXTRA pads the request for under/overrun guards (debug SENTINEL).
453 size += SENTINEL_EXTRA;
456 // Cache previous binsize lookup - Dave Fladebo.
457 static size_t lastsize = -1;
459 if (size == lastsize)
463 bin = gcx.findBin(size);
// Free list for this bin is empty: try to carve a new page, collecting
// first if allocation of a page fails and collection is not disabled.
473 if (!gcx.allocPage(bin) && !gcx.disabled) // try to find a new page
475 if (!thread_needLock())
477 /* Then we haven't locked it yet. Be sure
478 * and lock for a collection, since a finalizer
479 * may start a new thread.
481 synchronized (gcLock)
483 gcx.fullcollectshell();
486 else if (!gcx.fullcollectshell()) // collect to find a new page
// Collection didn't help: grow the heap with a new pool, then give up
// with onOutOfMemoryError if a page still can't be obtained.
491 if (!gcx.bucket[bin] && !gcx.allocPage(bin))
494 gcx.newPool(1); // allocate new pool to find a new page
495 result = gcx.allocPage(bin);
497 onOutOfMemoryError();
502 // Return next item from free list
503 gcx.bucket[bin] = (cast(List*)p).next;
// Zero the unused tail of the bin slot unless the block is NO_SCAN,
// so stale pointers in the slack can't keep garbage alive.
504 if( !(bits & BlkAttr.NO_SCAN) )
505 memset(p + size, 0, binsize[bin] - size);
506 //debug(PRINTF) printf("\tmalloc => %x\n", p);
507 debug (MEMSTOMP) memset(p, 0xF0, size);
// Large-object path.
511 p = gcx.bigAlloc(size);
513 onOutOfMemoryError();
515 size -= SENTINEL_EXTRA;
517 sentinel_init(p, size);
518 gcx.log_malloc(p, size);
// Record the requested attribute bits for the new block.
522 Pool *pool = gcx.findPool(p);
525 gcx.setBits(pool, cast(size_t)(p - pool.baseAddr) / 16, bits);
// Zero-initialized allocation: same locking pattern as malloc, delegating
// to callocNoSync.
534 void *calloc(size_t size, uint bits = 0)
541 if (!thread_needLock())
543 return callocNoSync(size, bits);
545 else synchronized (gcLock)
547 return callocNoSync(size, bits);
// Worker: allocates via mallocNoSync; the zeroing memset is elided from
// this listing but p is the block to be cleared.
555 private void *callocNoSync(size_t size, uint bits = 0)
559 //debug(PRINTF) printf("calloc: %x len %d\n", p, len);
560 void *p = mallocNoSync(size, bits);
// Public realloc entry point: locks if needed, then reallocNoSync.
569 void *realloc(void *p, size_t size, uint bits = 0)
571 if (!thread_needLock())
573 return reallocNoSync(p, size, bits);
575 else synchronized (gcLock)
577 return reallocNoSync(p, size, bits);
// Resize worker. Null p degenerates to malloc. Attempts in-place shrink
// or grow for page-sized blocks before falling back to allocate+copy.
585 private void *reallocNoSync(void *p, size_t size, uint bits = 0)
595 p = mallocNoSync(size, bits);
601 //debug(PRINTF) printf("GC::realloc(p = %x, size = %u)\n", p, size);
// SENTINEL build: size is stored in the sentinel header.
604 sentinel_Invariant(p);
605 psize = *sentinel_size(p);
610 Pool *pool = gcx.findPool(p);
// Update or inherit attribute bits for the (re)allocated block.
614 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
618 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
619 gcx.setBits(pool, biti, bits);
623 bits = gcx.getBits(pool, biti);
627 p2 = mallocNoSync(size, bits);
630 //debug(PRINTF) printf("\tcopying %d bytes\n",size);
// Non-sentinel build: psize is the currently allocated size.
637 psize = gcx.findSize(p); // find allocated size
638 if (psize >= PAGESIZE && size >= PAGESIZE)
640 auto psz = psize / PAGESIZE;
641 auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
645 auto pool = gcx.findPool(p);
646 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
// Shrink in place: release the trailing pages back to the pool.
650 synchronized (gcLock)
652 debug (MEMSTOMP) memset(p + size, 0xF2, psize - size);
653 pool.freePages(pagenum + newsz, psz - newsz);
657 else if (pagenum + newsz <= pool.npages)
659 // Attempt to expand in place
660 synchronized (gcLock)
// Walk the pages following the block; succeed only if all pages up to
// the new size are free (committing more pages if the scan hits the
// committed boundary).
662 for (size_t i = pagenum + psz; 1;)
664 if (i == pagenum + newsz)
666 debug (MEMSTOMP) memset(p + psize, 0xF0, size - psize);
667 memset(&pool.pagetable[pagenum + psz], B_PAGEPLUS, newsz - psz);
670 if (i == pool.ncommitted)
672 auto u = pool.extendPages(pagenum + newsz - pool.ncommitted);
678 if (pool.pagetable[i] != B_FREE)
// Fallback: move the data when the new size is larger, or less than
// half the old size (avoid wasting more than 2x).
685 if (psize < size || // if new size is bigger
686 psize > size * 2) // or less than half
690 Pool *pool = gcx.findPool(p);
694 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
698 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
699 gcx.setBits(pool, biti, bits);
703 bits = gcx.getBits(pool, biti);
707 p2 = mallocNoSync(size, bits);
710 //debug(PRINTF) printf("\tcopying %d bytes\n",size);
721 * Attempt to in-place enlarge the memory block pointed to by p by at least
722 * minbytes beyond its current capacity, up to a maximum of maxsize. This
723 * does not attempt to move the memory block (like realloc() does).
726 * 0 if could not extend p,
727 * total size of entire memory block if successful.
// In-place-only growth (never moves the block, unlike realloc).
// Returns 0 on failure, or the total new size of the block on success.
729 size_t extend(void* p, size_t minsize, size_t maxsize)
731 if (!thread_needLock())
733 return extendNoSync(p, minsize, maxsize);
735 else synchronized (gcLock)
737 return extendNoSync(p, minsize, maxsize);
// Worker: only page-sized blocks can be extended (bucketed small blocks
// cannot), by claiming free pages that immediately follow the block.
745 private size_t extendNoSync(void* p, size_t minsize, size_t maxsize)
748 assert( minsize <= maxsize );
752 //debug(PRINTF) printf("GC::extend(p = %x, minsize = %u, maxsize = %u)\n", p, minsize, maxsize);
757 auto psize = gcx.findSize(p); // find allocated size
758 if (psize < PAGESIZE)
759 return 0; // cannot extend buckets
// Convert byte sizes to page counts (rounding requests up).
761 auto psz = psize / PAGESIZE;
762 auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
763 auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
765 auto pool = gcx.findPool(p);
766 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
// Count contiguous free pages after the block, up to maxsz; stop at the
// committed boundary or the first non-free page.
769 for (sz = 0; sz < maxsz; sz++)
771 auto i = pagenum + psz + sz;
772 if (i == pool.ncommitted)
774 if (pool.pagetable[i] != B_FREE)
// Hit the committed boundary before reaching minsz: try committing more.
783 else if (pagenum + psz + sz == pool.ncommitted)
785 auto u = pool.extendPages(minsz - sz);
// Success: mark the claimed pages as continuation pages of this block.
792 debug (MEMSTOMP) memset(p + psize, 0xF0, (psz + sz) * PAGESIZE - psize);
793 memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
796 return (psz + sz) * PAGESIZE;
// Pre-reserve at least `size` bytes of heap; usual lock-or-not dispatch.
803 size_t reserve(size_t size)
810 if (!thread_needLock())
812 return reserveNoSync(size);
814 else synchronized (gcLock)
816 return reserveNoSync(size);
824 private size_t reserveNoSync(size_t size)
829 return gcx.reserve(size);
// Explicit deallocation entry point (free): lock-or-not dispatch.
843 if (!thread_needLock())
845 return freeNoSync(p);
847 else synchronized (gcLock)
849 return freeNoSync(p);
// Worker: returns the block to its pool. Pointers not owned by the GC
// are ignored.
857 private void freeNoSync(void *p)
866 // Find which page it is in
867 pool = gcx.findPool(p);
868 if (!pool) // if not one of ours
870 sentinel_Invariant(p);
872 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
873 biti = cast(size_t)(p - pool.baseAddr) / 16;
// Drop all attribute bits so a recycled block starts clean.
874 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
876 bin = cast(Bins)pool.pagetable[pagenum];
877 if (bin == B_PAGE) // if large alloc
// Count the B_PAGEPLUS continuation pages and free the whole run.
884 while (++n < pool.ncommitted && pool.pagetable[n] == B_PAGEPLUS)
886 debug (MEMSTOMP) memset(p, 0xF2, npages * PAGESIZE);
887 pool.freePages(pagenum, npages);
890 { // Add to free list
891 List *list = cast(List*)p;
893 debug (MEMSTOMP) memset(p, 0xF2, binsize[bin]);
// Push the slot onto the head of this bin's free list.
895 list.next = gcx.bucket[bin];
896 gcx.bucket[bin] = list;
898 gcx.log_free(sentinel_add(p));
903 * Determine the base address of the block containing p. If p is not a gc
904 * allocated pointer, return null.
// Base address of the GC block containing p, or null if p is not a
// GC-allocated pointer.
906 void* addrOf(void *p)
913 if (!thread_needLock())
915 return addrOfNoSync(p);
917 else synchronized (gcLock)
919 return addrOfNoSync(p);
927 void* addrOfNoSync(void *p)
934 return gcx.findBase(p);
// Allocated size of the block p points at; 0 for interior pointers or
// pointers the GC doesn't own.
942 size_t sizeOf(void *p)
949 if (!thread_needLock())
951 return sizeOfNoSync(p);
953 else synchronized (gcLock)
955 return sizeOfNoSync(p);
// SENTINEL build: interior-pointer test relies on bin sizes being powers
// of two and pools being page-aligned; subtract the sentinel padding.
963 private size_t sizeOfNoSync(void *p)
970 size_t size = gcx.findSize(p);
972 // Check for interior pointer
974 // 1) size is a power of 2 for less than PAGESIZE values
975 // 2) base of memory pool is aligned on PAGESIZE boundary
976 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
978 return size ? size - SENTINEL_EXTRA : 0;
// Non-sentinel build: consult the one-entry (p, size) cache first,
// and refresh it on a successful lookup.
982 if (p == gcx.p_cache)
983 return gcx.size_cache;
985 size_t size = gcx.findSize(p);
987 // Check for interior pointer
989 // 1) size is a power of 2 for less than PAGESIZE values
990 // 2) base of memory pool is aligned on PAGESIZE boundary
991 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
996 gcx.size_cache = size;
1005 * Determine the base address of the block containing p. If p is not a gc
1006 * allocated pointer, return null.
// Return base/size/attr info (BlkInfo) for the block containing p;
// usual lock-or-not dispatch to the NoSync worker.
1008 BlkInfo query(void *p)
1016 if (!thread_needLock())
1018 return queryNoSync(p);
1020 else synchronized (gcLock)
1022 return queryNoSync(p);
1030 BlkInfo queryNoSync(void *p)
1034 return gcx.getInfo(p);
// Sanity-check a pointer (debug aid):
1039 * Verify that pointer p:
1040 * 1) belongs to this memory pool
1041 * 2) points to the start of an allocated piece of memory
1042 * 3) is not on a free list
1051 if (!thread_needLock())
1055 else synchronized (gcLock)
// Worker: asserts p is a block start in one of our pools and not on a
// free list (bin blocks are size-aligned, so misalignment => interior).
1065 private void checkNoSync(void *p)
1069 sentinel_Invariant(p);
1077 p = sentinel_sub(p);
1078 pool = gcx.findPool(p);
1080 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1081 bin = cast(Bins)pool.pagetable[pagenum];
1082 assert(bin <= B_PAGE);
1083 size = binsize[bin];
1084 assert((cast(size_t)p & (size - 1)) == 0);
1090 // Check that p is not on a free list
1093 for (list = gcx.bucket[bin]; list; list = list.next)
1095 assert(cast(void*)list != p);
// Record the deepest (root) end of the main thread's stack so the
// collector knows where to stop scanning. Direction of "deeper" depends
// on STACKGROWSDOWN: keep the larger address when the stack grows down,
// the smaller one otherwise.
1106 private void setStackBottom(void *p)
1108 version (STACKGROWSDOWN)
1110 //p = (void *)((uint *)p + 4);
1111 if (p > gcx.stackBottom)
1113 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1114 gcx.stackBottom = p;
1119 //p = (void *)((uint *)p - 4);
1120 if (p < gcx.stackBottom)
1122 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1123 gcx.stackBottom = cast(char*)p;
// Register the program's static data segment as a root range
// (bounds come from the runtime via rt_staticData{Bottom,Top}).
1129 static void scanStaticData(gc_t g)
1131 //debug(PRINTF) printf("+GC.scanStaticData()\n");
1132 auto pbot = rt_staticDataBottom();
1133 auto ptop = rt_staticDataTop();
1134 g.addRange(pbot, ptop - pbot);
1135 //debug(PRINTF) printf("-GC.scanStaticData()\n");
// Unregister the static data range (keyed by its bottom address).
1138 static void unscanStaticData(gc_t g)
1140 auto pbot = rt_staticDataBottom();
1141 g.removeRange(pbot);
1145 * add p to list of roots
// Root/range management wrappers: each takes the global lock only when
// multiple threads exist, then delegates to the Gcx implementation.
1147 void addRoot(void *p)
1154 if (!thread_needLock())
1158 else synchronized (gcLock)
1166 * remove p from list of roots
1168 void removeRoot(void *p)
1175 if (!thread_needLock())
1179 else synchronized (gcLock)
1187 * add range to scan for roots
// Range is given as base + length; Gcx.addRange takes [p, p+sz).
1189 void addRange(void *p, size_t sz)
1196 //debug(PRINTF) printf("+GC.addRange(pbot = x%x, ptop = x%x)\n", pbot, ptop);
1197 if (!thread_needLock())
1199 gcx.addRange(p, p + sz);
1201 else synchronized (gcLock)
1203 gcx.addRange(p, p + sz);
1205 //debug(PRINTF) printf("-GC.addRange()\n");
1212 void removeRange(void *p)
1219 if (!thread_needLock())
1223 else synchronized (gcLock)
1231 * do full garbage collection
// fullCollect: run a complete mark/sweep via fullcollectshell.
1235 debug(PRINTF) printf("GC.fullCollect()\n");
1237 if (!thread_needLock())
1239 gcx.fullcollectshell();
1241 else synchronized (gcLock)
1243 gcx.fullcollectshell();
1251 debug(PRINTF) printf("poolsize = %x, usedsize = %x, freelistsize = %x\n",
1252 stats.poolsize, stats.usedsize, stats.freelistsize);
1260 * do full garbage collection ignoring roots
// fullCollectNoStack: collection that skips stack scanning (the
// noStack toggling around fullcollectshell is elided from this listing).
1262 void fullCollectNoStack()
1264 if (!thread_needLock())
1267 gcx.fullcollectshell();
1270 else synchronized (gcLock)
1273 gcx.fullcollectshell();
1280 * minimize free space usage
1284 if (!thread_needLock())
1288 else synchronized (gcLock)
1296 * Retrieve statistics about garbage collection.
1297 * Useful for debugging and tuning.
1299 void getStats(out GCStats stats)
1301 if (!thread_needLock())
1303 getStatsNoSync(stats);
1305 else synchronized (gcLock)
1307 getStatsNoSync(stats);
// Compute heap statistics by walking every pool's page table and every
// bin free list: psize = committed pool bytes, flsize = bytes sitting on
// free lists, usedsize = allocated-and-live bytes (bsize - flsize).
1315 private void getStatsNoSync(out GCStats stats)
1324 //debug(PRINTF) printf("getStats()\n");
1325 memset(&stats, 0, GCStats.sizeof);
1327 for (n = 0; n < gcx.npools; n++)
1328 { Pool *pool = gcx.pooltable[n];
1330 psize += pool.ncommitted * PAGESIZE;
// Classify each committed page by its bin type.
1331 for (size_t j = 0; j < pool.ncommitted; j++)
1333 Bins bin = cast(Bins)pool.pagetable[j];
1336 else if (bin == B_PAGE)
1338 else if (bin < B_PAGE)
// Sum the bytes held on each bin's free list.
1343 for (n = 0; n < B_PAGE; n++)
1345 //debug(PRINTF) printf("bin %d\n", n);
1346 for (List *list = gcx.bucket[n]; list; list = list.next)
1348 //debug(PRINTF) printf("\tlist %x\n", list);
1349 flsize += binsize[n];
1353 usize = bsize - flsize;
1355 stats.poolsize = psize;
1356 stats.usedsize = bsize - flsize;
1357 stats.freelistsize = flsize;
1362 /* ============================ Gcx =============================== */
1366 COMMITSIZE = (4096*16),
1367 POOLSIZE = (4096*256),
1381 B_PAGE, // start of large alloc
1382 B_PAGEPLUS, // continuation of large alloc
1383 B_FREE, // free page
1384 B_UNCOMMITTED, // memory not committed for this page
1405 const uint binsize[B_MAX] = [ 16,32,64,128,256,512,1024,2048,4096 ];
1406 const uint notbinsize[B_MAX] = [ ~(16u-1),~(32u-1),~(64u-1),~(128u-1),~(256u-1),
1407 ~(512u-1),~(1024u-1),~(2048u-1),~(4096u-1) ];
1409 /* ============================ Gcx =============================== */
1414 debug (THREADINVARIANT)
1417 void thread_Invariant()
1419 if (self != pthread_self())
1420 printf("thread_Invariant(): gcx = %x, self = %x, pthread_self() = %x\n", this, self, pthread_self());
1421 assert(self == pthread_self());
1426 void thread_Invariant() { }
1440 uint noStack; // !=0 means don't scan stack
1441 uint log; // turn on logging
1445 int disabled; // turn off collections if >0
1447 byte *minAddr; // min(baseAddr)
1448 byte *maxAddr; // max(topAddr)
1453 List *bucket[B_MAX]; // free list for each size
1459 (cast(byte*)this)[0 .. Gcx.sizeof] = 0;
1460 stackBottom = cast(char*)&dummy;
1462 debug (THREADINVARIANT)
1463 self = pthread_self();
1464 //printf("gcx = %p, self = %x\n", this, self);
1473 for (size_t i = 0; i < npools; i++)
1474 { Pool *pool = pooltable[i];
1480 cstdlib.free(pooltable);
1483 cstdlib.free(roots);
1486 cstdlib.free(ranges);
1490 void Invariant() { }
1497 //printf("Gcx.invariant(): this = %p\n", this);
1500 // Assure we're called on the right thread
1501 debug (THREADINVARIANT) assert(self == pthread_self());
1503 for (i = 0; i < npools; i++)
1504 { Pool *pool = pooltable[i];
1509 assert(minAddr == pool.baseAddr);
1513 assert(pool.opCmp(pooltable[i + 1]) < 0);
1515 else if (i + 1 == npools)
1517 assert(maxAddr == pool.topAddr);
1523 assert(rootdim != 0);
1524 assert(nroots <= rootdim);
1529 assert(rangedim != 0);
1530 assert(nranges <= rangedim);
1532 for (i = 0; i < nranges; i++)
1534 assert(ranges[i].pbot);
1535 assert(ranges[i].ptop);
1536 assert(ranges[i].pbot <= ranges[i].ptop);
1540 for (i = 0; i < B_PAGE; i++)
1542 for (List *list = bucket[i]; list; list = list.next)
// Append p to the roots[] array, growing it geometrically (2x + 16)
// with malloc + memcpy when full.
1553 void addRoot(void *p)
1555 if (nroots == rootdim)
1557 size_t newdim = rootdim * 2 + 16;
1560 newroots = cast(void**)cstdlib.malloc(newdim * newroots[0].sizeof);
1562 onOutOfMemoryError();
1564 { memcpy(newroots, roots, nroots * newroots[0].sizeof);
1565 cstdlib.free(roots);
// Remove the first root equal to p, scanning from the end; the tail is
// shifted down with memmove.
1578 void removeRoot(void *p)
1580 for (size_t i = nroots; i--;)
1585 memmove(roots + i, roots + i + 1, (nroots - i) * roots[0].sizeof);
// Append a [pbot, ptop) scan range to ranges[], growing the array the
// same way roots[] grows.
1596 void addRange(void *pbot, void *ptop)
1598 debug(PRINTF) printf("Thread %x ", pthread_self());
1599 debug(PRINTF) printf("%x.Gcx::addRange(%x, %x), nranges = %d\n", this, pbot, ptop, nranges);
1600 if (nranges == rangedim)
1602 size_t newdim = rangedim * 2 + 16;
1605 newranges = cast(Range*)cstdlib.malloc(newdim * newranges[0].sizeof);
1607 onOutOfMemoryError();
1609 { memcpy(newranges, ranges, nranges * newranges[0].sizeof);
1610 cstdlib.free(ranges);
1615 ranges[nranges].pbot = pbot;
1616 ranges[nranges].ptop = ptop;
// Remove the range whose bottom address is pbot. A miss is deliberately
// tolerated (see comment below) rather than treated as fatal.
1624 void removeRange(void *pbot)
1626 debug(PRINTF) printf("Thread %x ", pthread_self());
1627 debug(PRINTF) printf("%x.Gcx.removeRange(%x), nranges = %d\n", this, pbot, nranges);
1628 for (size_t i = nranges; i--;)
1630 if (ranges[i].pbot == pbot)
1633 memmove(ranges + i, ranges + i + 1, (nranges - i) * ranges[0].sizeof);
1637 debug(PRINTF) printf("Wrong thread\n");
1639 // This is a fatal error, but ignore it.
1640 // The problem is that we can get a Close() call on a thread
1641 // other than the one the range was allocated on.
1647 * Find Pool that pointer is in.
1648 * Return null if not in a Pool.
1649 * Assume pooltable[] is sorted.
// Map a pointer to its owning Pool, or null. Fast-rejects pointers
// outside [minAddr, maxAddr). NOTE(review): the loop below is linear over
// the sorted pooltable[]; the single-pool shortcut returns pooltable[0].
1651 Pool *findPool(void *p)
1653 if (p >= minAddr && p < maxAddr)
1657 return pooltable[0];
1660 for (size_t i = 0; i < npools; i++)
1663 pool = pooltable[i];
1664 if (p < pool.topAddr)
1665 { if (pool.baseAddr <= p)
// Base address of the block containing p, or null for non-GC pointers.
1676 * Find base address of block containing pointer p.
1677 * Returns null if not a gc'd pointer
1679 void* findBase(void *p)
1686 size_t offset = cast(size_t)(p - pool.baseAddr);
1687 size_t pn = offset / PAGESIZE;
1688 Bins bin = cast(Bins)pool.pagetable[pn];
1690 // Adjust bit to be at start of allocated memory block
// Small bins: mask the offset down to the bin-size boundary.
1693 return pool.baseAddr + (offset & notbinsize[bin]);
1695 else if (bin == B_PAGEPLUS)
// Continuation page: walk back to the B_PAGE that heads the block,
// then round the offset down to a page boundary.
1698 { --pn, offset -= PAGESIZE;
1699 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1701 return pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1705 // we are in a B_FREE or B_UNCOMMITTED page
// Allocated size of the block containing p; 0 for non-GC pointers.
1714 * Find size of pointer p.
1715 * Returns 0 if not a gc'd pointer
1717 size_t findSize(void *p)
1728 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1729 bin = cast(Bins)pool.pagetable[pagenum];
1730 size = binsize[bin];
// B_PAGE block: size is the page plus all following B_PAGEPLUS pages.
1732 { size_t npages = pool.ncommitted;
1736 pt = &pool.pagetable[0];
1737 for (i = pagenum + 1; i < npages; i++)
1739 if (pt[i] != B_PAGEPLUS)
1742 size = (i - pagenum) * PAGESIZE;
// Build a BlkInfo (base, size, attr) for the block containing p, using
// the same base/size logic as findBase/findSize.
1752 BlkInfo getInfo(void* p)
1760 size_t offset = cast(size_t)(p - pool.baseAddr);
1761 size_t pn = offset / PAGESIZE;
1762 Bins bin = cast(Bins)pool.pagetable[pn];
1764 ////////////////////////////////////////////////////////////////////
// Compute info.base.
1766 ////////////////////////////////////////////////////////////////////
1770 info.base = pool.baseAddr + (offset & notbinsize[bin]);
1772 else if (bin == B_PAGEPLUS)
// Walk back through continuation pages to the heading B_PAGE.
1775 { --pn, offset -= PAGESIZE;
1776 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1778 info.base = pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1780 // fix bin for use by size calc below
1781 bin = cast(Bins)pool.pagetable[pn];
1784 ////////////////////////////////////////////////////////////////////
// Compute info.size.
1786 ////////////////////////////////////////////////////////////////////
1788 info.size = binsize[bin];
// B_PAGE block: extend size over the trailing B_PAGEPLUS pages.
1790 { size_t npages = pool.ncommitted;
1794 pt = &pool.pagetable[0];
1795 for (i = pn + 1; i < npages; i++)
1797 if (pt[i] != B_PAGEPLUS)
1800 info.size = (i - pn) * PAGESIZE;
1803 ////////////////////////////////////////////////////////////////////
// Compute info.attr from the per-16-byte attribute bits.
1805 ////////////////////////////////////////////////////////////////////
1807 info.attr = getBits(pool, cast(size_t)(offset / 16));
1814 * Compute bin for size.
1816 static Bins findBin(size_t size)
1825 else if (size <= 32)
1860 * Allocate a new pool of at least size bytes.
1861 * Sort it into pooltable[].
1862 * Mark all memory in the pool as B_FREE.
1863 * Return the actual number of bytes reserved or 0 on error.
// Reserve at least `size` bytes: create a pool sized in pages and commit
// them. Returns bytes actually committed, or (per the header comment
// above) 0 on failure.
1865 size_t reserve(size_t size)
1867 size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
1868 Pool* pool = newPool(npages);
1870 if (!pool || pool.extendPages(npages) == OPFAIL)
1872 return pool.ncommitted * PAGESIZE;
// minimize: return pools that are entirely free back to the OS.
1877 * Minimizes physical memory usage by returning free pools to the OS.
1886 for (n = 0; n < npools; n++)
1888 pool = pooltable[n];
1889 ncommitted = pool.ncommitted;
// A pool is releasable only if every committed page is B_FREE.
1890 for (pn = 0; pn < ncommitted; pn++)
1892 if (cast(Bins)pool.pagetable[pn] != B_FREE)
1895 if (pn < ncommitted)
// Compact pooltable[] over the removed entry and refresh the cached
// address bounds used by findPool's fast path.
1902 memmove(pooltable + n,
1904 (--npools - n) * (Pool*).sizeof);
1905 minAddr = pooltable[0].baseAddr;
1906 maxAddr = pooltable[npools - 1].topAddr;
1912 * Allocate a chunk of memory that is larger than a page.
1913 * Return null if out of memory.
// Allocate a multi-page block. Strategy: try existing pools; if that
// fails, collect (and if the collection freed plenty, retry pools),
// otherwise grow the heap with a new pool. Returns null on failure so
// mallocNoSync can report out-of-memory.
1915 void *bigAlloc(size_t size)
1925 npages = (size + PAGESIZE - 1) / PAGESIZE;
1929 // This code could use some refinement when repeatedly
1930 // allocating very large arrays.
// Pass 1: look for npages contiguous pages in an existing pool.
1932 for (n = 0; n < npools; n++)
1934 pool = pooltable[n];
1935 pn = pool.allocPages(npages);
// Pass 2: collect; a large enough yield (>= 1/4 of a pool's pages per
// pool) makes retrying the existing pools worthwhile.
1949 freedpages = fullcollectshell();
1950 if (freedpages >= npools * ((POOLSIZE / PAGESIZE) / 4))
1954 // Release empty pools to prevent bloat
1956 // Allocate new pool
1957 pool = newPool(npages);
1962 pn = pool.allocPages(npages);
1963 assert(pn != OPFAIL);
1966 // Release empty pools to prevent bloat
1968 // Allocate new pool
1969 pool = newPool(npages);
1972 pn = pool.allocPages(npages);
1973 assert(pn != OPFAIL);
// Success: head page is B_PAGE, the rest are B_PAGEPLUS continuations;
// zero the slack past `size` so stale data can't be scanned as pointers.
1983 pool.pagetable[pn] = B_PAGE;
1985 memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
1986 p = pool.baseAddr + pn * PAGESIZE;
1987 memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
1988 debug (MEMSTOMP) memset(p, 0xF1, size);
1989 //debug(PRINTF) printf("\tp = %x\n", p);
1993 return null; // let mallocNoSync handle the error
1998 * Allocate a new pool with at least npages in it.
1999 * Sort it into pooltable[].
2000 * Return null if failed.
// Create a new Pool with at least npages pages and insert it into the
// sorted pooltable[]. Returns null on failure.
2002 Pool *newPool(size_t npages)
2005 Pool** newpooltable;
2009 //debug(PRINTF) printf("************Gcx::newPool(npages = %d)****************\n", npages);
2011 // Round up to COMMITSIZE pages
2012 npages = (npages + (COMMITSIZE/PAGESIZE) - 1) & ~(COMMITSIZE/PAGESIZE - 1);
2014 // Minimum of POOLSIZE
2015 if (npages < POOLSIZE/PAGESIZE)
2016 npages = POOLSIZE/PAGESIZE;
2017 else if (npages > POOLSIZE/PAGESIZE)
2018 { // Give us 150% of requested size, so there's room to extend
2019 auto n = npages + (npages >> 1);
2020 if (n < size_t.max/PAGESIZE)
2024 // Allocate successively larger pools up to 8 megs
2030 n = 8; // cap pool size at 8 megs
2031 n *= (POOLSIZE / PAGESIZE);
// Allocate and initialize the Pool object itself.
2036 pool = cast(Pool *)cstdlib.calloc(1, Pool.sizeof);
2039 pool.initialize(npages);
// Grow pooltable[] by one slot.
2043 newnpools = npools + 1;
2044 newpooltable = cast(Pool **)cstdlib.realloc(pooltable, newnpools * (Pool *).sizeof);
2048 // Sort pool into newpooltable[]
// Insertion by address order (opCmp), shifting later entries up.
2049 for (i = 0; i < npools; i++)
2051 if (pool.opCmp(newpooltable[i]) < 0)
2054 memmove(newpooltable + i + 1, newpooltable + i, (npools - i) * (Pool *).sizeof);
2055 newpooltable[i] = pool;
2057 pooltable = newpooltable;
// Refresh the cached address bounds used by findPool's fast path.
2060 minAddr = pooltable[0].baseAddr;
2061 maxAddr = pooltable[npools - 1].topAddr;
2073 * Allocate a page of bin's.
// Carve one page out of some pool for size class `bin` and thread every
// slot in it onto that bin's free list.
2077 int allocPage(Bins bin)
2085 //debug(PRINTF) printf("Gcx::allocPage(bin = %d)\n", bin);
// Find any pool with a free page.
2086 for (n = 0; n < npools; n++)
2088 pool = pooltable[n];
2089 pn = pool.allocPages(1);
2096 pool.pagetable[pn] = cast(ubyte)bin;
2098 // Convert page to free list
2099 size_t size = binsize[bin];
2100 List **b = &bucket[bin];
// Walk the page in `size` strides, pushing each slot onto the list.
2102 p = pool.baseAddr + pn * PAGESIZE;
2103 ptop = p + PAGESIZE;
2104 for (; p < ptop; p += size)
2106 (cast(List *)p).next = *b;
2114 * Search a range of memory values and mark any pointers into the GC pool.
// Conservative mark phase over [pbot, ptop): treat every word as a
// potential pointer; for words landing in a GC pool, set the mark bit
// and queue the block for scanning (scan bit) unless it is noscan.
2116 void mark(void *pbot, void *ptop)
2118 void **p1 = cast(void **)pbot;
2119 void **p2 = cast(void **)ptop;
2123 //printf("marking range: %p -> %p\n", pbot, ptop);
2124 for (; p1 < p2; p1++)
2127 byte *p = cast(byte *)(*p1);
2129 //if (log) debug(PRINTF) printf("\tmark %x\n", p);
// Cheap bounds test before the pool lookup.
2130 if (p >= minAddr && p < maxAddr)
// pcache remembers the last large-block page seen, to skip repeated
// lookups for pointers into the same page.
2132 if ((cast(size_t)p & ~(PAGESIZE-1)) == pcache)
2138 size_t offset = cast(size_t)(p - pool.baseAddr);
2140 size_t pn = offset / PAGESIZE;
2141 Bins bin = cast(Bins)pool.pagetable[pn];
2143 //debug(PRINTF) printf("\t\tfound pool %x, base=%x, pn = %d, bin = %d, biti = x%x\n", pool, pool.baseAddr, pn, bin, biti);
2145 // Adjust bit to be at start of allocated memory block
// Small bins: bit index from the block's base offset (16-byte units).
2148 biti = (offset & notbinsize[bin]) >> 4;
2149 //debug(PRINTF) printf("\t\tbiti = x%x\n", biti);
2151 else if (bin == B_PAGEPLUS)
// Continuation page: back up to the heading B_PAGE first.
2155 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
2156 biti = pn * (PAGESIZE / 16);
2160 // Don't mark bits in B_FREE or B_UNCOMMITTED pages
2164 if (bin >= B_PAGE) // Cache B_PAGE and B_PAGEPLUS lookups
2165 pcache = cast(size_t)p & ~(PAGESIZE-1);
2167 //debug(PRINTF) printf("\t\tmark(x%x) = %d\n", biti, pool.mark.test(biti));
// Newly reached block: mark it, and schedule a scan of its interior
// unless its NO_SCAN (noscan) bit is set.
2168 if (!pool.mark.test(biti))
2170 //if (log) debug(PRINTF) printf("\t\tmarking %x\n", p);
2171 pool.mark.set(biti);
2172 if (!pool.noscan.test(biti))
2174 pool.scan.set(biti);
2177 log_parent(sentinel_add(pool.baseAddr + biti * 16), sentinel_add(pbot));
// anychanges drives the mark loop's fixed-point iteration.
2182 anychanges |= changes;
// Shell around fullcollect: forces callee-saved registers onto the
// stack (via __builtin_unwind_init) so the stack scan sees them.
2189 size_t fullcollectshell()
2191 // The purpose of the 'shell' is to ensure all the registers
2192 // get put on the stack so they'll be scanned
2197 __builtin_unwind_init();
2208 result = fullcollect(sp);
2227 size_t fullcollect(void *stackTop)
2232 debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
2234 thread_suspendAll();
2240 for (n = 0; n < npools; n++)
2242 pool = pooltable[n];
2245 pool.freebits.zero();
2248 // Mark each free entry, so it doesn't get scanned
2249 for (n = 0; n < B_PAGE; n++)
2251 for (List *list = bucket[n]; list; list = list.next)
2253 pool = findPool(list);
2255 pool.freebits.set(cast(size_t)(cast(byte*)list - pool.baseAddr) / 16);
2259 for (n = 0; n < npools; n++)
2261 pool = pooltable[n];
2262 pool.mark.copy(&pool.freebits);
2265 rt_scanStaticData( &mark );
2267 version (MULTI_THREADED)
2271 // Scan stacks and registers for each paused thread
2272 thread_scanAll( &mark, stackTop );
2279 // Scan stack for main thread
2280 debug(PRINTF) printf(" scan stack bot = %x, top = %x\n", stackTop, stackBottom);
2281 version (STACKGROWSDOWN)
2282 mark(stackTop, stackBottom);
2284 mark(stackBottom, stackTop);
2289 debug(COLLECT_PRINTF) printf("scan roots[]\n");
2290 mark(roots, roots + nroots);
2293 debug(COLLECT_PRINTF) printf("scan ranges[]\n");
2295 for (n = 0; n < nranges; n++)
2297 debug(COLLECT_PRINTF) printf("\t%x .. %x\n", ranges[n].pbot, ranges[n].ptop);
2298 mark(ranges[n].pbot, ranges[n].ptop);
2302 debug(COLLECT_PRINTF) printf("\tscan heap\n");
2306 for (n = 0; n < npools; n++)
2312 pool = pooltable[n];
2314 bbase = pool.scan.base();
2315 btop = bbase + pool.scan.nwords;
2316 for (b = bbase; b < btop;)
2330 o = pool.baseAddr + (b - bbase) * 32 * 16;
2331 if (!(bitm & 0xFFFF))
2336 for (; bitm; o += 16, bitm >>= 1)
2341 pn = cast(size_t)(o - pool.baseAddr) / PAGESIZE;
2342 bin = cast(Bins)pool.pagetable[pn];
2345 mark(o, o + binsize[bin]);
2347 else if (bin == B_PAGE || bin == B_PAGEPLUS)
2349 if (bin == B_PAGEPLUS)
2351 while (pool.pagetable[pn - 1] != B_PAGE)
2355 while (pn + u < pool.ncommitted && pool.pagetable[pn + u] == B_PAGEPLUS)
2357 mark(o, o + u * PAGESIZE);
// ---- Sweep phase (fragment): reclaim every object whose mark bit is clear ----
2366 // Free up everything not marked
2367 debug(COLLECT_PRINTF) printf("\tfree'ing\n");
2368 size_t freedpages = 0;
2370 for (n = 0; n < npools; n++)
2375 pool = pooltable[n];
2376 bbase = pool.mark.base();
2377 ncommitted = pool.ncommitted;
// bbase advances by one page's worth of mark-bitmap words per iteration
2378 for (pn = 0; pn < ncommitted; pn++, bbase += PAGESIZE / (32 * 16))
2380 Bins bin = cast(Bins)pool.pagetable[pn];
2387 auto size = binsize[bin];
2389 p = pool.baseAddr + pn * PAGESIZE;
2390 ptop = p + PAGESIZE;
2391 biti = pn * (PAGESIZE/16); // bit index of the page's first 16-byte unit
2392 bitstride = size / 16; // bits between consecutive objects of this bin
2394 version(none) // BUG: doesn't work because freebits() must also be cleared
2396 // If free'd entire page
2397 if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 && bbase[3] == 0 &&
2398 bbase[4] == 0 && bbase[5] == 0 && bbase[6] == 0 && bbase[7] == 0)
2400 for (; p < ptop; p += size, biti += bitstride)
2402 if (pool.finals.nbits && pool.finals.testClear(biti))
2403 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2404 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
2406 List *list = cast(List *)p;
2407 //debug(PRINTF) printf("\tcollecting %x\n", list);
2408 log_free(sentinel_add(list));
2410 debug (MEMSTOMP) memset(p, 0xF3, size);
2412 pool.pagetable[pn] = B_FREE;
2414 //debug(PRINTF) printf("freeing entire page %d\n", pn);
// Live path: sweep each small-bin slot of the page individually.
2418 for (; p < ptop; p += size, biti += bitstride)
2420 if (!pool.mark.test(biti)) // unmarked slot: garbage
2422 sentinel_Invariant(sentinel_add(p));
2424 pool.freebits.set(biti); // remember it is free for the recovery phase
2425 if (pool.finals.nbits && pool.finals.testClear(biti))
2426 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2427 clrBits(pool, biti, BlkAttr.ALL_BITS);
2429 List *list = cast(List *)p;
2430 debug(PRINTF) printf("\tcollecting %x\n", list);
2431 log_free(sentinel_add(list));
2433 debug (MEMSTOMP) memset(p, 0xF3, size);
2439 else if (bin == B_PAGE)
// Big allocation: the head page's single mark bit decides the whole object.
2440 { size_t biti = pn * (PAGESIZE / 16);
2442 if (!pool.mark.test(biti))
2443 { byte *p = pool.baseAddr + pn * PAGESIZE;
2445 sentinel_Invariant(sentinel_add(p));
2446 if (pool.finals.nbits && pool.finals.testClear(biti))
2447 rt_finalize(sentinel_add(p), false/*noStack > 0*/);
2448 clrBits(pool, biti, BlkAttr.ALL_BITS);
2450 debug(COLLECT_PRINTF) printf("\tcollecting big %x\n", p);
2451 log_free(sentinel_add(p));
2452 pool.pagetable[pn] = B_FREE;
2454 debug (MEMSTOMP) memset(p, 0xF3, PAGESIZE);
// Also release the object's B_PAGEPLUS continuation pages.
2455 while (pn + 1 < ncommitted && pool.pagetable[pn + 1] == B_PAGEPLUS)
2458 pool.pagetable[pn] = B_FREE;
2463 memset(p, 0xF3, PAGESIZE);
// ---- Recovery phase (fragment): pages whose every slot is free revert to
// B_FREE; partially used pages get their free slots threaded onto bucket[]. ----
2474 // Free complete pages, rebuild free list
2475 debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
2476 size_t recoveredpages = 0;
2477 for (n = 0; n < npools; n++)
2481 pool = pooltable[n];
2482 ncommitted = pool.ncommitted;
2483 for (pn = 0; pn < ncommitted; pn++)
2485 Bins bin = cast(Bins)pool.pagetable[pn];
2491 size_t size = binsize[bin];
2492 size_t bitstride = size / 16;
2493 size_t bitbase = pn * (PAGESIZE / 16);
2494 size_t bittop = bitbase + (PAGESIZE / 16);
// If any slot on this page is not free, skip the whole-page reclaim
// (the skip branch itself is elided from this view).
2498 for (biti = bitbase; biti < bittop; biti += bitstride)
2499 { if (!pool.freebits.test(biti))
2502 pool.pagetable[pn] = B_FREE; // every slot was free: recover the page
// Partially used page: push each free slot onto the bin's free list.
2507 p = pool.baseAddr + pn * PAGESIZE;
2508 for (u = 0; u < PAGESIZE; u += size)
2509 { biti = bitbase + u / 16;
2510 if (pool.freebits.test(biti))
2513 list = cast(List *)(p + u);
2514 if (list.next != bucket[bin]) // avoid unnecessary writes
2515 list.next = bucket[bin];
2523 debug(COLLECT_PRINTF) printf("recovered pages = %d\n", recoveredpages);
2524 debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, npools);
// Result: total pages made available by this collection cycle.
2526 return freedpages + recoveredpages;
// Return the BlkAttr flag bits recorded for the object at bit index biti of
// pool. (Fragment: the declaration of the 'bits' accumulator and the final
// return are elided from this view.) NO_MOVE support is commented out.
2533 uint getBits(Pool* pool, size_t biti)
// finals is lazily allocated, so guard on nbits before testing it
2542 if (pool.finals.nbits &&
2543 pool.finals.test(biti))
2544 bits |= BlkAttr.FINALIZE;
2545 if (pool.noscan.test(biti))
2546 bits |= BlkAttr.NO_SCAN;
2547 // if (pool.nomove.nbits &&
2548 // pool.nomove.test(biti))
2549 // bits |= BlkAttr.NO_MOVE;
// Set the BlkAttr flags in 'mask' for the object at bit index biti of pool.
// The finals bitmap is allocated lazily, sized to match pool.mark, on the
// first FINALIZE request. NO_MOVE support is commented out.
2557 void setBits(Pool* pool, size_t biti, uint mask)
2564 if (mask & BlkAttr.FINALIZE)
2566 if (!pool.finals.nbits)
2567 pool.finals.alloc(pool.mark.nbits); // lazy allocation of the finalize bitmap
2568 pool.finals.set(biti);
2570 if (mask & BlkAttr.NO_SCAN)
2572 pool.noscan.set(biti);
2574 // if (mask & BlkAttr.NO_MOVE)
2576 // if (!pool.nomove.nbits)
2577 // pool.nomove.alloc(pool.mark.nbits);
2578 // pool.nomove.set(biti);
// Clear the BlkAttr flags in 'mask' for the object at bit index biti of pool.
// Mirrors setBits(); guards finals on nbits because it is lazily allocated.
2586 void clrBits(Pool* pool, size_t biti, uint mask)
2593 if (mask & BlkAttr.FINALIZE && pool.finals.nbits)
2594 pool.finals.clear(biti);
2595 if (mask & BlkAttr.NO_SCAN)
2596 pool.noscan.clear(biti);
2597 // if (mask & BlkAttr.NO_MOVE && pool.nomove.nbits)
2598 // pool.nomove.clear(biti);
2602 /***** Leak Detector ******/
// log_init (fragment): pre-reserve capacity in the 'current' allocation log
// so early mallocs don't immediately force a log reallocation.
2613 //debug(PRINTF) printf("+log_init()\n");
2614 current.reserve(1000);
2616 //debug(PRINTF) printf("-log_init()\n");
// Record an allocation of 'size' bytes at p in the log (body largely elided).
2620 void log_malloc(void *p, size_t size)
2622 //debug(PRINTF) printf("+log_malloc(p = %x, size = %d)\n", p, size);
2635 //debug(PRINTF) printf("-log_malloc()\n");
// Remove p from the allocation log; prints a diagnostic if p was never logged.
2639 void log_free(void *p)
2641 //debug(PRINTF) printf("+log_free(%x)\n", p);
2644 i = current.find(p);
2647 debug(PRINTF) printf("free'ing unallocated memory %x\n", p);
2651 //debug(PRINTF) printf("-log_free()\n");
// log_collect (fragment): after a collection, diff the 'current' allocation
// log against the previous cycle's 'prev' log — print pointers that are new
// this cycle, then the surviving roots — and roll current into prev.
2657 //debug(PRINTF) printf("+log_collect()\n");
2658 // Print everything in current that is not in prev
2660 debug(PRINTF) printf("New pointers this cycle: --------------------------------\n");
2662 for (size_t i = 0; i < current.dim; i++)
2666 j = prev.find(current.data[i].p);
2668 current.data[i].print();
2673 debug(PRINTF) printf("All roots this cycle: --------------------------------\n");
2674 for (size_t i = 0; i < current.dim; i++)
2679 p = current.data[i].p;
// a parent outside any pool means this entry is referenced from a root
2680 if (!findPool(current.data[i].parent))
2682 j = prev.find(current.data[i].p);
2684 debug(PRINTF) printf("N"); // 'N' flags entries new since the previous cycle
2686 debug(PRINTF) printf(" ");; // NOTE(review): stray second ';' is a harmless empty statement — should be dropped
2687 current.data[i].print();
2691 debug(PRINTF) printf("Used = %d-------------------------------------------------\n", used);
2692 prev.copy(¤t); // NOTE(review): '¤t' is mojibake — '&current' was eaten by the HTML entity '&curren;'; restore 'prev.copy(&current);'
2694 debug(PRINTF) printf("-log_collect()\n");
// log_parent (fragment): record 'parent' as the referencing pointer of the
// logged allocation p, so leak reports can show who kept an object alive.
// Prints a diagnostic if p itself was never logged.
2698 void log_parent(void *p, void *parent)
2700 //debug(PRINTF) printf("+log_parent()\n");
2703 i = current.find(p);
2706 debug(PRINTF) printf("parent'ing unallocated memory %x, parent = %x\n", p, parent);
2710 size_t offset = cast(size_t)(p - pool.baseAddr);
2712 size_t pn = offset / PAGESIZE;
2713 Bins bin = cast(Bins)pool.pagetable[pn];
// NOTE(review): masking offset with notbinsize[bin] looks like it yields the
// block's base offset rather than a bit index; here it feeds only the debug
// printf below — confirm intent before reusing 'biti' for bitmap access.
2714 biti = (offset & notbinsize[bin]);
2715 debug(PRINTF) printf("\tbin = %d, offset = x%x, biti = x%x\n", bin, offset, biti);
2719 current.data[i].parent = parent;
2721 //debug(PRINTF) printf("-log_parent()\n");
// No-op stubs compiled in when debug(LOGGING) is disabled: the leak-detector
// call sites stay in place but cost nothing.
2728 void log_malloc(void *p, size_t size) { }
2729 void log_free(void *p) { }
2730 void log_collect() { }
2731 void log_parent(void *p, void *parent) { }
2736 /* ============================ Pool =============================== */
// Pool state (fragment: the 'struct Pool' header and some fields are elided).
// Each GCBits bitmap carries one bit per 16-byte unit of pool address space.
2743 GCBits mark; // entries already scanned, or should not be scanned
2744 GCBits scan; // entries that need to be scanned
2745 GCBits freebits; // entries that are on the free list
2746 GCBits finals; // entries that need finalizer run on them
2747 GCBits noscan; // entries that should not be scanned
2750 size_t ncommitted; // ncommitted <= npages
// Map npages of address space and set up the per-pool bitmaps and page table.
// All pages start out B_UNCOMMITTED; actual commit happens lazily (see
// extendPages()). Error-handling branches are elided from this view.
2754 void initialize(size_t npages)
2758 //debug(PRINTF) printf("Pool::Pool(%u)\n", npages);
2759 poolsize = npages * PAGESIZE;
2760 assert(poolsize >= POOLSIZE);
2761 baseAddr = cast(byte *)os_mem_map(poolsize);
2763 // Some of the code depends on page alignment of memory pools
2764 assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);
2768 //debug(PRINTF) printf("GC fail: poolsize = x%x, errno = %d\n", poolsize, errno);
2769 //debug(PRINTF) printf("message = '%s'\n", sys_errlist[errno]);
2775 topAddr = baseAddr + poolsize;
// one bit per 16 bytes of pool space
2777 mark.alloc(cast(size_t)poolsize / 16);
2778 scan.alloc(cast(size_t)poolsize / 16);
2779 freebits.alloc(cast(size_t)poolsize / 16);
2780 noscan.alloc(cast(size_t)poolsize / 16);
// NOTE(review): 'finals' is deliberately not allocated here — setBits()
// allocates it lazily on the first FINALIZE request.
2782 pagetable = cast(ubyte*)cstdlib.malloc(npages);
2784 onOutOfMemoryError(); // presumably guarded by a null check on pagetable (elided)
2785 memset(pagetable, B_UNCOMMITTED, npages);
2787 this.npages = npages;
// Destructor fragment: decommit and unmap the pool's address range, then
// free the C-allocated page table.
2800 result = os_mem_decommit(baseAddr, 0, ncommitted * PAGESIZE);
2801 assert(result == 0);
2807 result = os_mem_unmap(baseAddr, npages * PAGESIZE);
2808 assert(result == 0);
2816 cstdlib.free(pagetable);
2826 void Invariant() { }
// invariant fragment: per-bitmap invariants are disabled; what remains checks
// the address range, the commit count, and that every page-table entry is a
// valid Bins value.
2833 //freebits.Invariant();
2834 //finals.Invariant();
2835 //noscan.Invariant();
2839 //if (baseAddr + npages * PAGESIZE != topAddr)
2840 //printf("baseAddr = %p, npages = %d, topAddr = %p\n", baseAddr, npages, topAddr);
2841 assert(baseAddr + npages * PAGESIZE == topAddr);
2842 assert(ncommitted <= npages);
2845 for (size_t i = 0; i < npages; i++)
2846 { Bins bin = cast(Bins)pagetable[i];
2848 assert(bin < B_MAX);
2854 * Allocate n pages from Pool.
2855 * Returns OPFAIL on failure.
2857 size_t allocPages(size_t n)
2862 //debug(PRINTF) printf("Pool::allocPages(n = %d)\n", n);
// First-fit scan for a run of n consecutive B_FREE pages among the pages
// already committed (the run-length counter and return are elided here).
2864 for (i = 0; i < ncommitted; i++)
2866 if (pagetable[i] == B_FREE)
2869 { //debug(PRINTF) printf("\texisting pn = %d\n", i - n + 1);
// No suitable run among committed pages: try committing more.
2876 return extendPages(n);
2880 * Extend Pool by n pages.
2881 * Returns OPFAIL on failure.
2883 size_t extendPages(size_t n)
2885 //debug(PRINTF) printf("Pool::extendPages(n = %d)\n", n);
2886 if (ncommitted + n <= npages)
// Round the request up to a multiple of COMMITSIZE/PAGESIZE pages, but
// never past the end of the pool.
2890 tocommit = (n + (COMMITSIZE/PAGESIZE) - 1) & ~(COMMITSIZE/PAGESIZE - 1);
2891 if (ncommitted + tocommit > npages)
2892 tocommit = npages - ncommitted;
2893 //debug(PRINTF) printf("\tlooking to commit %d more pages\n", tocommit);
2895 if (os_mem_commit(baseAddr, ncommitted * PAGESIZE, tocommit * PAGESIZE) == 0)
2897 memset(pagetable + ncommitted, B_FREE, tocommit);
2898 auto i = ncommitted;
2899 ncommitted += tocommit;
// Back up over any trailing B_FREE pages so the returned index is the
// start of the contiguous free run (loop body elided).
2901 while (i && pagetable[i - 1] == B_FREE)
2906 //debug(PRINTF) printf("\tfailed to commit %d pages\n", tocommit);
2914 * Free npages pages starting with pagenum.
2916 void freePages(size_t pagenum, size_t npages)
2918 memset(&pagetable[pagenum], B_FREE, npages);
2923 * Used for sorting pooltable[]
// opCmp fragment: orders pools by baseAddr (negative/zero/positive result).
2927 if (baseAddr < p2.baseAddr)
2930 return cast(int)(baseAddr > p2.baseAddr);
2935 /* ============================ SENTINEL =============================== */
// debug(SENTINEL) block layout around each allocation:
//   [size_t size][size_t SENTINEL_PRE][user data ...][ubyte SENTINEL_POST]
// sentinel_add/sentinel_sub convert between raw block and user pointers.
2940 const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
2941 const ubyte SENTINEL_POST = 0xF5; // 8 bits
2942 const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;
// The stored request size lives two words before the user pointer, the
// pre-guard one word before; the post-guard byte sits right after the data.
2945 size_t* sentinel_size(void *p) { return &(cast(size_t *)p)[-2]; }
2946 size_t* sentinel_pre(void *p) { return &(cast(size_t *)p)[-1]; }
2947 ubyte* sentinel_post(void *p) { return &(cast(ubyte *)p)[*sentinel_size(p)]; }
// Stamp the guard values around a freshly allocated block of 'size' user bytes.
2950 void sentinel_init(void *p, size_t size)
2952 *sentinel_size(p) = size;
2953 *sentinel_pre(p) = SENTINEL_PRE;
2954 *sentinel_post(p) = SENTINEL_POST;
// Assert both guards are intact — detects buffer underrun and overrun.
2958 void sentinel_Invariant(void *p)
2960 assert(*sentinel_pre(p) == SENTINEL_PRE);
2961 assert(*sentinel_post(p) == SENTINEL_POST);
// Raw block pointer -> user pointer (skip the two guard/size words).
2965 void *sentinel_add(void *p)
2967 return p + 2 * size_t.sizeof;
// User pointer -> raw block pointer (inverse of sentinel_add).
2971 void *sentinel_sub(void *p)
2973 return p - 2 * size_t.sizeof;
// Non-SENTINEL build: no extra bytes, and all helpers compile to no-ops
// (their empty bodies are elided from this view).
2978 const uint SENTINEL_EXTRA = 0;
2981 void sentinel_init(void *p, size_t size)
2986 void sentinel_Invariant(void *p)
2991 void *sentinel_add(void *p)
2997 void *sentinel_sub(void *p)