2 * This module contains the garbage collector implementation.
4 * Copyright: Copyright (C) 2001-2007 Digital Mars, www.digitalmars.com.
7 * This software is provided 'as-is', without any express or implied
8 * warranty. In no event will the authors be held liable for any damages
9 * arising from the use of this software.
11 * Permission is granted to anyone to use this software for any purpose,
12 * including commercial applications, and to alter it and redistribute it
13 * freely, in both source and binary form, subject to the following
16 * o The origin of this software must not be misrepresented; you must not
17 * claim that you wrote the original software. If you use this software
18 * in a product, an acknowledgment in the product documentation would be
19 * appreciated but is not required.
20 * o Altered source versions must be plainly marked as such, and must not
21 * be misrepresented as being the original software.
22 * o This notice may not be removed or altered from any source
24 * Authors: Walter Bright, David Friedman, Sean Kelly
29 // D Programming Language Garbage Collector implementation
31 /************** Debugging ***************************/
33 //debug = PRINTF; // turn on printf's
34 //debug = COLLECT_PRINTF; // turn on printf's
35 //debug = LOGGING; // log allocations / frees
36 //debug = MEMSTOMP; // stomp on memory
37 //debug = SENTINEL; // add underrun/overrun protection
38 //debug = PTRCHECK; // more pointer checking
39 //debug = PTRCHECK2; // thorough but slow pointer checking
41 /*************** Configuration *********************/
43 version = STACKGROWSDOWN; // growing the stack means subtracting from the stack pointer
44 // (use for Intel X86 CPUs)
45 // else growing the stack means adding to the stack pointer
46 version = MULTI_THREADED; // produce multithreaded version
48 /***************************************************/
54 import cstdlib = tango.stdc.stdlib : calloc, free, malloc, realloc;
55 import cstring = tango.stdc.string : memcpy, memmove, memset;
57 debug import tango.stdc.stdio : printf;
61 // BUG: The following import will likely not work, since the gcc
62 // subdirectory is elsewhere. Instead, perhaps the functions
63 // could be declared directly or some other resolution could
65 import gcc.builtins; // for __builtin_unwind_init
80 FINALIZE = 0b0000_0001,
81 NO_SCAN = 0b0000_0010,
82 NO_MOVE = 0b0000_0100,
83 ALL_BITS = 0b1111_1111
86 extern (C) void* rt_stackBottom();
87 extern (C) void* rt_stackTop();
89 extern (C) void rt_finalize( void* p, bool det = true );
91 alias void delegate( void*, void* ) scanFn;
93 extern (C) void rt_scanStaticData( scanFn scan );
95 version (MULTI_THREADED)
97 extern (C) bool thread_needLock();
98 extern (C) void thread_suspendAll();
99 extern (C) void thread_resumeAll();
101 extern (C) void thread_scanAll( scanFn fn, void* curStackTop = null );
104 extern (C) void onOutOfMemoryError();
108 OPFAIL = ~cast(size_t)0
116 /* ======================= Leak Detector =========================== */
131 printf(" p = %x, size = %d, parent = %x ", p, size, parent);
134 printf("%s(%u)", file, line);
154 void reserve(size_t nentries)
156 assert(dim <= allocdim);
157 if (allocdim - dim < nentries)
159 allocdim = (dim + nentries) * 2;
160 assert(dim + nentries <= allocdim);
163 data = cast(Log*)cstdlib.malloc(allocdim * Log.sizeof);
164 if (!data && allocdim)
165 onOutOfMemoryError();
170 newdata = cast(Log*)cstdlib.malloc(allocdim * Log.sizeof);
171 if (!newdata && allocdim)
172 onOutOfMemoryError();
173 cstring.memcpy(newdata, data, dim * Log.sizeof);
187 void remove(size_t i)
189 cstring.memmove(data + i, data + i + 1, (dim - i) * Log.sizeof);
196 for (size_t i = 0; i < dim; i++)
201 return OPFAIL; // not found
205 void copy(LogArray *from)
207 reserve(from.dim - dim);
208 assert(from.dim <= allocdim);
209 cstring.memcpy(data, from.data, from.dim * Log.sizeof);
216 /* ============================ GC =============================== */
219 class GCLock { } // just a dummy so we can get a global lock
222 const uint GCVERSION = 1; // increment every time we change interface
227 // For passing to debug code
231 uint gcversion = GCVERSION;
233 Gcx *gcx; // implementation
234 static ClassInfo gcLock; // global lock
239 gcLock = GCLock.classinfo;
240 gcx = cast(Gcx*)cstdlib.calloc(1, Gcx.sizeof);
242 onOutOfMemoryError();
244 setStackBottom(rt_stackBottom());
264 if (!thread_needLock())
266 assert(gcx.disabled > 0);
269 else synchronized (gcLock)
271 assert(gcx.disabled > 0);
282 if (!thread_needLock())
286 else synchronized (gcLock)
296 uint getAttr(void* p)
305 Pool* pool = gcx.findPool(p);
310 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
312 oldb = gcx.getBits(pool, biti);
317 if (!thread_needLock())
321 else synchronized (gcLock)
331 uint setAttr(void* p, uint mask)
340 Pool* pool = gcx.findPool(p);
345 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
347 oldb = gcx.getBits(pool, biti);
348 gcx.setBits(pool, biti, mask);
353 if (!thread_needLock())
357 else synchronized (gcLock)
367 uint clrAttr(void* p, uint mask)
376 Pool* pool = gcx.findPool(p);
381 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
383 oldb = gcx.getBits(pool, biti);
384 gcx.clrBits(pool, biti, mask);
389 if (!thread_needLock())
393 else synchronized (gcLock)
403 void *malloc(size_t size, uint bits = 0)
410 if (!thread_needLock())
412 return mallocNoSync(size, bits);
414 else synchronized (gcLock)
416 return mallocNoSync(size, bits);
424 private void *mallocNoSync(size_t size, uint bits = 0)
431 //debug(PRINTF) printf("GC::malloc(size = %d, gcx = %p)\n", size, gcx);
434 size += SENTINEL_EXTRA;
437 // Cache previous binsize lookup - Dave Fladebo.
438 static size_t lastsize = -1;
440 if (size == lastsize)
444 bin = gcx.findBin(size);
454 if (!gcx.allocPage(bin) && !gcx.disabled) // try to find a new page
456 if (!thread_needLock())
458 /* Then we haven't locked it yet. Be sure
459 * and lock for a collection, since a finalizer
460 * may start a new thread.
462 synchronized (gcLock)
464 gcx.fullcollectshell();
467 else if (!gcx.fullcollectshell()) // collect to find a new page
472 if (!gcx.bucket[bin] && !gcx.allocPage(bin))
475 gcx.newPool(1); // allocate new pool to find a new page
476 result = gcx.allocPage(bin);
478 onOutOfMemoryError();
483 // Return next item from free list
484 gcx.bucket[bin] = (cast(List*)p).next;
485 if( !(bits & BlkAttr.NO_SCAN) )
486 cstring.memset(p + size, 0, binsize[bin] - size);
487 //debug(PRINTF) printf("\tmalloc => %x\n", p);
488 debug (MEMSTOMP) cstring.memset(p, 0xF0, size);
492 p = gcx.bigAlloc(size);
494 onOutOfMemoryError();
496 size -= SENTINEL_EXTRA;
498 sentinel_init(p, size);
499 gcx.log_malloc(p, size);
503 Pool *pool = gcx.findPool(p);
506 gcx.setBits(pool, cast(size_t)(p - pool.baseAddr) / 16, bits);
515 void *calloc(size_t size, uint bits = 0)
522 if (!thread_needLock())
524 return callocNoSync(size, bits);
526 else synchronized (gcLock)
528 return callocNoSync(size, bits);
536 private void *callocNoSync(size_t size, uint bits = 0)
540 //debug(PRINTF) printf("calloc: %x len %d\n", p, len);
541 void *p = mallocNoSync(size, bits);
542 cstring.memset(p, 0, size);
550 void *realloc(void *p, size_t size, uint bits = 0)
552 if (!thread_needLock())
554 return reallocNoSync(p, size, bits);
556 else synchronized (gcLock)
558 return reallocNoSync(p, size, bits);
566 private void *reallocNoSync(void *p, size_t size, uint bits = 0)
576 p = mallocNoSync(size, bits);
582 //debug(PRINTF) printf("GC::realloc(p = %x, size = %u)\n", p, size);
585 sentinel_Invariant(p);
586 psize = *sentinel_size(p);
591 Pool *pool = gcx.findPool(p);
595 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
599 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
600 gcx.setBits(pool, biti, bits);
604 bits = gcx.getBits(pool, biti);
608 p2 = mallocNoSync(size, bits);
611 //debug(PRINTF) printf("\tcopying %d bytes\n",size);
612 cstring.memcpy(p2, p, size);
618 psize = gcx.findSize(p); // find allocated size
619 if (psize >= PAGESIZE && size >= PAGESIZE)
621 auto psz = psize / PAGESIZE;
622 auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
626 auto pool = gcx.findPool(p);
627 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
631 synchronized (gcLock)
633 debug (MEMSTOMP) cstring.memset(p + size, 0xF2, psize - size);
634 pool.freePages(pagenum + newsz, psz - newsz);
638 else if (pagenum + newsz <= pool.npages)
640 // Attempt to expand in place
641 synchronized (gcLock)
643 for (size_t i = pagenum + psz; 1;)
645 if (i == pagenum + newsz)
647 debug (MEMSTOMP) cstring.memset(p + psize, 0xF0, size - psize);
648 cstring.memset(&pool.pagetable[pagenum + psz], B_PAGEPLUS, newsz - psz);
651 if (i == pool.ncommitted)
653 auto u = pool.extendPages(pagenum + newsz - pool.ncommitted);
659 if (pool.pagetable[i] != B_FREE)
666 if (psize < size || // if new size is bigger
667 psize > size * 2) // or less than half
671 Pool *pool = gcx.findPool(p);
675 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
679 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
680 gcx.setBits(pool, biti, bits);
684 bits = gcx.getBits(pool, biti);
688 p2 = mallocNoSync(size, bits);
691 //debug(PRINTF) printf("\tcopying %d bytes\n",size);
692 cstring.memcpy(p2, p, size);
702 * Attempt to in-place enlarge the memory block pointed to by p by at least
703 * minbytes beyond its current capacity, up to a maximum of maxsize. This
704 * does not attempt to move the memory block (like realloc() does).
707 * 0 if could not extend p,
708 * total size of entire memory block if successful.
710 size_t extend(void* p, size_t minsize, size_t maxsize)
712 if (!thread_needLock())
714 return extendNoSync(p, minsize, maxsize);
716 else synchronized (gcLock)
718 return extendNoSync(p, minsize, maxsize);
726 private size_t extendNoSync(void* p, size_t minsize, size_t maxsize)
729 assert( minsize <= maxsize );
733 //debug(PRINTF) printf("GC::extend(p = %x, minsize = %u, maxsize = %u)\n", p, minsize, maxsize);
738 auto psize = gcx.findSize(p); // find allocated size
739 if (psize < PAGESIZE)
740 return 0; // cannot extend buckets
742 auto psz = psize / PAGESIZE;
743 auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
744 auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
746 auto pool = gcx.findPool(p);
747 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
750 for (sz = 0; sz < maxsz; sz++)
752 auto i = pagenum + psz + sz;
753 if (i == pool.ncommitted)
755 if (pool.pagetable[i] != B_FREE)
764 else if (pagenum + psz + sz == pool.ncommitted)
766 auto u = pool.extendPages(minsz - sz);
773 debug (MEMSTOMP) cstring.memset(p + psize, 0xF0, (psz + sz) * PAGESIZE - psize);
774 cstring.memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
777 return (psz + sz) * PAGESIZE;
784 size_t reserve(size_t size)
791 if (!thread_needLock())
793 return reserveNoSync(size);
795 else synchronized (gcLock)
797 return reserveNoSync(size);
805 private size_t reserveNoSync(size_t size)
810 return gcx.reserve(size);
824 if (!thread_needLock())
826 return freeNoSync(p);
828 else synchronized (gcLock)
830 return freeNoSync(p);
838 private void freeNoSync(void *p)
847 // Find which page it is in
848 pool = gcx.findPool(p);
849 if (!pool) // if not one of ours
851 sentinel_Invariant(p);
853 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
854 biti = cast(size_t)(p - pool.baseAddr) / 16;
855 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
857 bin = cast(Bins)pool.pagetable[pagenum];
858 if (bin == B_PAGE) // if large alloc
865 while (++n < pool.ncommitted && pool.pagetable[n] == B_PAGEPLUS)
867 debug (MEMSTOMP) cstring.memset(p, 0xF2, npages * PAGESIZE);
868 pool.freePages(pagenum, npages);
871 { // Add to free list
872 List *list = cast(List*)p;
874 debug (MEMSTOMP) cstring.memset(p, 0xF2, binsize[bin]);
876 list.next = gcx.bucket[bin];
877 gcx.bucket[bin] = list;
879 gcx.log_free(sentinel_add(p));
884 * Determine the base address of the block containing p. If p is not a gc
885 * allocated pointer, return null.
887 void* addrOf(void *p)
894 if (!thread_needLock())
896 return addrOfNoSync(p);
898 else synchronized (gcLock)
900 return addrOfNoSync(p);
908 void* addrOfNoSync(void *p)
915 return gcx.findBase(p);
920 * Determine the allocated size of pointer p. If p is an interior pointer
921 * or not a gc allocated pointer, return 0.
923 size_t sizeOf(void *p)
930 if (!thread_needLock())
932 return sizeOfNoSync(p);
934 else synchronized (gcLock)
936 return sizeOfNoSync(p);
944 private size_t sizeOfNoSync(void *p)
951 size_t size = gcx.findSize(p);
953 // Check for interior pointer
955 // 1) size is a power of 2 for less than PAGESIZE values
956 // 2) base of memory pool is aligned on PAGESIZE boundary
957 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
959 return size ? size - SENTINEL_EXTRA : 0;
963 if (p == gcx.p_cache)
964 return gcx.size_cache;
966 size_t size = gcx.findSize(p);
968 // Check for interior pointer
970 // 1) size is a power of 2 for less than PAGESIZE values
971 // 2) base of memory pool is aligned on PAGESIZE boundary
972 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
977 gcx.size_cache = size;
986 * Determine the base address of the block containing p. If p is not a gc
987 * allocated pointer, return null.
989 BlkInfo query(void *p)
997 if (!thread_needLock())
999 return queryNoSync(p);
1001 else synchronized (gcLock)
1003 return queryNoSync(p);
1011 BlkInfo queryNoSync(void *p)
1015 return gcx.getInfo(p);
1020 * Verify that pointer p:
1021 * 1) belongs to this memory pool
1022 * 2) points to the start of an allocated piece of memory
1023 * 3) is not on a free list
1032 if (!thread_needLock())
1036 else synchronized (gcLock)
1046 private void checkNoSync(void *p)
1050 sentinel_Invariant(p);
1058 p = sentinel_sub(p);
1059 pool = gcx.findPool(p);
1061 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1062 bin = cast(Bins)pool.pagetable[pagenum];
1063 assert(bin <= B_PAGE);
1064 size = binsize[bin];
1065 assert((cast(size_t)p & (size - 1)) == 0);
1071 // Check that p is not on a free list
1074 for (list = gcx.bucket[bin]; list; list = list.next)
1076 assert(cast(void*)list != p);
1087 private void setStackBottom(void *p)
1089 version (STACKGROWSDOWN)
1091 //p = (void *)((uint *)p + 4);
1092 if (p > gcx.stackBottom)
1094 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1095 gcx.stackBottom = p;
1100 //p = (void *)((uint *)p - 4);
1101 if (p < gcx.stackBottom)
1103 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1104 gcx.stackBottom = cast(char*)p;
1111 * add p to list of roots
1113 void addRoot(void *p)
1120 if (!thread_needLock())
1124 else synchronized (gcLock)
1132 * remove p from list of roots
1134 void removeRoot(void *p)
1141 if (!thread_needLock())
1145 else synchronized (gcLock)
1153 * add range to scan for roots
1155 void addRange(void *p, size_t sz)
1162 //debug(PRINTF) printf("+GC.addRange(pbot = x%x, ptop = x%x)\n", pbot, ptop);
1163 if (!thread_needLock())
1165 gcx.addRange(p, p + sz);
1167 else synchronized (gcLock)
1169 gcx.addRange(p, p + sz);
1171 //debug(PRINTF) printf("-GC.addRange()\n");
1178 void removeRange(void *p)
1185 if (!thread_needLock())
1189 else synchronized (gcLock)
1197 * do full garbage collection
1201 debug(PRINTF) printf("GC.fullCollect()\n");
1203 if (!thread_needLock())
1205 gcx.fullcollectshell();
1207 else synchronized (gcLock)
1209 gcx.fullcollectshell();
1217 debug(PRINTF) printf("poolsize = %x, usedsize = %x, freelistsize = %x\n",
1218 stats.poolsize, stats.usedsize, stats.freelistsize);
1226 * do full garbage collection ignoring roots
1228 void fullCollectNoStack()
1230 if (!thread_needLock())
1233 gcx.fullcollectshell();
1236 else synchronized (gcLock)
1239 gcx.fullcollectshell();
1246 * minimize free space usage
1250 if (!thread_needLock())
1254 else synchronized (gcLock)
1262 * Retrieve statistics about garbage collection.
1263 * Useful for debugging and tuning.
1265 void getStats(out GCStats stats)
1267 if (!thread_needLock())
1269 getStatsNoSync(stats);
1271 else synchronized (gcLock)
1273 getStatsNoSync(stats);
1281 private void getStatsNoSync(out GCStats stats)
1290 //debug(PRINTF) printf("getStats()\n");
1291 cstring.memset(&stats, 0, GCStats.sizeof);
1293 for (n = 0; n < gcx.npools; n++)
1294 { Pool *pool = gcx.pooltable[n];
1296 psize += pool.ncommitted * PAGESIZE;
1297 for (size_t j = 0; j < pool.ncommitted; j++)
1299 Bins bin = cast(Bins)pool.pagetable[j];
1302 else if (bin == B_PAGE)
1304 else if (bin < B_PAGE)
1309 for (n = 0; n < B_PAGE; n++)
1311 //debug(PRINTF) printf("bin %d\n", n);
1312 for (List *list = gcx.bucket[n]; list; list = list.next)
1314 //debug(PRINTF) printf("\tlist %x\n", list);
1315 flsize += binsize[n];
1319 usize = bsize - flsize;
1321 stats.poolsize = psize;
1322 stats.usedsize = bsize - flsize;
1323 stats.freelistsize = flsize;
1328 /* ============================ Gcx =============================== */
1332 COMMITSIZE = (4096*16),
1333 POOLSIZE = (4096*256),
1347 B_PAGE, // start of large alloc
1348 B_PAGEPLUS, // continuation of large alloc
1349 B_FREE, // free page
1350 B_UNCOMMITTED, // memory not committed for this page
1371 const uint binsize[B_MAX] = [ 16,32,64,128,256,512,1024,2048,4096 ];
1372 const uint notbinsize[B_MAX] = [ ~(16u-1),~(32u-1),~(64u-1),~(128u-1),~(256u-1),
1373 ~(512u-1),~(1024u-1),~(2048u-1),~(4096u-1) ];
1375 /* ============================ Gcx =============================== */
1392 uint noStack; // !=0 means don't scan stack
1393 uint log; // turn on logging
1397 int disabled; // turn off collections if >0
1399 byte *minAddr; // min(baseAddr)
1400 byte *maxAddr; // max(topAddr)
1405 List *bucket[B_MAX]; // free list for each size
1411 (cast(byte*)this)[0 .. Gcx.sizeof] = 0;
1412 stackBottom = cast(char*)&dummy;
1414 //printf("gcx = %p, self = %x\n", this, self);
1423 for (size_t i = 0; i < npools; i++)
1424 { Pool *pool = pooltable[i];
1430 cstdlib.free(pooltable);
1433 cstdlib.free(roots);
1436 cstdlib.free(ranges);
1440 void Invariant() { }
1447 //printf("Gcx.invariant(): this = %p\n", this);
1450 for (i = 0; i < npools; i++)
1451 { Pool *pool = pooltable[i];
1456 assert(minAddr == pool.baseAddr);
1460 assert(pool.opCmp(pooltable[i + 1]) < 0);
1462 else if (i + 1 == npools)
1464 assert(maxAddr == pool.topAddr);
1470 assert(rootdim != 0);
1471 assert(nroots <= rootdim);
1476 assert(rangedim != 0);
1477 assert(nranges <= rangedim);
1479 for (i = 0; i < nranges; i++)
1481 assert(ranges[i].pbot);
1482 assert(ranges[i].ptop);
1483 assert(ranges[i].pbot <= ranges[i].ptop);
1487 for (i = 0; i < B_PAGE; i++)
1489 for (List *list = bucket[i]; list; list = list.next)
1500 void addRoot(void *p)
1502 if (nroots == rootdim)
1504 size_t newdim = rootdim * 2 + 16;
1507 newroots = cast(void**)cstdlib.malloc(newdim * newroots[0].sizeof);
1509 onOutOfMemoryError();
1511 { cstring.memcpy(newroots, roots, nroots * newroots[0].sizeof);
1512 cstdlib.free(roots);
1525 void removeRoot(void *p)
1527 for (size_t i = nroots; i--;)
1532 cstring.memmove(roots + i, roots + i + 1, (nroots - i) * roots[0].sizeof);
1543 void addRange(void *pbot, void *ptop)
1545 debug (PRINTF) printf("%x.Gcx::addRange(%x, %x), nranges = %d\n", this,
1546 pbot, ptop, nranges);
1547 if (nranges == rangedim)
1549 size_t newdim = rangedim * 2 + 16;
1552 newranges = cast(Range*)cstdlib.malloc(newdim * newranges[0].sizeof);
1554 onOutOfMemoryError();
1556 { cstring.memcpy(newranges, ranges, nranges * newranges[0].sizeof);
1557 cstdlib.free(ranges);
1562 ranges[nranges].pbot = pbot;
1563 ranges[nranges].ptop = ptop;
1571 void removeRange(void *pbot)
1573 debug (PRINTF) printf("%x.Gcx.removeRange(%x), nranges = %d\n", this,
1575 for (size_t i = nranges; i--;)
1577 if (ranges[i].pbot == pbot)
1580 cstring.memmove(ranges + i, ranges + i + 1, (nranges - i) * ranges[0].sizeof);
1584 debug(PRINTF) printf("Wrong thread\n");
1586 // This is a fatal error, but ignore it.
1587 // The problem is that we can get a Close() call on a thread
1588 // other than the one the range was allocated on.
1594 * Find Pool that pointer is in.
1595 * Return null if not in a Pool.
1596 * Assume pooltable[] is sorted.
1598 Pool *findPool(void *p)
1600 if (p >= minAddr && p < maxAddr)
1604 return pooltable[0];
1607 for (size_t i = 0; i < npools; i++)
1610 pool = pooltable[i];
1611 if (p < pool.topAddr)
1612 { if (pool.baseAddr <= p)
1623 * Find base address of block containing pointer p.
1624 * Returns null if not a gc'd pointer
1626 void* findBase(void *p)
1633 size_t offset = cast(size_t)(p - pool.baseAddr);
1634 size_t pn = offset / PAGESIZE;
1635 Bins bin = cast(Bins)pool.pagetable[pn];
1637 // Adjust bit to be at start of allocated memory block
1640 return pool.baseAddr + (offset & notbinsize[bin]);
1642 else if (bin == B_PAGEPLUS)
1645 { --pn, offset -= PAGESIZE;
1646 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1648 return pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1652 // we are in a B_FREE or B_UNCOMMITTED page
1661 * Find size of pointer p.
1662 * Returns 0 if not a gc'd pointer
1664 size_t findSize(void *p)
1675 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1676 bin = cast(Bins)pool.pagetable[pagenum];
1677 size = binsize[bin];
1679 { size_t npages = pool.ncommitted;
1683 pt = &pool.pagetable[0];
1684 for (i = pagenum + 1; i < npages; i++)
1686 if (pt[i] != B_PAGEPLUS)
1689 size = (i - pagenum) * PAGESIZE;
1699 BlkInfo getInfo(void* p)
1707 size_t offset = cast(size_t)(p - pool.baseAddr);
1708 size_t pn = offset / PAGESIZE;
1709 Bins bin = cast(Bins)pool.pagetable[pn];
1711 ////////////////////////////////////////////////////////////////////
1713 ////////////////////////////////////////////////////////////////////
1717 info.base = pool.baseAddr + (offset & notbinsize[bin]);
1719 else if (bin == B_PAGEPLUS)
1722 { --pn, offset -= PAGESIZE;
1723 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1725 info.base = pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1727 // fix bin for use by size calc below
1728 bin = cast(Bins)pool.pagetable[pn];
1731 ////////////////////////////////////////////////////////////////////
1733 ////////////////////////////////////////////////////////////////////
1735 info.size = binsize[bin];
1737 { size_t npages = pool.ncommitted;
1741 pt = &pool.pagetable[0];
1742 for (i = pn + 1; i < npages; i++)
1744 if (pt[i] != B_PAGEPLUS)
1747 info.size = (i - pn) * PAGESIZE;
1750 ////////////////////////////////////////////////////////////////////
1752 ////////////////////////////////////////////////////////////////////
1754 info.attr = getBits(pool, cast(size_t)(offset / 16));
1761 * Compute bin for size.
1763 static Bins findBin(size_t size)
1772 else if (size <= 32)
1807 * Allocate a new pool of at least size bytes.
1808 * Sort it into pooltable[].
1809 * Mark all memory in the pool as B_FREE.
1810 * Return the actual number of bytes reserved or 0 on error.
1812 size_t reserve(size_t size)
1814 size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
1815 Pool* pool = newPool(npages);
1817 if (!pool || pool.extendPages(npages) == OPFAIL)
1819 return pool.ncommitted * PAGESIZE;
1824 * Minimizes physical memory usage by returning free pools to the OS.
1833 for (n = 0; n < npools; n++)
1835 pool = pooltable[n];
1836 ncommitted = pool.ncommitted;
1837 for (pn = 0; pn < ncommitted; pn++)
1839 if (cast(Bins)pool.pagetable[pn] != B_FREE)
1842 if (pn < ncommitted)
1849 cstring.memmove(pooltable + n,
1851 (--npools - n) * (Pool*).sizeof);
1852 minAddr = pooltable[0].baseAddr;
1853 maxAddr = pooltable[npools - 1].topAddr;
1859 * Allocate a chunk of memory that is larger than a page.
1860 * Return null if out of memory.
1862 void *bigAlloc(size_t size)
1872 npages = (size + PAGESIZE - 1) / PAGESIZE;
1876 // This code could use some refinement when repeatedly
1877 // allocating very large arrays.
1879 for (n = 0; n < npools; n++)
1881 pool = pooltable[n];
1882 pn = pool.allocPages(npages);
1896 freedpages = fullcollectshell();
1897 if (freedpages >= npools * ((POOLSIZE / PAGESIZE) / 4))
1901 // Release empty pools to prevent bloat
1903 // Allocate new pool
1904 pool = newPool(npages);
1909 pn = pool.allocPages(npages);
1910 assert(pn != OPFAIL);
1913 // Release empty pools to prevent bloat
1915 // Allocate new pool
1916 pool = newPool(npages);
1919 pn = pool.allocPages(npages);
1920 assert(pn != OPFAIL);
1930 pool.pagetable[pn] = B_PAGE;
1932 cstring.memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
1933 p = pool.baseAddr + pn * PAGESIZE;
1934 cstring.memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
1935 debug (MEMSTOMP) cstring.memset(p, 0xF1, size);
1936 //debug(PRINTF) printf("\tp = %x\n", p);
1940 return null; // let mallocNoSync handle the error
1945 * Allocate a new pool with at least npages in it.
1946 * Sort it into pooltable[].
1947 * Return null if failed.
1949 Pool *newPool(size_t npages)
1952 Pool** newpooltable;
1956 //debug(PRINTF) printf("************Gcx::newPool(npages = %d)****************\n", npages);
1958 // Round up to COMMITSIZE pages
1959 npages = (npages + (COMMITSIZE/PAGESIZE) - 1) & ~(COMMITSIZE/PAGESIZE - 1);
1961 // Minimum of POOLSIZE
1962 if (npages < POOLSIZE/PAGESIZE)
1963 npages = POOLSIZE/PAGESIZE;
1964 else if (npages > POOLSIZE/PAGESIZE)
1965 { // Give us 150% of requested size, so there's room to extend
1966 auto n = npages + (npages >> 1);
1967 if (n < size_t.max/PAGESIZE)
1971 // Allocate successively larger pools up to 8 megs
1977 n = 8; // cap pool size at 8 megs
1978 n *= (POOLSIZE / PAGESIZE);
1983 pool = cast(Pool *)cstdlib.calloc(1, Pool.sizeof);
1986 pool.initialize(npages);
1990 newnpools = npools + 1;
1991 newpooltable = cast(Pool **)cstdlib.realloc(pooltable, newnpools * (Pool *).sizeof);
1995 // Sort pool into newpooltable[]
1996 for (i = 0; i < npools; i++)
1998 if (pool.opCmp(newpooltable[i]) < 0)
2001 cstring.memmove(newpooltable + i + 1, newpooltable + i, (npools - i) * (Pool *).sizeof);
2002 newpooltable[i] = pool;
2004 pooltable = newpooltable;
2007 minAddr = pooltable[0].baseAddr;
2008 maxAddr = pooltable[npools - 1].topAddr;
2020 * Allocate a page of bin's.
2024 int allocPage(Bins bin)
2032 //debug(PRINTF) printf("Gcx::allocPage(bin = %d)\n", bin);
2033 for (n = 0; n < npools; n++)
2035 pool = pooltable[n];
2036 pn = pool.allocPages(1);
2043 pool.pagetable[pn] = cast(ubyte)bin;
2045 // Convert page to free list
2046 size_t size = binsize[bin];
2047 List **b = &bucket[bin];
2049 p = pool.baseAddr + pn * PAGESIZE;
2050 ptop = p + PAGESIZE;
2051 for (; p < ptop; p += size)
2053 (cast(List *)p).next = *b;
2061 * Search a range of memory values and mark any pointers into the GC pool.
2063 void mark(void *pbot, void *ptop)
2065 void **p1 = cast(void **)pbot;
2066 void **p2 = cast(void **)ptop;
2070 //printf("marking range: %p -> %p\n", pbot, ptop);
2071 for (; p1 < p2; p1++)
2074 byte *p = cast(byte *)(*p1);
2076 //if (log) debug(PRINTF) printf("\tmark %x\n", p);
2077 if (p >= minAddr && p < maxAddr)
2079 if ((cast(size_t)p & ~(PAGESIZE-1)) == pcache)
2085 size_t offset = cast(size_t)(p - pool.baseAddr);
2087 size_t pn = offset / PAGESIZE;
2088 Bins bin = cast(Bins)pool.pagetable[pn];
2090 //debug(PRINTF) printf("\t\tfound pool %x, base=%x, pn = %d, bin = %d, biti = x%x\n", pool, pool.baseAddr, pn, bin, biti);
2092 // Adjust bit to be at start of allocated memory block
2095 biti = (offset & notbinsize[bin]) >> 4;
2096 //debug(PRINTF) printf("\t\tbiti = x%x\n", biti);
2098 else if (bin == B_PAGEPLUS)
2102 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
2103 biti = pn * (PAGESIZE / 16);
2107 // Don't mark bits in B_FREE or B_UNCOMMITTED pages
2111 if (bin >= B_PAGE) // Cache B_PAGE and B_PAGEPLUS lookups
2112 pcache = cast(size_t)p & ~(PAGESIZE-1);
2114 //debug(PRINTF) printf("\t\tmark(x%x) = %d\n", biti, pool.mark.test(biti));
2115 if (!pool.mark.test(biti))
2117 //if (log) debug(PRINTF) printf("\t\tmarking %x\n", p);
2118 pool.mark.set(biti);
2119 if (!pool.noscan.test(biti))
2121 pool.scan.set(biti);
2124 log_parent(sentinel_add(pool.baseAddr + biti * 16), sentinel_add(pbot));
2129 anychanges |= changes;
2134 * Return number of full pages free'd.
2136 size_t fullcollectshell()
2138 // The purpose of the 'shell' is to ensure all the registers
2139 // get put on the stack so they'll be scanned
2144 __builtin_unwind_init();
2151 uint eax,ecx,edx,ebx,ebp,esi,edi;
2164 else version (X86_64)
2166 ulong rax,rbx,rcx,rdx,rbp,rsi,rdi,rsp,r8,r9,r10,r11,r12,r13,r14,r15;
2169 movq rax[RBP], RAX ;
2170 movq rbx[RBP], RBX ;
2171 movq rcx[RBP], RCX ;
2172 movq rdx[RBP], RDX ;
2173 movq rbp[RBP], RBP ;
2174 movq rsi[RBP], RSI ;
2175 movq rdi[RBP], RDI ;
2176 movq rsp[RBP], RSP ;
2179 movq r10[RBP], R10 ;
2180 movq r11[RBP], R11 ;
2181 movq r12[RBP], R12 ;
2182 movq r13[RBP], R13 ;
2183 movq r14[RBP], R14 ;
2184 movq r15[RBP], R15 ;
2189 static assert( false, "Architecture not supported." );
2200 result = fullcollect(sp);
2223 size_t fullcollect(void *stackTop)
2228 debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
2230 thread_suspendAll();
2236 for (n = 0; n < npools; n++)
2238 pool = pooltable[n];
2241 pool.freebits.zero();
2244 // Mark each free entry, so it doesn't get scanned
2245 for (n = 0; n < B_PAGE; n++)
2247 for (List *list = bucket[n]; list; list = list.next)
2249 pool = findPool(list);
2251 pool.freebits.set(cast(size_t)(cast(byte*)list - pool.baseAddr) / 16);
2255 for (n = 0; n < npools; n++)
2257 pool = pooltable[n];
2258 pool.mark.copy(&pool.freebits);
2261 rt_scanStaticData( &mark );
2263 version (MULTI_THREADED)
2267 // Scan stacks and registers for each paused thread
2268 thread_scanAll( &mark, stackTop );
2275 // Scan stack for main thread
2276 debug(PRINTF) printf(" scan stack bot = %x, top = %x\n", stackTop, stackBottom);
2277 version (STACKGROWSDOWN)
2278 mark(stackTop, stackBottom);
2280 mark(stackBottom, stackTop);
2285 debug(COLLECT_PRINTF) printf("scan roots[]\n");
2286 mark(roots, roots + nroots);
2289 debug(COLLECT_PRINTF) printf("scan ranges[]\n");
2291 for (n = 0; n < nranges; n++)
2293 debug(COLLECT_PRINTF) printf("\t%x .. %x\n", ranges[n].pbot, ranges[n].ptop);
2294 mark(ranges[n].pbot, ranges[n].ptop);
2298 debug(COLLECT_PRINTF) printf("\tscan heap\n");
2302 for (n = 0; n < npools; n++)
2308 pool = pooltable[n];
2310 bbase = pool.scan.base();
2311 btop = bbase + pool.scan.nwords;
2312 for (b = bbase; b < btop;)
2326 o = pool.baseAddr + (b - bbase) * 32 * 16;
2327 if (!(bitm & 0xFFFF))
2332 for (; bitm; o += 16, bitm >>= 1)
2337 pn = cast(size_t)(o - pool.baseAddr) / PAGESIZE;
2338 bin = cast(Bins)pool.pagetable[pn];
2341 mark(o, o + binsize[bin]);
2343 else if (bin == B_PAGE || bin == B_PAGEPLUS)
2345 if (bin == B_PAGEPLUS)
2347 while (pool.pagetable[pn - 1] != B_PAGE)
2351 while (pn + u < pool.ncommitted && pool.pagetable[pn + u] == B_PAGEPLUS)
2353 mark(o, o + u * PAGESIZE);
2362 // Free up everything not marked
2363 debug(COLLECT_PRINTF) printf("\tfree'ing\n");
2364 size_t freedpages = 0;
2366 for (n = 0; n < npools; n++)
2371 pool = pooltable[n];
2372 bbase = pool.mark.base();
2373 ncommitted = pool.ncommitted;
2374 for (pn = 0; pn < ncommitted; pn++, bbase += PAGESIZE / (32 * 16))
2376 Bins bin = cast(Bins)pool.pagetable[pn];
2383 auto size = binsize[bin];
2385 p = pool.baseAddr + pn * PAGESIZE;
2386 ptop = p + PAGESIZE;
2387 biti = pn * (PAGESIZE/16);
2388 bitstride = size / 16;
2390 version(none) // BUG: doesn't work because freebits() must also be cleared
2392 // If free'd entire page
2393 if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 && bbase[3] == 0 &&
2394 bbase[4] == 0 && bbase[5] == 0 && bbase[6] == 0 && bbase[7] == 0)
2396 for (; p < ptop; p += size, biti += bitstride)
2398 if (pool.finals.nbits && pool.finals.testClear(biti))
2399 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2400 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
2402 List *list = cast(List *)p;
2403 //debug(PRINTF) printf("\tcollecting %x\n", list);
2404 log_free(sentinel_add(list));
2406 debug (MEMSTOMP) cstring.memset(p, 0xF3, size);
2408 pool.pagetable[pn] = B_FREE;
2410 //debug(PRINTF) printf("freeing entire page %d\n", pn);
2414 for (; p < ptop; p += size, biti += bitstride)
2416 if (!pool.mark.test(biti))
2418 sentinel_Invariant(sentinel_add(p));
2420 pool.freebits.set(biti);
2421 if (pool.finals.nbits && pool.finals.testClear(biti))
2422 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2423 clrBits(pool, biti, BlkAttr.ALL_BITS);
2425 List *list = cast(List *)p;
2426 debug(PRINTF) printf("\tcollecting %x\n", list);
2427 log_free(sentinel_add(list));
2429 debug (MEMSTOMP) cstring.memset(p, 0xF3, size);
2435 else if (bin == B_PAGE)
2436 { size_t biti = pn * (PAGESIZE / 16);
2438 if (!pool.mark.test(biti))
2439 { byte *p = pool.baseAddr + pn * PAGESIZE;
2441 sentinel_Invariant(sentinel_add(p));
2442 if (pool.finals.nbits && pool.finals.testClear(biti))
2443 rt_finalize(sentinel_add(p), false/*noStack > 0*/);
2444 clrBits(pool, biti, BlkAttr.ALL_BITS);
2446 debug(COLLECT_PRINTF) printf("\tcollecting big %x\n", p);
2447 log_free(sentinel_add(p));
2448 pool.pagetable[pn] = B_FREE;
2450 debug (MEMSTOMP) cstring.memset(p, 0xF3, PAGESIZE);
2451 while (pn + 1 < ncommitted && pool.pagetable[pn + 1] == B_PAGEPLUS)
2454 pool.pagetable[pn] = B_FREE;
2459 cstring.memset(p, 0xF3, PAGESIZE);
2470 // Free complete pages, rebuild free list
2471 debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
2472 size_t recoveredpages = 0;
2473 for (n = 0; n < npools; n++)
2477 pool = pooltable[n];
2478 ncommitted = pool.ncommitted;
2479 for (pn = 0; pn < ncommitted; pn++)
2481 Bins bin = cast(Bins)pool.pagetable[pn];
2487 size_t size = binsize[bin];
2488 size_t bitstride = size / 16;
2489 size_t bitbase = pn * (PAGESIZE / 16);
2490 size_t bittop = bitbase + (PAGESIZE / 16);
2494 for (biti = bitbase; biti < bittop; biti += bitstride)
2495 { if (!pool.freebits.test(biti))
2498 pool.pagetable[pn] = B_FREE;
2503 p = pool.baseAddr + pn * PAGESIZE;
2504 for (u = 0; u < PAGESIZE; u += size)
2505 { biti = bitbase + u / 16;
2506 if (pool.freebits.test(biti))
2509 list = cast(List *)(p + u);
2510 if (list.next != bucket[bin]) // avoid unnecessary writes
2511 list.next = bucket[bin];
2519 debug(COLLECT_PRINTF) printf("recovered pages = %d\n", recoveredpages);
2520 debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, npools);
2522 return freedpages + recoveredpages;
// Collect the BlkAttr flags recorded for the allocation at bit index
// `biti` in `pool`.  FINALIZE is read from the lazily-allocated `finals`
// bit set (consulted only when it exists, i.e. nbits != 0); NO_SCAN is
// read from `noscan`.  NO_MOVE support is stubbed out below.
2529 uint getBits(Pool* pool, size_t biti)
2538 if (pool.finals.nbits &&
2539 pool.finals.test(biti))
2540 bits |= BlkAttr.FINALIZE;
2541 if (pool.noscan.test(biti))
2542 bits |= BlkAttr.NO_SCAN;
2543 // if (pool.nomove.nbits &&
2544 // pool.nomove.test(biti))
2545 // bits |= BlkAttr.NO_MOVE;
// Record the BlkAttr flags from `mask` for the allocation at bit index
// `biti` in `pool`.  The `finals` bit set is allocated on first use,
// sized to match the pool's `mark` bit set.
2553 void setBits(Pool* pool, size_t biti, uint mask)
2560 if (mask & BlkAttr.FINALIZE)
2562 if (!pool.finals.nbits)
2563 pool.finals.alloc(pool.mark.nbits); // lazy: most pools never hold finalizable objects
2564 pool.finals.set(biti);
2566 if (mask & BlkAttr.NO_SCAN)
2568 pool.noscan.set(biti);
2570 // if (mask & BlkAttr.NO_MOVE)
2572 // if (!pool.nomove.nbits)
2573 // pool.nomove.alloc(pool.mark.nbits);
2574 // pool.nomove.set(biti);
// Clear the BlkAttr flags named in `mask` for bit index `biti` in `pool`.
// FINALIZE is cleared only when the lazily-allocated `finals` bit set
// actually exists (nbits != 0), mirroring the guard in getBits/setBits.
2582 void clrBits(Pool* pool, size_t biti, uint mask)
2589 if (mask & BlkAttr.FINALIZE && pool.finals.nbits)
2590 pool.finals.clear(biti);
2591 if (mask & BlkAttr.NO_SCAN)
2592 pool.noscan.clear(biti);
2593 // if (mask & BlkAttr.NO_MOVE && pool.nomove.nbits)
2594 // pool.nomove.clear(biti);
2598 /***** Leak Detector ******/
2609 //debug(PRINTF) printf("+log_init()\n");
2610 current.reserve(1000);
2612 //debug(PRINTF) printf("-log_init()\n");
2616 void log_malloc(void *p, size_t size)
2618 //debug(PRINTF) printf("+log_malloc(p = %x, size = %d)\n", p, size);
2631 //debug(PRINTF) printf("-log_malloc()\n");
// Leak-detector hook (debug(LOGGING)): remove pointer `p` from the
// `current` allocation log.  Emits a diagnostic when `p` was never
// recorded, i.e. a free of unallocated memory.
2635 void log_free(void *p)
2637 //debug(PRINTF) printf("+log_free(%x)\n", p);
2640 i = current.find(p);
2643 debug(PRINTF) printf("free'ing unallocated memory %x\n", p);
2647 //debug(PRINTF) printf("-log_free()\n");
2653 //debug(PRINTF) printf("+log_collect()\n");
2654 // Print everything in current that is not in prev
2656 debug(PRINTF) printf("New pointers this cycle: --------------------------------\n");
2658 for (size_t i = 0; i < current.dim; i++)
2662 j = prev.find(current.data[i].p);
2664 current.data[i].print();
2669 debug(PRINTF) printf("All roots this cycle: --------------------------------\n");
2670 for (size_t i = 0; i < current.dim; i++)
2675 p = current.data[i].p;
2676 if (!findPool(current.data[i].parent))
2678 j = prev.find(current.data[i].p);
2680 debug(PRINTF) printf("N");
2682 debug(PRINTF) printf(" ");;
2683 current.data[i].print();
2687 debug(PRINTF) printf("Used = %d-------------------------------------------------\n", used);
2688 prev.copy(¤t);
2690 debug(PRINTF) printf("-log_collect()\n");
// Leak-detector hook (debug(LOGGING)): record `parent` as the pointer
// that references the logged allocation `p`.  When `p` is not in the
// log, print a diagnostic locating `p` within its pool (page number,
// bin, and bit index) to aid debugging.
2694 void log_parent(void *p, void *parent)
2696 //debug(PRINTF) printf("+log_parent()\n");
2699 i = current.find(p);
2702 debug(PRINTF) printf("parent'ing unallocated memory %x, parent = %x\n", p, parent);
2706 size_t offset = cast(size_t)(p - pool.baseAddr);
2708 size_t pn = offset / PAGESIZE;
2709 Bins bin = cast(Bins)pool.pagetable[pn];
2710 biti = (offset & notbinsize[bin]); // mask offset down to the bin's allocation boundary
2711 debug(PRINTF) printf("\tbin = %d, offset = x%x, biti = x%x\n", bin, offset, biti);
2715 current.data[i].parent = parent;
2717 //debug(PRINTF) printf("-log_parent()\n");
// No-op stubs compiled in when debug(LOGGING) is disabled, so callers
// can invoke the leak-detector hooks unconditionally at zero cost.
2724 void log_malloc(void *p, size_t size) { }
2725 void log_free(void *p) { }
2726 void log_collect() { }
2727 void log_parent(void *p, void *parent) { }
2732 /* ============================ Pool =============================== */
2739 GCBits mark; // entries already scanned, or should not be scanned
2740 GCBits scan; // entries that need to be scanned
2741 GCBits freebits; // entries that are on the free list
2742 GCBits finals; // entries that need finalizer run on them
2743 GCBits noscan; // entries that should not be scanned
2746 size_t ncommitted; // ncommitted <= npages
// Set up a pool spanning `npages` pages: map page-aligned memory from
// the OS, allocate the mark/scan/freebits/noscan bit sets (one bit per
// 16-byte granule), and mark every page B_UNCOMMITTED in the pagetable.
2750 void initialize(size_t npages)
2754 //debug(PRINTF) printf("Pool::Pool(%u)\n", npages);
2755 poolsize = npages * PAGESIZE;
2756 assert(poolsize >= POOLSIZE);
2757 baseAddr = cast(byte *)os_mem_map(poolsize);
2759 // Some of the code depends on page alignment of memory pools
2760 assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);
2764 //debug(PRINTF) printf("GC fail: poolsize = x%x, errno = %d\n", poolsize, errno);
2765 //debug(PRINTF) printf("message = '%s'\n", sys_errlist[errno]);
2771 topAddr = baseAddr + poolsize;
2773 mark.alloc(cast(size_t)poolsize / 16); // one bit per 16-byte granule
2774 scan.alloc(cast(size_t)poolsize / 16);
2775 freebits.alloc(cast(size_t)poolsize / 16);
2776 noscan.alloc(cast(size_t)poolsize / 16);
2778 pagetable = cast(ubyte*)cstdlib.malloc(npages); // one byte (Bins value) per page
2780 onOutOfMemoryError(); // NOTE(review): presumably reached when the pagetable malloc fails — guard not visible here
2781 cstring.memset(pagetable, B_UNCOMMITTED, npages);
2783 this.npages = npages;
2796 result = os_mem_decommit(baseAddr, 0, ncommitted * PAGESIZE);
2797 assert(result == 0);
2803 result = os_mem_unmap(baseAddr, npages * PAGESIZE);
2804 assert(result == 0);
2812 cstdlib.free(pagetable);
// Intentionally empty public invariant hook; the assert-based
// consistency checks live in the debug-only invariant that follows.
2822 void Invariant() { }
2829 //freebits.Invariant();
2830 //finals.Invariant();
2831 //noscan.Invariant();
2835 //if (baseAddr + npages * PAGESIZE != topAddr)
2836 //printf("baseAddr = %p, npages = %d, topAddr = %p\n", baseAddr, npages, topAddr);
2837 assert(baseAddr + npages * PAGESIZE == topAddr);
2838 assert(ncommitted <= npages);
2841 for (size_t i = 0; i < npages; i++)
2842 { Bins bin = cast(Bins)pagetable[i];
2844 assert(bin < B_MAX);
2850 * Allocate n pages from Pool.
2851 * Returns OPFAIL on failure.
// Allocate `n` consecutive pages from this pool: linear first-fit scan
// over the committed pages for a run of B_FREE entries; when none is
// found, fall back to committing more pages via extendPages().
// Returns the index of the first page of the run, or OPFAIL on failure.
2853 size_t allocPages(size_t n)
2858 //debug(PRINTF) printf("Pool::allocPages(n = %d)\n", n);
2860 for (i = 0; i < ncommitted; i++) // first-fit scan of committed pages
2862 if (pagetable[i] == B_FREE)
2865 { //debug(PRINTF) printf("\texisting pn = %d\n", i - n + 1);
2872 return extendPages(n);
2876 * Extend Pool by n pages.
2877 * Returns OPFAIL on failure.
// Commit at least `n` more pages from the reserved (mapped but
// uncommitted) tail of the pool.  The amount committed is rounded up
// to COMMITSIZE granularity, clamped to what remains reserved, and the
// newly committed pages are marked B_FREE.  Returns the starting page
// index of the resulting free run, or OPFAIL on failure.
2879 size_t extendPages(size_t n)
2881 //debug(PRINTF) printf("Pool::extendPages(n = %d)\n", n);
2882 if (ncommitted + n <= npages)
2886 tocommit = (n + (COMMITSIZE/PAGESIZE) - 1) & ~(COMMITSIZE/PAGESIZE - 1); // round up to COMMITSIZE pages
2887 if (ncommitted + tocommit > npages)
2888 tocommit = npages - ncommitted; // clamp to the reserved region
2889 //debug(PRINTF) printf("\tlooking to commit %d more pages\n", tocommit);
2891 if (os_mem_commit(baseAddr, ncommitted * PAGESIZE, tocommit * PAGESIZE) == 0)
2893 cstring.memset(pagetable + ncommitted, B_FREE, tocommit);
2894 auto i = ncommitted;
2895 ncommitted += tocommit;
2897 while (i && pagetable[i - 1] == B_FREE) // extend the run backwards over adjacent free pages
2902 //debug(PRINTF) printf("\tfailed to commit %d pages\n", tocommit);
2910 * Free npages pages starting with pagenum.
// Mark `npages` pages starting at `pagenum` as B_FREE in the pagetable.
2912 void freePages(size_t pagenum, size_t npages)
2914 cstring.memset(&pagetable[pagenum], B_FREE, npages);
2919 * Used for sorting pooltable[]
2923 if (baseAddr < p2.baseAddr)
2926 return cast(int)(baseAddr > p2.baseAddr);
2931 /* ============================ SENTINEL =============================== */
// Sentinel layout when debug(SENTINEL) is enabled: each user block is
// preceded by [size_t user-size][size_t SENTINEL_PRE] and followed by a
// single SENTINEL_POST byte, so buffer under- and overruns can be caught.
2936 const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
2937 const ubyte SENTINEL_POST = 0xF5; // 8 bits
2938 const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;
2941 size_t* sentinel_size(void *p) { return &(cast(size_t *)p)[-2]; } // stored user size, two words before p
2942 size_t* sentinel_pre(void *p) { return &(cast(size_t *)p)[-1]; } // underrun guard word, just before p
2943 ubyte* sentinel_post(void *p) { return &(cast(ubyte *)p)[*sentinel_size(p)]; } // overrun guard byte, just past the user data
// Stamp the guard values around a freshly allocated block: store the
// user size and the pre/post sentinel patterns.  `p` points at the
// user-visible data, after the two bookkeeping words.
2946 void sentinel_init(void *p, size_t size)
2948 *sentinel_size(p) = size;
2949 *sentinel_pre(p) = SENTINEL_PRE;
2950 *sentinel_post(p) = SENTINEL_POST;
// Assert that neither guard value around user pointer `p` has been
// overwritten (detects heap under-/overruns in debug(SENTINEL) builds).
2954 void sentinel_Invariant(void *p)
2956 assert(*sentinel_pre(p) == SENTINEL_PRE);
2957 assert(*sentinel_post(p) == SENTINEL_POST);
// Convert between the raw block pointer and the user-visible pointer:
// the user data begins after the two sentinel bookkeeping words.
2961 void *sentinel_add(void *p)
2963 return p + 2 * size_t.sizeof;
2967 void *sentinel_sub(void *p)
2969 return p - 2 * size_t.sizeof;
2974 const uint SENTINEL_EXTRA = 0;
2977 void sentinel_init(void *p, size_t size)
2982 void sentinel_Invariant(void *p)
2987 void *sentinel_add(void *p)
2993 void *sentinel_sub(void *p)