2 * This module contains the garbage collector implementation.
4 * Copyright: Copyright (C) 2001-2007 Digital Mars, www.digitalmars.com.
7 * This software is provided 'as-is', without any express or implied
8 * warranty. In no event will the authors be held liable for any damages
9 * arising from the use of this software.
11 * Permission is granted to anyone to use this software for any purpose,
12 * including commercial applications, and to alter it and redistribute it
13 * freely, in both source and binary form, subject to the following
16 * o The origin of this software must not be misrepresented; you must not
17 * claim that you wrote the original software. If you use this software
18 * in a product, an acknowledgment in the product documentation would be
19 * appreciated but is not required.
20 * o Altered source versions must be plainly marked as such, and must not
21 * be misrepresented as being the original software.
22 * o This notice may not be removed or altered from any source
24 * Authors: Walter Bright, David Friedman, Sean Kelly
27 // D Programming Language Garbage Collector implementation
29 /************** Debugging ***************************/
31 //debug = PRINTF; // turn on printf's
32 //debug = COLLECT_PRINTF; // turn on printf's
33 //debug = THREADINVARIANT; // check thread integrity
34 //debug = LOGGING; // log allocations / frees
35 //debug = MEMSTOMP; // stomp on memory
36 //debug = SENTINEL; // add underrun/overrun protection
37 //debug = PTRCHECK; // more pointer checking
38 //debug = PTRCHECK2; // thorough but slow pointer checking
40 /*************** Configuration *********************/
42 version = STACKGROWSDOWN; // growing the stack means subtracting from the stack pointer
43 // (use for Intel X86 CPUs)
44 // else growing the stack means adding to the stack pointer
45 version = MULTI_THREADED; // produce multithreaded version
47 /***************************************************/
53 import cstdlib = tango.stdc.stdlib : calloc, free, malloc, realloc;
54 import cstring = tango.stdc.string : memcpy, memmove, memset;
56 debug(THREADINVARIANT) import tango.stdc.posix.pthread;
57 debug(PRINTF) import tango.stdc.posix.pthread : pthread_self, pthread_t;
58 debug import tango.stdc.stdio : printf;
62 // BUG: The following import will likely not work, since the gcc
63 // subdirectory is elsewhere. Instead, perhaps the functions
64 // could be declared directly or some other resolution could
66 import gcc.builtins; // for __builtin_unwind_init
81 FINALIZE = 0b0000_0001,
82 NO_SCAN = 0b0000_0010,
83 NO_MOVE = 0b0000_0100,
84 ALL_BITS = 0b1111_1111
87 extern (C) void* rt_stackBottom();
88 extern (C) void* rt_stackTop();
90 extern (C) void rt_finalize( void* p, bool det = true );
92 alias void delegate( void*, void* ) scanFn;
94 extern (C) void rt_scanStaticData( scanFn scan );
96 version (MULTI_THREADED)
98 extern (C) bool thread_needLock();
99 extern (C) void thread_suspendAll();
100 extern (C) void thread_resumeAll();
102 extern (C) void thread_scanAll( scanFn fn, void* curStackTop = null );
105 extern (C) void onOutOfMemoryError();
109 OPFAIL = ~cast(size_t)0
117 /* ======================= Leak Detector =========================== */
132 printf(" p = %x, size = %d, parent = %x ", p, size, parent);
135 printf("%s(%u)", file, line);
155 void reserve(size_t nentries)
157 assert(dim <= allocdim);
158 if (allocdim - dim < nentries)
160 allocdim = (dim + nentries) * 2;
161 assert(dim + nentries <= allocdim);
164 data = cast(Log*)cstdlib.malloc(allocdim * Log.sizeof);
165 if (!data && allocdim)
166 onOutOfMemoryError();
171 newdata = cast(Log*)cstdlib.malloc(allocdim * Log.sizeof);
172 if (!newdata && allocdim)
173 onOutOfMemoryError();
174 cstring.memcpy(newdata, data, dim * Log.sizeof);
188 void remove(size_t i)
190 cstring.memmove(data + i, data + i + 1, (dim - i) * Log.sizeof);
197 for (size_t i = 0; i < dim; i++)
202 return OPFAIL; // not found
206 void copy(LogArray *from)
208 reserve(from.dim - dim);
209 assert(from.dim <= allocdim);
210 cstring.memcpy(data, from.data, from.dim * Log.sizeof);
217 /* ============================ GC =============================== */
220 class GCLock { } // dummy class; its ClassInfo object serves as the global GC lock (assigned to gcLock below)
223 const uint GCVERSION = 1; // increment every time we change interface
228 // For passing to debug code
232 uint gcversion = GCVERSION;
234 Gcx *gcx; // implementation
235 static ClassInfo gcLock; // global lock
240 gcLock = GCLock.classinfo;
241 gcx = cast(Gcx*)cstdlib.calloc(1, Gcx.sizeof);
243 onOutOfMemoryError();
245 setStackBottom(rt_stackBottom());
253 //debug(PRINTF) printf("Thread %x ", pthread_self());
254 //debug(PRINTF) printf("GC.Dtor()\n");
270 gcx.thread_Invariant();
280 if (!thread_needLock())
282 assert(gcx.disabled > 0);
285 else synchronized (gcLock)
287 assert(gcx.disabled > 0);
298 if (!thread_needLock())
302 else synchronized (gcLock)
312 uint getAttr(void* p)
321 Pool* pool = gcx.findPool(p);
326 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
328 oldb = gcx.getBits(pool, biti);
333 if (!thread_needLock())
337 else synchronized (gcLock)
347 uint setAttr(void* p, uint mask)
356 Pool* pool = gcx.findPool(p);
361 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
363 oldb = gcx.getBits(pool, biti);
364 gcx.setBits(pool, biti, mask);
369 if (!thread_needLock())
373 else synchronized (gcLock)
383 uint clrAttr(void* p, uint mask)
392 Pool* pool = gcx.findPool(p);
397 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
399 oldb = gcx.getBits(pool, biti);
400 gcx.clrBits(pool, biti, mask);
405 if (!thread_needLock())
409 else synchronized (gcLock)
419 void *malloc(size_t size, uint bits = 0)
426 if (!thread_needLock())
428 return mallocNoSync(size, bits);
430 else synchronized (gcLock)
432 return mallocNoSync(size, bits);
440 private void *mallocNoSync(size_t size, uint bits = 0)
447 //debug(PRINTF) printf("GC::malloc(size = %d, gcx = %p)\n", size, gcx);
449 //debug(PRINTF) printf("gcx.self = %x, pthread_self() = %x\n", gcx.self, pthread_self());
451 size += SENTINEL_EXTRA;
454 // Cache previous binsize lookup - Dave Fladebo.
455 static size_t lastsize = -1;
457 if (size == lastsize)
461 bin = gcx.findBin(size);
471 if (!gcx.allocPage(bin) && !gcx.disabled) // try to find a new page
473 if (!thread_needLock())
475 /* Then we haven't locked it yet. Be sure
476 * and lock for a collection, since a finalizer
477 * may start a new thread.
479 synchronized (gcLock)
481 gcx.fullcollectshell();
484 else if (!gcx.fullcollectshell()) // collect to find a new page
489 if (!gcx.bucket[bin] && !gcx.allocPage(bin))
492 gcx.newPool(1); // allocate new pool to find a new page
493 result = gcx.allocPage(bin);
495 onOutOfMemoryError();
500 // Return next item from free list
501 gcx.bucket[bin] = (cast(List*)p).next;
502 if( !(bits & BlkAttr.NO_SCAN) )
503 cstring.memset(p + size, 0, binsize[bin] - size);
504 //debug(PRINTF) printf("\tmalloc => %x\n", p);
505 debug (MEMSTOMP) cstring.memset(p, 0xF0, size);
509 p = gcx.bigAlloc(size);
511 onOutOfMemoryError();
513 size -= SENTINEL_EXTRA;
515 sentinel_init(p, size);
516 gcx.log_malloc(p, size);
520 Pool *pool = gcx.findPool(p);
523 gcx.setBits(pool, cast(size_t)(p - pool.baseAddr) / 16, bits);
532 void *calloc(size_t size, uint bits = 0)
539 if (!thread_needLock())
541 return callocNoSync(size, bits);
543 else synchronized (gcLock)
545 return callocNoSync(size, bits);
553 private void *callocNoSync(size_t size, uint bits = 0)
557 //debug(PRINTF) printf("calloc: %x len %d\n", p, len);
558 void *p = mallocNoSync(size, bits);
559 cstring.memset(p, 0, size);
567 void *realloc(void *p, size_t size, uint bits = 0)
569 if (!thread_needLock())
571 return reallocNoSync(p, size, bits);
573 else synchronized (gcLock)
575 return reallocNoSync(p, size, bits);
583 private void *reallocNoSync(void *p, size_t size, uint bits = 0)
593 p = mallocNoSync(size, bits);
599 //debug(PRINTF) printf("GC::realloc(p = %x, size = %u)\n", p, size);
602 sentinel_Invariant(p);
603 psize = *sentinel_size(p);
608 Pool *pool = gcx.findPool(p);
612 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
616 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
617 gcx.setBits(pool, biti, bits);
621 bits = gcx.getBits(pool, biti);
625 p2 = mallocNoSync(size, bits);
628 //debug(PRINTF) printf("\tcopying %d bytes\n",size);
629 cstring.memcpy(p2, p, size);
635 psize = gcx.findSize(p); // find allocated size
636 if (psize >= PAGESIZE && size >= PAGESIZE)
638 auto psz = psize / PAGESIZE;
639 auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
643 auto pool = gcx.findPool(p);
644 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
648 synchronized (gcLock)
650 debug (MEMSTOMP) cstring.memset(p + size, 0xF2, psize - size);
651 pool.freePages(pagenum + newsz, psz - newsz);
655 else if (pagenum + newsz <= pool.npages)
657 // Attempt to expand in place
658 synchronized (gcLock)
660 for (size_t i = pagenum + psz; 1;)
662 if (i == pagenum + newsz)
664 debug (MEMSTOMP) cstring.memset(p + psize, 0xF0, size - psize);
665 cstring.memset(&pool.pagetable[pagenum + psz], B_PAGEPLUS, newsz - psz);
668 if (i == pool.ncommitted)
670 auto u = pool.extendPages(pagenum + newsz - pool.ncommitted);
676 if (pool.pagetable[i] != B_FREE)
683 if (psize < size || // if new size is bigger
684 psize > size * 2) // or less than half
688 Pool *pool = gcx.findPool(p);
692 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
696 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
697 gcx.setBits(pool, biti, bits);
701 bits = gcx.getBits(pool, biti);
705 p2 = mallocNoSync(size, bits);
708 //debug(PRINTF) printf("\tcopying %d bytes\n",size);
709 cstring.memcpy(p2, p, size);
719 * Attempt to in-place enlarge the memory block pointed to by p by at least
720 * minbytes beyond its current capacity, up to a maximum of maxsize. This
721 * does not attempt to move the memory block (like realloc() does).
724 * 0 if could not extend p,
725 * total size of entire memory block if successful.
727 size_t extend(void* p, size_t minsize, size_t maxsize)
729 if (!thread_needLock())
731 return extendNoSync(p, minsize, maxsize);
733 else synchronized (gcLock)
735 return extendNoSync(p, minsize, maxsize);
743 private size_t extendNoSync(void* p, size_t minsize, size_t maxsize)
746 assert( minsize <= maxsize );
750 //debug(PRINTF) printf("GC::extend(p = %x, minsize = %u, maxsize = %u)\n", p, minsize, maxsize);
755 auto psize = gcx.findSize(p); // find allocated size
756 if (psize < PAGESIZE)
757 return 0; // cannot extend buckets
759 auto psz = psize / PAGESIZE;
760 auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
761 auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
763 auto pool = gcx.findPool(p);
764 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
767 for (sz = 0; sz < maxsz; sz++)
769 auto i = pagenum + psz + sz;
770 if (i == pool.ncommitted)
772 if (pool.pagetable[i] != B_FREE)
781 else if (pagenum + psz + sz == pool.ncommitted)
783 auto u = pool.extendPages(minsz - sz);
790 debug (MEMSTOMP) cstring.memset(p + psize, 0xF0, (psz + sz) * PAGESIZE - psize);
791 cstring.memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
794 return (psz + sz) * PAGESIZE;
801 size_t reserve(size_t size)
808 if (!thread_needLock())
810 return reserveNoSync(size);
812 else synchronized (gcLock)
814 return reserveNoSync(size);
822 private size_t reserveNoSync(size_t size)
827 return gcx.reserve(size);
841 if (!thread_needLock())
843 return freeNoSync(p);
845 else synchronized (gcLock)
847 return freeNoSync(p);
855 private void freeNoSync(void *p)
864 // Find which page it is in
865 pool = gcx.findPool(p);
866 if (!pool) // if not one of ours
868 sentinel_Invariant(p);
870 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
871 biti = cast(size_t)(p - pool.baseAddr) / 16;
872 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
874 bin = cast(Bins)pool.pagetable[pagenum];
875 if (bin == B_PAGE) // if large alloc
882 while (++n < pool.ncommitted && pool.pagetable[n] == B_PAGEPLUS)
884 debug (MEMSTOMP) cstring.memset(p, 0xF2, npages * PAGESIZE);
885 pool.freePages(pagenum, npages);
888 { // Add to free list
889 List *list = cast(List*)p;
891 debug (MEMSTOMP) cstring.memset(p, 0xF2, binsize[bin]);
893 list.next = gcx.bucket[bin];
894 gcx.bucket[bin] = list;
896 gcx.log_free(sentinel_add(p));
901 * Determine the base address of the block containing p. If p is not a gc
902 * allocated pointer, return null.
904 void* addrOf(void *p)
911 if (!thread_needLock())
913 return addrOfNoSync(p);
915 else synchronized (gcLock)
917 return addrOfNoSync(p);
925 void* addrOfNoSync(void *p)
932 return gcx.findBase(p);
937 * Determine the allocated size of pointer p. If p is an interior pointer
938 * or not a gc allocated pointer, return 0.
940 size_t sizeOf(void *p)
947 if (!thread_needLock())
949 return sizeOfNoSync(p);
951 else synchronized (gcLock)
953 return sizeOfNoSync(p);
961 private size_t sizeOfNoSync(void *p)
968 size_t size = gcx.findSize(p);
970 // Check for interior pointer
972 // 1) size is a power of 2 for less than PAGESIZE values
973 // 2) base of memory pool is aligned on PAGESIZE boundary
974 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
976 return size ? size - SENTINEL_EXTRA : 0;
980 if (p == gcx.p_cache)
981 return gcx.size_cache;
983 size_t size = gcx.findSize(p);
985 // Check for interior pointer
987 // 1) size is a power of 2 for less than PAGESIZE values
988 // 2) base of memory pool is aligned on PAGESIZE boundary
989 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
994 gcx.size_cache = size;
1003 * Determine the base address of the block containing p. If p is not a gc
1004 * allocated pointer, return null.
1006 BlkInfo query(void *p)
1014 if (!thread_needLock())
1016 return queryNoSync(p);
1018 else synchronized (gcLock)
1020 return queryNoSync(p);
1028 BlkInfo queryNoSync(void *p)
1032 return gcx.getInfo(p);
1037 * Verify that pointer p:
1038 * 1) belongs to this memory pool
1039 * 2) points to the start of an allocated piece of memory
1040 * 3) is not on a free list
1049 if (!thread_needLock())
1053 else synchronized (gcLock)
1063 private void checkNoSync(void *p)
1067 sentinel_Invariant(p);
1075 p = sentinel_sub(p);
1076 pool = gcx.findPool(p);
1078 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1079 bin = cast(Bins)pool.pagetable[pagenum];
1080 assert(bin <= B_PAGE);
1081 size = binsize[bin];
1082 assert((cast(size_t)p & (size - 1)) == 0);
1088 // Check that p is not on a free list
1091 for (list = gcx.bucket[bin]; list; list = list.next)
1093 assert(cast(void*)list != p);
1104 private void setStackBottom(void *p)
1106 version (STACKGROWSDOWN)
1108 //p = (void *)((uint *)p + 4);
1109 if (p > gcx.stackBottom)
1111 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1112 gcx.stackBottom = p;
1117 //p = (void *)((uint *)p - 4);
1118 if (p < gcx.stackBottom)
1120 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1121 gcx.stackBottom = cast(char*)p;
1128 * add p to list of roots
1130 void addRoot(void *p)
1137 if (!thread_needLock())
1141 else synchronized (gcLock)
1149 * remove p from list of roots
1151 void removeRoot(void *p)
1158 if (!thread_needLock())
1162 else synchronized (gcLock)
1170 * add range to scan for roots
1172 void addRange(void *p, size_t sz)
1179 //debug(PRINTF) printf("+GC.addRange(pbot = x%x, ptop = x%x)\n", pbot, ptop);
1180 if (!thread_needLock())
1182 gcx.addRange(p, p + sz);
1184 else synchronized (gcLock)
1186 gcx.addRange(p, p + sz);
1188 //debug(PRINTF) printf("-GC.addRange()\n");
1195 void removeRange(void *p)
1202 if (!thread_needLock())
1206 else synchronized (gcLock)
1214 * do full garbage collection
1218 debug(PRINTF) printf("GC.fullCollect()\n");
1220 if (!thread_needLock())
1222 gcx.fullcollectshell();
1224 else synchronized (gcLock)
1226 gcx.fullcollectshell();
1234 debug(PRINTF) printf("poolsize = %x, usedsize = %x, freelistsize = %x\n",
1235 stats.poolsize, stats.usedsize, stats.freelistsize);
1243 * do full garbage collection ignoring roots
1245 void fullCollectNoStack()
1247 if (!thread_needLock())
1250 gcx.fullcollectshell();
1253 else synchronized (gcLock)
1256 gcx.fullcollectshell();
1263 * minimize free space usage
1267 if (!thread_needLock())
1271 else synchronized (gcLock)
1279 * Retrieve statistics about garbage collection.
1280 * Useful for debugging and tuning.
1282 void getStats(out GCStats stats)
1284 if (!thread_needLock())
1286 getStatsNoSync(stats);
1288 else synchronized (gcLock)
1290 getStatsNoSync(stats);
1298 private void getStatsNoSync(out GCStats stats)
1307 //debug(PRINTF) printf("getStats()\n");
1308 cstring.memset(&stats, 0, GCStats.sizeof);
1310 for (n = 0; n < gcx.npools; n++)
1311 { Pool *pool = gcx.pooltable[n];
1313 psize += pool.ncommitted * PAGESIZE;
1314 for (size_t j = 0; j < pool.ncommitted; j++)
1316 Bins bin = cast(Bins)pool.pagetable[j];
1319 else if (bin == B_PAGE)
1321 else if (bin < B_PAGE)
1326 for (n = 0; n < B_PAGE; n++)
1328 //debug(PRINTF) printf("bin %d\n", n);
1329 for (List *list = gcx.bucket[n]; list; list = list.next)
1331 //debug(PRINTF) printf("\tlist %x\n", list);
1332 flsize += binsize[n];
1336 usize = bsize - flsize;
1338 stats.poolsize = psize;
1339 stats.usedsize = bsize - flsize;
1340 stats.freelistsize = flsize;
1345 /* ============================ Gcx =============================== */
1349 COMMITSIZE = (4096*16),
1350 POOLSIZE = (4096*256),
1364 B_PAGE, // start of large alloc
1365 B_PAGEPLUS, // continuation of large alloc
1366 B_FREE, // free page
1367 B_UNCOMMITTED, // memory not committed for this page
1388 const uint binsize[B_MAX] = [ 16,32,64,128,256,512,1024,2048,4096 ];
1389 const uint notbinsize[B_MAX] = [ ~(16u-1),~(32u-1),~(64u-1),~(128u-1),~(256u-1),
1390 ~(512u-1),~(1024u-1),~(2048u-1),~(4096u-1) ];
1392 /* ============================ Gcx =============================== */
1397 debug (THREADINVARIANT)
1400 void thread_Invariant()
1402 if (self != pthread_self())
1403 printf("thread_Invariant(): gcx = %x, self = %x, pthread_self() = %x\n", this, self, pthread_self());
1404 assert(self == pthread_self());
1409 void thread_Invariant() { } // no-op stub used when debug(THREADINVARIANT) checking is not enabled
1423 uint noStack; // !=0 means don't scan stack
1424 uint log; // turn on logging
1428 int disabled; // turn off collections if >0
1430 byte *minAddr; // min(baseAddr)
1431 byte *maxAddr; // max(topAddr)
1436 List *bucket[B_MAX]; // free list for each size
1442 (cast(byte*)this)[0 .. Gcx.sizeof] = 0;
1443 stackBottom = cast(char*)&dummy;
1445 debug (THREADINVARIANT)
1446 self = pthread_self();
1447 //printf("gcx = %p, self = %x\n", this, self);
1456 for (size_t i = 0; i < npools; i++)
1457 { Pool *pool = pooltable[i];
1463 cstdlib.free(pooltable);
1466 cstdlib.free(roots);
1469 cstdlib.free(ranges);
1473 void Invariant() { } // intentionally empty; integrity checks appear to live in the invariant body below — verify
1480 //printf("Gcx.invariant(): this = %p\n", this);
1483 // Assure we're called on the right thread
1484 debug (THREADINVARIANT) assert(self == pthread_self());
1486 for (i = 0; i < npools; i++)
1487 { Pool *pool = pooltable[i];
1492 assert(minAddr == pool.baseAddr);
1496 assert(pool.opCmp(pooltable[i + 1]) < 0);
1498 else if (i + 1 == npools)
1500 assert(maxAddr == pool.topAddr);
1506 assert(rootdim != 0);
1507 assert(nroots <= rootdim);
1512 assert(rangedim != 0);
1513 assert(nranges <= rangedim);
1515 for (i = 0; i < nranges; i++)
1517 assert(ranges[i].pbot);
1518 assert(ranges[i].ptop);
1519 assert(ranges[i].pbot <= ranges[i].ptop);
1523 for (i = 0; i < B_PAGE; i++)
1525 for (List *list = bucket[i]; list; list = list.next)
1536 void addRoot(void *p)
1538 if (nroots == rootdim)
1540 size_t newdim = rootdim * 2 + 16;
1543 newroots = cast(void**)cstdlib.malloc(newdim * newroots[0].sizeof);
1545 onOutOfMemoryError();
1547 { cstring.memcpy(newroots, roots, nroots * newroots[0].sizeof);
1548 cstdlib.free(roots);
1561 void removeRoot(void *p)
1563 for (size_t i = nroots; i--;)
1568 cstring.memmove(roots + i, roots + i + 1, (nroots - i) * roots[0].sizeof);
1579 void addRange(void *pbot, void *ptop)
1581 debug(PRINTF) printf("Thread %x ", pthread_self());
1582 debug(PRINTF) printf("%x.Gcx::addRange(%x, %x), nranges = %d\n", this, pbot, ptop, nranges);
1583 if (nranges == rangedim)
1585 size_t newdim = rangedim * 2 + 16;
1588 newranges = cast(Range*)cstdlib.malloc(newdim * newranges[0].sizeof);
1590 onOutOfMemoryError();
1592 { cstring.memcpy(newranges, ranges, nranges * newranges[0].sizeof);
1593 cstdlib.free(ranges);
1598 ranges[nranges].pbot = pbot;
1599 ranges[nranges].ptop = ptop;
1607 void removeRange(void *pbot)
1609 debug(PRINTF) printf("Thread %x ", pthread_self());
1610 debug(PRINTF) printf("%x.Gcx.removeRange(%x), nranges = %d\n", this, pbot, nranges);
1611 for (size_t i = nranges; i--;)
1613 if (ranges[i].pbot == pbot)
1616 cstring.memmove(ranges + i, ranges + i + 1, (nranges - i) * ranges[0].sizeof);
1620 debug(PRINTF) printf("Wrong thread\n");
1622 // This is a fatal error, but ignore it.
1623 // The problem is that we can get a Close() call on a thread
1624 // other than the one the range was allocated on.
1630 * Find Pool that pointer is in.
1631 * Return null if not in a Pool.
1632 * Assume pooltable[] is sorted.
1634 Pool *findPool(void *p)
1636 if (p >= minAddr && p < maxAddr)
1640 return pooltable[0];
1643 for (size_t i = 0; i < npools; i++)
1646 pool = pooltable[i];
1647 if (p < pool.topAddr)
1648 { if (pool.baseAddr <= p)
1659 * Find base address of block containing pointer p.
1660 * Returns null if not a gc'd pointer
1662 void* findBase(void *p)
1669 size_t offset = cast(size_t)(p - pool.baseAddr);
1670 size_t pn = offset / PAGESIZE;
1671 Bins bin = cast(Bins)pool.pagetable[pn];
1673 // Adjust bit to be at start of allocated memory block
1676 return pool.baseAddr + (offset & notbinsize[bin]);
1678 else if (bin == B_PAGEPLUS)
1681 { --pn, offset -= PAGESIZE;
1682 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1684 return pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1688 // we are in a B_FREE or B_UNCOMMITTED page
1697 * Find size of pointer p.
1698 * Returns 0 if not a gc'd pointer
1700 size_t findSize(void *p)
1711 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1712 bin = cast(Bins)pool.pagetable[pagenum];
1713 size = binsize[bin];
1715 { size_t npages = pool.ncommitted;
1719 pt = &pool.pagetable[0];
1720 for (i = pagenum + 1; i < npages; i++)
1722 if (pt[i] != B_PAGEPLUS)
1725 size = (i - pagenum) * PAGESIZE;
1735 BlkInfo getInfo(void* p)
1743 size_t offset = cast(size_t)(p - pool.baseAddr);
1744 size_t pn = offset / PAGESIZE;
1745 Bins bin = cast(Bins)pool.pagetable[pn];
1747 ////////////////////////////////////////////////////////////////////
1749 ////////////////////////////////////////////////////////////////////
1753 info.base = pool.baseAddr + (offset & notbinsize[bin]);
1755 else if (bin == B_PAGEPLUS)
1758 { --pn, offset -= PAGESIZE;
1759 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1761 info.base = pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1763 // fix bin for use by size calc below
1764 bin = cast(Bins)pool.pagetable[pn];
1767 ////////////////////////////////////////////////////////////////////
1769 ////////////////////////////////////////////////////////////////////
1771 info.size = binsize[bin];
1773 { size_t npages = pool.ncommitted;
1777 pt = &pool.pagetable[0];
1778 for (i = pn + 1; i < npages; i++)
1780 if (pt[i] != B_PAGEPLUS)
1783 info.size = (i - pn) * PAGESIZE;
1786 ////////////////////////////////////////////////////////////////////
1788 ////////////////////////////////////////////////////////////////////
1790 info.attr = getBits(pool, cast(size_t)(offset / 16));
1797 * Compute bin for size.
1799 static Bins findBin(size_t size)
1808 else if (size <= 32)
1843 * Allocate a new pool of at least size bytes.
1844 * Sort it into pooltable[].
1845 * Mark all memory in the pool as B_FREE.
1846 * Return the actual number of bytes reserved or 0 on error.
1848 size_t reserve(size_t size)
1850 size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
1851 Pool* pool = newPool(npages);
1853 if (!pool || pool.extendPages(npages) == OPFAIL)
1855 return pool.ncommitted * PAGESIZE;
1860 * Minimizes physical memory usage by returning free pools to the OS.
1869 for (n = 0; n < npools; n++)
1871 pool = pooltable[n];
1872 ncommitted = pool.ncommitted;
1873 for (pn = 0; pn < ncommitted; pn++)
1875 if (cast(Bins)pool.pagetable[pn] != B_FREE)
1878 if (pn < ncommitted)
1885 cstring.memmove(pooltable + n,
1887 (--npools - n) * (Pool*).sizeof);
1888 minAddr = pooltable[0].baseAddr;
1889 maxAddr = pooltable[npools - 1].topAddr;
1895 * Allocate a chunk of memory that is larger than a page.
1896 * Return null if out of memory.
1898 void *bigAlloc(size_t size)
1908 npages = (size + PAGESIZE - 1) / PAGESIZE;
1912 // This code could use some refinement when repeatedly
1913 // allocating very large arrays.
1915 for (n = 0; n < npools; n++)
1917 pool = pooltable[n];
1918 pn = pool.allocPages(npages);
1932 freedpages = fullcollectshell();
1933 if (freedpages >= npools * ((POOLSIZE / PAGESIZE) / 4))
1937 // Release empty pools to prevent bloat
1939 // Allocate new pool
1940 pool = newPool(npages);
1945 pn = pool.allocPages(npages);
1946 assert(pn != OPFAIL);
1949 // Release empty pools to prevent bloat
1951 // Allocate new pool
1952 pool = newPool(npages);
1955 pn = pool.allocPages(npages);
1956 assert(pn != OPFAIL);
1966 pool.pagetable[pn] = B_PAGE;
1968 cstring.memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
1969 p = pool.baseAddr + pn * PAGESIZE;
1970 cstring.memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
1971 debug (MEMSTOMP) cstring.memset(p, 0xF1, size);
1972 //debug(PRINTF) printf("\tp = %x\n", p);
1976 return null; // let mallocNoSync handle the error
1981 * Allocate a new pool with at least npages in it.
1982 * Sort it into pooltable[].
1983 * Return null if failed.
1985 Pool *newPool(size_t npages)
1988 Pool** newpooltable;
1992 //debug(PRINTF) printf("************Gcx::newPool(npages = %d)****************\n", npages);
1994 // Round up to COMMITSIZE pages
1995 npages = (npages + (COMMITSIZE/PAGESIZE) - 1) & ~(COMMITSIZE/PAGESIZE - 1);
1997 // Minimum of POOLSIZE
1998 if (npages < POOLSIZE/PAGESIZE)
1999 npages = POOLSIZE/PAGESIZE;
2000 else if (npages > POOLSIZE/PAGESIZE)
2001 { // Give us 150% of requested size, so there's room to extend
2002 auto n = npages + (npages >> 1);
2003 if (n < size_t.max/PAGESIZE)
2007 // Allocate successively larger pools up to 8 megs
2013 n = 8; // cap pool size at 8 megs
2014 n *= (POOLSIZE / PAGESIZE);
2019 pool = cast(Pool *)cstdlib.calloc(1, Pool.sizeof);
2022 pool.initialize(npages);
2026 newnpools = npools + 1;
2027 newpooltable = cast(Pool **)cstdlib.realloc(pooltable, newnpools * (Pool *).sizeof);
2031 // Sort pool into newpooltable[]
2032 for (i = 0; i < npools; i++)
2034 if (pool.opCmp(newpooltable[i]) < 0)
2037 cstring.memmove(newpooltable + i + 1, newpooltable + i, (npools - i) * (Pool *).sizeof);
2038 newpooltable[i] = pool;
2040 pooltable = newpooltable;
2043 minAddr = pooltable[0].baseAddr;
2044 maxAddr = pooltable[npools - 1].topAddr;
2056 * Allocate a page of bin's.
2060 int allocPage(Bins bin)
2068 //debug(PRINTF) printf("Gcx::allocPage(bin = %d)\n", bin);
2069 for (n = 0; n < npools; n++)
2071 pool = pooltable[n];
2072 pn = pool.allocPages(1);
2079 pool.pagetable[pn] = cast(ubyte)bin;
2081 // Convert page to free list
2082 size_t size = binsize[bin];
2083 List **b = &bucket[bin];
2085 p = pool.baseAddr + pn * PAGESIZE;
2086 ptop = p + PAGESIZE;
2087 for (; p < ptop; p += size)
2089 (cast(List *)p).next = *b;
2097 * Search a range of memory values and mark any pointers into the GC pool.
2099 void mark(void *pbot, void *ptop)
2101 void **p1 = cast(void **)pbot;
2102 void **p2 = cast(void **)ptop;
2106 //printf("marking range: %p -> %p\n", pbot, ptop);
2107 for (; p1 < p2; p1++)
2110 byte *p = cast(byte *)(*p1);
2112 //if (log) debug(PRINTF) printf("\tmark %x\n", p);
2113 if (p >= minAddr && p < maxAddr)
2115 if ((cast(size_t)p & ~(PAGESIZE-1)) == pcache)
2121 size_t offset = cast(size_t)(p - pool.baseAddr);
2123 size_t pn = offset / PAGESIZE;
2124 Bins bin = cast(Bins)pool.pagetable[pn];
2126 //debug(PRINTF) printf("\t\tfound pool %x, base=%x, pn = %d, bin = %d, biti = x%x\n", pool, pool.baseAddr, pn, bin, biti);
2128 // Adjust bit to be at start of allocated memory block
2131 biti = (offset & notbinsize[bin]) >> 4;
2132 //debug(PRINTF) printf("\t\tbiti = x%x\n", biti);
2134 else if (bin == B_PAGEPLUS)
2138 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
2139 biti = pn * (PAGESIZE / 16);
2143 // Don't mark bits in B_FREE or B_UNCOMMITTED pages
2147 if (bin >= B_PAGE) // Cache B_PAGE and B_PAGEPLUS lookups
2148 pcache = cast(size_t)p & ~(PAGESIZE-1);
2150 //debug(PRINTF) printf("\t\tmark(x%x) = %d\n", biti, pool.mark.test(biti));
2151 if (!pool.mark.test(biti))
2153 //if (log) debug(PRINTF) printf("\t\tmarking %x\n", p);
2154 pool.mark.set(biti);
2155 if (!pool.noscan.test(biti))
2157 pool.scan.set(biti);
2160 log_parent(sentinel_add(pool.baseAddr + biti * 16), sentinel_add(pbot));
2165 anychanges |= changes;
2170 * Return number of full pages free'd.
2172 size_t fullcollectshell()
2174 // The purpose of the 'shell' is to ensure all the registers
2175 // get put on the stack so they'll be scanned
2180 __builtin_unwind_init();
2187 uint eax,ecx,edx,ebx,ebp,esi,edi;
2200 else version (X86_64)
2202 ulong rax,rbx,rcx,rdx,rbp,rsi,rdi,rsp,r8,r9,r10,r11,r12,r13,r14,r15;
2205 movq rax[RBP], RAX ;
2206 movq rbx[RBP], RBX ;
2207 movq rcx[RBP], RCX ;
2208 movq rdx[RBP], RDX ;
2209 movq rbp[RBP], RBP ;
2210 movq rsi[RBP], RSI ;
2211 movq rdi[RBP], RDI ;
2212 movq rsp[RBP], RSP ;
2215 movq r10[RBP], R10 ;
2216 movq r11[RBP], R11 ;
2217 movq r12[RBP], R12 ;
2218 movq r13[RBP], R13 ;
2219 movq r14[RBP], R14 ;
2220 movq r15[RBP], R15 ;
2225 static assert( false, "Architecture not supported." );
2236 result = fullcollect(sp);
2259 size_t fullcollect(void *stackTop)
2264 debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
2266 thread_suspendAll();
2272 for (n = 0; n < npools; n++)
2274 pool = pooltable[n];
2277 pool.freebits.zero();
2280 // Mark each free entry, so it doesn't get scanned
2281 for (n = 0; n < B_PAGE; n++)
2283 for (List *list = bucket[n]; list; list = list.next)
2285 pool = findPool(list);
2287 pool.freebits.set(cast(size_t)(cast(byte*)list - pool.baseAddr) / 16);
2291 for (n = 0; n < npools; n++)
2293 pool = pooltable[n];
2294 pool.mark.copy(&pool.freebits);
2297 rt_scanStaticData( &mark );
2299 version (MULTI_THREADED)
2303 // Scan stacks and registers for each paused thread
2304 thread_scanAll( &mark, stackTop );
2311 // Scan stack for main thread
2312 debug(PRINTF) printf(" scan stack bot = %x, top = %x\n", stackTop, stackBottom);
2313 version (STACKGROWSDOWN)
2314 mark(stackTop, stackBottom);
2316 mark(stackBottom, stackTop);
2321 debug(COLLECT_PRINTF) printf("scan roots[]\n");
2322 mark(roots, roots + nroots);
2325 debug(COLLECT_PRINTF) printf("scan ranges[]\n");
2327 for (n = 0; n < nranges; n++)
2329 debug(COLLECT_PRINTF) printf("\t%x .. %x\n", ranges[n].pbot, ranges[n].ptop);
2330 mark(ranges[n].pbot, ranges[n].ptop);
2334 debug(COLLECT_PRINTF) printf("\tscan heap\n");
2338 for (n = 0; n < npools; n++)
2344 pool = pooltable[n];
2346 bbase = pool.scan.base();
2347 btop = bbase + pool.scan.nwords;
2348 for (b = bbase; b < btop;)
2362 o = pool.baseAddr + (b - bbase) * 32 * 16;
2363 if (!(bitm & 0xFFFF))
2368 for (; bitm; o += 16, bitm >>= 1)
2373 pn = cast(size_t)(o - pool.baseAddr) / PAGESIZE;
2374 bin = cast(Bins)pool.pagetable[pn];
2377 mark(o, o + binsize[bin]);
2379 else if (bin == B_PAGE || bin == B_PAGEPLUS)
2381 if (bin == B_PAGEPLUS)
2383 while (pool.pagetable[pn - 1] != B_PAGE)
2387 while (pn + u < pool.ncommitted && pool.pagetable[pn + u] == B_PAGEPLUS)
2389 mark(o, o + u * PAGESIZE);
2398 // Free up everything not marked
2399 debug(COLLECT_PRINTF) printf("\tfree'ing\n");
2400 size_t freedpages = 0;
2402 for (n = 0; n < npools; n++)
2407 pool = pooltable[n];
2408 bbase = pool.mark.base();
2409 ncommitted = pool.ncommitted;
2410 for (pn = 0; pn < ncommitted; pn++, bbase += PAGESIZE / (32 * 16))
2412 Bins bin = cast(Bins)pool.pagetable[pn];
2419 auto size = binsize[bin];
2421 p = pool.baseAddr + pn * PAGESIZE;
2422 ptop = p + PAGESIZE;
2423 biti = pn * (PAGESIZE/16);
2424 bitstride = size / 16;
2426 version(none) // BUG: doesn't work because freebits() must also be cleared
2428 // If free'd entire page
2429 if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 && bbase[3] == 0 &&
2430 bbase[4] == 0 && bbase[5] == 0 && bbase[6] == 0 && bbase[7] == 0)
2432 for (; p < ptop; p += size, biti += bitstride)
2434 if (pool.finals.nbits && pool.finals.testClear(biti))
2435 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2436 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
2438 List *list = cast(List *)p;
2439 //debug(PRINTF) printf("\tcollecting %x\n", list);
2440 log_free(sentinel_add(list));
2442 debug (MEMSTOMP) cstring.memset(p, 0xF3, size);
2444 pool.pagetable[pn] = B_FREE;
2446 //debug(PRINTF) printf("freeing entire page %d\n", pn);
2450 for (; p < ptop; p += size, biti += bitstride)
2452 if (!pool.mark.test(biti))
2454 sentinel_Invariant(sentinel_add(p));
2456 pool.freebits.set(biti);
2457 if (pool.finals.nbits && pool.finals.testClear(biti))
2458 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2459 clrBits(pool, biti, BlkAttr.ALL_BITS);
2461 List *list = cast(List *)p;
2462 debug(PRINTF) printf("\tcollecting %x\n", list);
2463 log_free(sentinel_add(list));
2465 debug (MEMSTOMP) cstring.memset(p, 0xF3, size);
2471 else if (bin == B_PAGE)
2472 { size_t biti = pn * (PAGESIZE / 16);
2474 if (!pool.mark.test(biti))
2475 { byte *p = pool.baseAddr + pn * PAGESIZE;
2477 sentinel_Invariant(sentinel_add(p));
2478 if (pool.finals.nbits && pool.finals.testClear(biti))
2479 rt_finalize(sentinel_add(p), false/*noStack > 0*/);
2480 clrBits(pool, biti, BlkAttr.ALL_BITS);
2482 debug(COLLECT_PRINTF) printf("\tcollecting big %x\n", p);
2483 log_free(sentinel_add(p));
2484 pool.pagetable[pn] = B_FREE;
2486 debug (MEMSTOMP) cstring.memset(p, 0xF3, PAGESIZE);
2487 while (pn + 1 < ncommitted && pool.pagetable[pn + 1] == B_PAGEPLUS)
2490 pool.pagetable[pn] = B_FREE;
2495 cstring.memset(p, 0xF3, PAGESIZE);
2506 // Free complete pages, rebuild free list
2507 debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
2508 size_t recoveredpages = 0;
2509 for (n = 0; n < npools; n++)
2513 pool = pooltable[n];
2514 ncommitted = pool.ncommitted;
2515 for (pn = 0; pn < ncommitted; pn++)
2517 Bins bin = cast(Bins)pool.pagetable[pn];
2523 size_t size = binsize[bin];
2524 size_t bitstride = size / 16;
2525 size_t bitbase = pn * (PAGESIZE / 16);
2526 size_t bittop = bitbase + (PAGESIZE / 16);
2530 for (biti = bitbase; biti < bittop; biti += bitstride)
2531 { if (!pool.freebits.test(biti))
2534 pool.pagetable[pn] = B_FREE;
2539 p = pool.baseAddr + pn * PAGESIZE;
2540 for (u = 0; u < PAGESIZE; u += size)
2541 { biti = bitbase + u / 16;
2542 if (pool.freebits.test(biti))
2545 list = cast(List *)(p + u);
2546 if (list.next != bucket[bin]) // avoid unnecessary writes
2547 list.next = bucket[bin];
2555 debug(COLLECT_PRINTF) printf("recovered pages = %d\n", recoveredpages);
2556 debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, npools);
2558 return freedpages + recoveredpages;
// Collect the BlkAttr flag bits currently recorded for the block whose
// bit index is `biti` within `pool`.
// NOTE(review): this listing elides lines — the declaration and final
// `return` of the `bits` accumulator are not visible here; confirm against
// the full file.
2565 uint getBits(Pool* pool, size_t biti)
// `finals` is allocated lazily (see setBits), so guard with nbits before testing.
2574 if (pool.finals.nbits &&
2575 pool.finals.test(biti))
2576 bits |= BlkAttr.FINALIZE;
2577 if (pool.noscan.test(biti))
2578 bits |= BlkAttr.NO_SCAN;
// NO_MOVE support is stubbed out; the nomove bitmap is commented out
// consistently in setBits/clrBits as well.
2579 // if (pool.nomove.nbits &&
2580 // pool.nomove.test(biti))
2581 // bits |= BlkAttr.NO_MOVE;
// Set the BlkAttr flags in `mask` for the block at bit index `biti` in `pool`.
2589 void setBits(Pool* pool, size_t biti, uint mask)
2596 if (mask & BlkAttr.FINALIZE)
// The finals bitmap is allocated on first use, sized to match the mark
// bitmap (one bit per 16-byte granule of the pool).
2598 if (!pool.finals.nbits)
2599 pool.finals.alloc(pool.mark.nbits);
2600 pool.finals.set(biti);
2602 if (mask & BlkAttr.NO_SCAN)
2604 pool.noscan.set(biti);
// NO_MOVE support is stubbed out (matches getBits/clrBits).
2606 // if (mask & BlkAttr.NO_MOVE)
2608 // if (!pool.nomove.nbits)
2609 // pool.nomove.alloc(pool.mark.nbits);
2610 // pool.nomove.set(biti);
// Clear the BlkAttr flags in `mask` for the block at bit index `biti` in `pool`.
2618 void clrBits(Pool* pool, size_t biti, uint mask)
// finals may never have been allocated, so check nbits before clearing.
2625 if (mask & BlkAttr.FINALIZE && pool.finals.nbits)
2626 pool.finals.clear(biti);
2627 if (mask & BlkAttr.NO_SCAN)
2628 pool.noscan.clear(biti);
// NO_MOVE support is stubbed out (matches getBits/setBits).
2629 // if (mask & BlkAttr.NO_MOVE && pool.nomove.nbits)
2630 // pool.nomove.clear(biti);
2634 /***** Leak Detector ******/
2645 //debug(PRINTF) printf("+log_init()\n");
2646 current.reserve(1000);
2648 //debug(PRINTF) printf("-log_init()\n");
// Leak detector (debug(LOGGING)): record an allocation of `size` bytes at `p`
// in the `current` log.
// NOTE(review): the lines that actually append to `current` are elided in
// this listing; only the tracing printfs are visible.
2652 void log_malloc(void *p, size_t size)
2654 //debug(PRINTF) printf("+log_malloc(p = %x, size = %d)\n", p, size);
2667 //debug(PRINTF) printf("-log_malloc()\n");
// Leak detector (debug(LOGGING)): remove the entry for `p` from the
// `current` log; warns if `p` was never recorded as allocated.
2671 void log_free(void *p)
2673 //debug(PRINTF) printf("+log_free(%x)\n", p);
// find() result handling is elided here; presumably a sentinel "not found"
// value triggers the warning below — confirm against the full file.
2676 i = current.find(p);
2679 debug(PRINTF) printf("free'ing unallocated memory %x\n", p);
2683 //debug(PRINTF) printf("-log_free()\n");
2689 //debug(PRINTF) printf("+log_collect()\n");
2690 // Print everything in current that is not in prev
2692 debug(PRINTF) printf("New pointers this cycle: --------------------------------\n");
2694 for (size_t i = 0; i < current.dim; i++)
2698 j = prev.find(current.data[i].p);
2700 current.data[i].print();
2705 debug(PRINTF) printf("All roots this cycle: --------------------------------\n");
2706 for (size_t i = 0; i < current.dim; i++)
2711 p = current.data[i].p;
2712 if (!findPool(current.data[i].parent))
2714 j = prev.find(current.data[i].p);
2716 debug(PRINTF) printf("N");
2718 debug(PRINTF) printf(" ");;
2719 current.data[i].print();
2723 debug(PRINTF) printf("Used = %d-------------------------------------------------\n", used);
2724 prev.copy(¤t);
2726 debug(PRINTF) printf("-log_collect()\n");
// Leak detector (debug(LOGGING)): record that `parent` points at block `p`,
// so collection reports can show who kept an object alive.
2730 void log_parent(void *p, void *parent)
2732 //debug(PRINTF) printf("+log_parent()\n");
2735 i = current.find(p);
2738 debug(PRINTF) printf("parent'ing unallocated memory %x, parent = %x\n", p, parent);
// Diagnostic info for the warning path: locate p inside its pool and
// compute page/bin/bit-index. (Pool lookup line is elided in this listing.)
2742 size_t offset = cast(size_t)(p - pool.baseAddr);
2744 size_t pn = offset / PAGESIZE;
2745 Bins bin = cast(Bins)pool.pagetable[pn];
2746 biti = (offset & notbinsize[bin]);
2747 debug(PRINTF) printf("\tbin = %d, offset = x%x, biti = x%x\n", bin, offset, biti);
2751 current.data[i].parent = parent;
2753 //debug(PRINTF) printf("-log_parent()\n");
// No-op stubs used when the LOGGING debug switch is off: same signatures
// as the leak-detector versions above, zero runtime cost.
2760 void log_malloc(void *p, size_t size) { }
2761 void log_free(void *p) { }
2762 void log_collect() { }
2763 void log_parent(void *p, void *parent) { }
2768 /* ============================ Pool =============================== */
// Per-pool bitmaps; each bit covers one 16-byte granule of the pool
// (they are alloc'ed with poolsize / 16 bits in initialize()).
2775 GCBits mark; // entries already scanned, or should not be scanned
2776 GCBits scan; // entries that need to be scanned
2777 GCBits freebits; // entries that are on the free list
2778 GCBits finals; // entries that need finalizer run on them (lazily allocated)
2779 GCBits noscan; // entries that should not be scanned
// Pages are reserved up front but committed incrementally (extendPages).
2782 size_t ncommitted; // ncommitted <= npages
// Reserve address space for `npages` pages, allocate the per-granule
// bitmaps, and mark every page uncommitted. Pages are committed later,
// on demand, by extendPages().
// NOTE(review): the os_mem_map failure branch is partially elided in this
// listing (only its debug printfs at 2800/2801 are visible).
2786 void initialize(size_t npages)
2790 //debug(PRINTF) printf("Pool::Pool(%u)\n", npages);
2791 poolsize = npages * PAGESIZE;
2792 assert(poolsize >= POOLSIZE);
2793 baseAddr = cast(byte *)os_mem_map(poolsize);
2795 // Some of the code depends on page alignment of memory pools
2796 assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);
2800 //debug(PRINTF) printf("GC fail: poolsize = x%x, errno = %d\n", poolsize, errno);
2801 //debug(PRINTF) printf("message = '%s'\n", sys_errlist[errno]);
2807 topAddr = baseAddr + poolsize;
// One bit per 16-byte granule for each bitmap; finals stays lazily allocated.
2809 mark.alloc(cast(size_t)poolsize / 16);
2810 scan.alloc(cast(size_t)poolsize / 16);
2811 freebits.alloc(cast(size_t)poolsize / 16);
2812 noscan.alloc(cast(size_t)poolsize / 16);
// One byte per page recording its Bins state; all pages start uncommitted.
2814 pagetable = cast(ubyte*)cstdlib.malloc(npages);
2816 onOutOfMemoryError();
2817 cstring.memset(pagetable, B_UNCOMMITTED, npages);
2819 this.npages = npages;
2832 result = os_mem_decommit(baseAddr, 0, ncommitted * PAGESIZE);
2833 assert(result == 0);
2839 result = os_mem_unmap(baseAddr, npages * PAGESIZE);
2840 assert(result == 0);
2848 cstdlib.free(pagetable);
2858 void Invariant() { }
2865 //freebits.Invariant();
2866 //finals.Invariant();
2867 //noscan.Invariant();
2871 //if (baseAddr + npages * PAGESIZE != topAddr)
2872 //printf("baseAddr = %p, npages = %d, topAddr = %p\n", baseAddr, npages, topAddr);
2873 assert(baseAddr + npages * PAGESIZE == topAddr);
2874 assert(ncommitted <= npages);
2877 for (size_t i = 0; i < npages; i++)
2878 { Bins bin = cast(Bins)pagetable[i];
2880 assert(bin < B_MAX);
// Search the committed pages for a run of `n` consecutive B_FREE pages;
// if none exists, fall back to committing more pages via extendPages().
2886 * Allocate n pages from Pool.
2887 * Returns OPFAIL on failure.
2889 size_t allocPages(size_t n)
2894 //debug(PRINTF) printf("Pool::allocPages(n = %d)\n", n);
// Linear scan counting consecutive free pages (run-tracking lines elided
// in this listing).
2896 for (i = 0; i < ncommitted; i++)
2898 if (pagetable[i] == B_FREE)
2901 { //debug(PRINTF) printf("\texisting pn = %d\n", i - n + 1);
2908 return extendPages(n);
// Commit at least `n` more pages (rounded up to COMMITSIZE granularity,
// clamped to the reserved pool size) and mark them B_FREE.
2912 * Extend Pool by n pages.
2913 * Returns OPFAIL on failure.
2915 size_t extendPages(size_t n)
2917 //debug(PRINTF) printf("Pool::extendPages(n = %d)\n", n);
2918 if (ncommitted + n <= npages)
// Round the request up to a multiple of COMMITSIZE/PAGESIZE pages.
2922 tocommit = (n + (COMMITSIZE/PAGESIZE) - 1) & ~(COMMITSIZE/PAGESIZE - 1);
2923 if (ncommitted + tocommit > npages)
2924 tocommit = npages - ncommitted;
2925 //debug(PRINTF) printf("\tlooking to commit %d more pages\n", tocommit);
2927 if (os_mem_commit(baseAddr, ncommitted * PAGESIZE, tocommit * PAGESIZE) == 0)
2929 cstring.memset(pagetable + ncommitted, B_FREE, tocommit);
2930 auto i = ncommitted;
2931 ncommitted += tocommit;
// Walk back over any free pages that immediately precede the newly
// committed region so the caller's run can start earlier.
2933 while (i && pagetable[i - 1] == B_FREE)
2938 //debug(PRINTF) printf("\tfailed to commit %d pages\n", tocommit);
// Return a run of pages to the free state by rewriting their pagetable
// entries; the memory itself stays committed.
2946 * Free npages pages starting with pagenum.
2948 void freePages(size_t pagenum, size_t npages)
2950 cstring.memset(&pagetable[pagenum], B_FREE, npages);
2955 * Used for sorting pooltable[]
2959 if (baseAddr < p2.baseAddr)
2962 return cast(int)(baseAddr > p2.baseAddr);
2967 /* ============================ SENTINEL =============================== */
// Sentinel layout around each user block (debug SENTINEL):
//   [size_t size][size_t SENTINEL_PRE][user data ...][ubyte SENTINEL_POST]
// `p` in the accessors below is the user pointer (start of user data).
2972 const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
2973 const ubyte SENTINEL_POST = 0xF5; // 8 bits
2974 const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;
// Stored request size, two words before the user data.
2977 size_t* sentinel_size(void *p) { return &(cast(size_t *)p)[-2]; }
// Underrun guard word, immediately before the user data.
2978 size_t* sentinel_pre(void *p) { return &(cast(size_t *)p)[-1]; }
// Overrun guard byte, immediately after the user data.
2979 ubyte* sentinel_post(void *p) { return &(cast(ubyte *)p)[*sentinel_size(p)]; }
// Stamp the guard words around a freshly allocated block of `size`
// user bytes; sentinel_Invariant() later verifies they are intact.
2982 void sentinel_init(void *p, size_t size)
2984 *sentinel_size(p) = size;
2985 *sentinel_pre(p) = SENTINEL_PRE;
2986 *sentinel_post(p) = SENTINEL_POST;
// Assert that neither guard was overwritten, i.e. no buffer
// underrun/overrun occurred on this block.
2990 void sentinel_Invariant(void *p)
2992 assert(*sentinel_pre(p) == SENTINEL_PRE);
2993 assert(*sentinel_post(p) == SENTINEL_POST);
// Convert a raw allocation pointer to the user pointer by skipping the
// leading size word and SENTINEL_PRE word.
2997 void *sentinel_add(void *p)
2999 return p + 2 * size_t.sizeof;
// Inverse of sentinel_add: recover the raw allocation pointer from the
// user pointer.
3003 void *sentinel_sub(void *p)
3005 return p - 2 * size_t.sizeof;
// Non-SENTINEL build: no extra bytes and no-op guard routines, so callers
// can invoke them unconditionally.
3010 const uint SENTINEL_EXTRA = 0;
3013 void sentinel_init(void *p, size_t size)
3018 void sentinel_Invariant(void *p)
3023 void *sentinel_add(void *p)
3029 void *sentinel_sub(void *p)