2 * This module contains the garbage collector implementation.
4 * Copyright: Copyright (C) 2001-2007 Digital Mars, www.digitalmars.com.
7 * This software is provided 'as-is', without any express or implied
8 * warranty. In no event will the authors be held liable for any damages
9 * arising from the use of this software.
11 * Permission is granted to anyone to use this software for any purpose,
12 * including commercial applications, and to alter it and redistribute it
13 * freely, in both source and binary form, subject to the following
16 * o The origin of this software must not be misrepresented; you must not
17 * claim that you wrote the original software. If you use this software
18 * in a product, an acknowledgment in the product documentation would be
19 * appreciated but is not required.
20 * o Altered source versions must be plainly marked as such, and must not
21 * be misrepresented as being the original software.
22 * o This notice may not be removed or altered from any source
24 * Authors: Walter Bright, David Friedman, Sean Kelly
29 // D Programming Language Garbage Collector implementation
31 /************** Debugging ***************************/
33 //debug = PRINTF; // turn on printf's
34 //debug = COLLECT_PRINTF; // turn on printf's
35 //debug = THREADINVARIANT; // check thread integrity
36 //debug = LOGGING; // log allocations / frees
37 //debug = MEMSTOMP; // stomp on memory
38 //debug = SENTINEL; // add underrun/overrun protection
39 //debug = PTRCHECK; // more pointer checking
40 //debug = PTRCHECK2; // thorough but slow pointer checking
42 /*************** Configuration *********************/
44 version = STACKGROWSDOWN; // growing the stack means subtracting from the stack pointer
45 // (use for Intel X86 CPUs)
46 // else growing the stack means adding to the stack pointer
47 version = MULTI_THREADED; // produce multithreaded version
49 /***************************************************/
55 import cstdlib = tango.stdc.stdlib : calloc, free, malloc, realloc;
56 import cstring = tango.stdc.string : memcpy, memmove, memset;
58 debug(THREADINVARIANT) import tango.stdc.posix.pthread;
59 debug(PRINTF) import tango.stdc.posix.pthread : pthread_self, pthread_t;
60 debug import tango.stdc.stdio : printf;
64 // BUG: The following import will likely not work, since the gcc
65 // subdirectory is elsewhere. Instead, perhaps the functions
66 // could be declared directly or some other resolution could
68 import gcc.builtins; // for __builtin_unwind_init
// Per-block attribute bits (fragment of the BlkAttr enum; the enum header
// is outside this view). Stored per 16-byte bin index via set/get/clrBits.
// FINALIZE: run the object's finalizer (rt_finalize) when the block is swept.
83 FINALIZE = 0b0000_0001,
// NO_SCAN: block contains no pointers; collector will not scan its interior.
84 NO_SCAN = 0b0000_0010,
// NO_MOVE: block must stay at its address — NOTE(review): not referenced in
// the visible code; presumably honored by a moving phase elsewhere.
85 NO_MOVE = 0b0000_0100,
// ALL_BITS: mask covering every attribute bit; used to clear all attributes
// at once (see clrBits(pool, biti, BlkAttr.ALL_BITS) in free/realloc paths).
86 ALL_BITS = 0b1111_1111
// --- Hooks into the language runtime (implemented elsewhere) ---
// Bottom (highest address on a down-growing stack) of the main thread's stack.
89 extern (C) void* rt_stackBottom();
// Current top of the main thread's stack.
90 extern (C) void* rt_stackTop();
// Invoke the finalizer for object p; det=true indicates deterministic
// (explicit) destruction rather than collection-triggered.
92 extern (C) void rt_finalize( void* p, bool det = true );
// Callback signature used by the scanning entry points below:
// receives the bottom and top of a memory range to scan for GC pointers.
94 alias void delegate( void*, void* ) scanFn;
// Scan the program's static data segments for roots.
96 extern (C) void rt_scanStaticData( scanFn scan );
98 version (MULTI_THREADED)
// True when more than one thread exists, i.e. the GC lock must be taken.
100 extern (C) bool thread_needLock();
// Pause all threads except the caller so the heap can be scanned safely.
101 extern (C) void thread_suspendAll();
102 extern (C) void thread_resumeAll();
// Scan every paused thread's stack and register state with fn;
// curStackTop overrides the calling thread's own stack top.
104 extern (C) void thread_scanAll( scanFn fn, void* curStackTop = null );
// Raise the runtime's out-of-memory condition (does not return normally).
107 extern (C) void onOutOfMemoryError();
// Sentinel "operation failed" value: all-ones size_t, returned by search
// and allocation helpers (e.g. allocPages, find) when nothing is found.
111 OPFAIL = ~cast(size_t)0
119 /* ======================= Leak Detector =========================== */
134 printf(" p = %x, size = %d, parent = %x ", p, size, parent);
137 printf("%s(%u)", file, line);
157 void reserve(size_t nentries)
159 assert(dim <= allocdim);
160 if (allocdim - dim < nentries)
162 allocdim = (dim + nentries) * 2;
163 assert(dim + nentries <= allocdim);
166 data = cast(Log*)cstdlib.malloc(allocdim * Log.sizeof);
167 if (!data && allocdim)
168 onOutOfMemoryError();
173 newdata = cast(Log*)cstdlib.malloc(allocdim * Log.sizeof);
174 if (!newdata && allocdim)
175 onOutOfMemoryError();
176 cstring.memcpy(newdata, data, dim * Log.sizeof);
190 void remove(size_t i)
192 cstring.memmove(data + i, data + i + 1, (dim - i) * Log.sizeof);
199 for (size_t i = 0; i < dim; i++)
204 return OPFAIL; // not found
208 void copy(LogArray *from)
210 reserve(from.dim - dim);
211 assert(from.dim <= allocdim);
212 cstring.memcpy(data, from.data, from.dim * Log.sizeof);
219 /* ============================ GC =============================== */
222 class GCLock { } // just a dummy so we can get a global lock
225 const uint GCVERSION = 1; // increment every time we change interface
230 // For passing to debug code
234 uint gcversion = GCVERSION;
236 Gcx *gcx; // implementation
237 static ClassInfo gcLock; // global lock
242 gcLock = GCLock.classinfo;
243 gcx = cast(Gcx*)cstdlib.calloc(1, Gcx.sizeof);
245 onOutOfMemoryError();
247 setStackBottom(rt_stackBottom());
255 //debug(PRINTF) printf("Thread %x ", pthread_self());
256 //debug(PRINTF) printf("GC.Dtor()\n");
272 gcx.thread_Invariant();
282 if (!thread_needLock())
284 assert(gcx.disabled > 0);
287 else synchronized (gcLock)
289 assert(gcx.disabled > 0);
300 if (!thread_needLock())
304 else synchronized (gcLock)
314 uint getAttr(void* p)
323 Pool* pool = gcx.findPool(p);
328 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
330 oldb = gcx.getBits(pool, biti);
335 if (!thread_needLock())
339 else synchronized (gcLock)
349 uint setAttr(void* p, uint mask)
358 Pool* pool = gcx.findPool(p);
363 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
365 oldb = gcx.getBits(pool, biti);
366 gcx.setBits(pool, biti, mask);
371 if (!thread_needLock())
375 else synchronized (gcLock)
385 uint clrAttr(void* p, uint mask)
394 Pool* pool = gcx.findPool(p);
399 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
401 oldb = gcx.getBits(pool, biti);
402 gcx.clrBits(pool, biti, mask);
407 if (!thread_needLock())
411 else synchronized (gcLock)
// Allocate size bytes from the GC heap with the given BlkAttr bits.
// Thin locking wrapper: single-threaded programs skip the lock entirely,
// otherwise the global gcLock serializes access to mallocNoSync.
421 void *malloc(size_t size, uint bits = 0)
428 if (!thread_needLock())
// Single-threaded: no lock needed, call the unsynchronized worker directly.
430 return mallocNoSync(size, bits);
432 else synchronized (gcLock)
434 return mallocNoSync(size, bits);
// Unsynchronized allocation worker: caller must hold gcLock (or be the
// only thread). Small requests come from per-size-class free lists
// (bucket[bin]); requests that don't fit a bin go through bigAlloc.
442 private void *mallocNoSync(size_t size, uint bits = 0)
449 //debug(PRINTF) printf("GC::malloc(size = %d, gcx = %p)\n", size, gcx);
451 //debug(PRINTF) printf("gcx.self = %x, pthread_self() = %x\n", gcx.self, pthread_self());
// Reserve room for the debug sentinel guard (zero when SENTINEL is off).
453 size += SENTINEL_EXTRA;
456 // Cache previous binsize lookup - Dave Fladebo.
457 static size_t lastsize = -1;
459 if (size == lastsize)
463 bin = gcx.findBin(size);
// Free list for this bin is empty: try to carve a fresh page, and if that
// fails (and collection isn't disabled) run a collection to reclaim space.
473 if (!gcx.allocPage(bin) && !gcx.disabled) // try to find a new page
475 if (!thread_needLock())
477 /* Then we haven't locked it yet. Be sure
478 * and lock for a collection, since a finalizer
479 * may start a new thread.
481 synchronized (gcLock)
483 gcx.fullcollectshell();
486 else if (!gcx.fullcollectshell()) // collect to find a new page
// Collection freed nothing usable: grow the heap with a new pool.
491 if (!gcx.bucket[bin] && !gcx.allocPage(bin))
494 gcx.newPool(1); // allocate new pool to find a new page
495 result = gcx.allocPage(bin);
497 onOutOfMemoryError();
502 // Return next item from free list
503 gcx.bucket[bin] = (cast(List*)p).next;
// Zero the unused tail of the bin so stale pointers there can't keep
// garbage alive — only needed when the block will be scanned.
504 if( !(bits & BlkAttr.NO_SCAN) )
505 cstring.memset(p + size, 0, binsize[bin] - size);
506 //debug(PRINTF) printf("\tmalloc => %x\n", p);
507 debug (MEMSTOMP) cstring.memset(p, 0xF0, size);
// Request too large for any bin: page-granular allocation path.
511 p = gcx.bigAlloc(size);
513 onOutOfMemoryError();
514 size -= SENTINEL_EXTRA;
515 size -= SENTINEL_EXTRA;
517 sentinel_init(p, size);
518 gcx.log_malloc(p, size);
// Record the caller-requested attribute bits for the new block.
522 Pool *pool = gcx.findPool(p);
525 gcx.setBits(pool, cast(size_t)(p - pool.baseAddr) / 16, bits);
// Allocate size bytes, zero-initialized. Same locking wrapper pattern
// as malloc(): lock-free when single-threaded, gcLock otherwise.
534 void *calloc(size_t size, uint bits = 0)
541 if (!thread_needLock())
543 return callocNoSync(size, bits);
545 else synchronized (gcLock)
547 return callocNoSync(size, bits);
// Unsynchronized worker: plain allocation followed by an explicit zero
// fill (mallocNoSync does not guarantee zeroed memory).
555 private void *callocNoSync(size_t size, uint bits = 0)
559 //debug(PRINTF) printf("calloc: %x len %d\n", p, len);
560 void *p = mallocNoSync(size, bits);
561 cstring.memset(p, 0, size);
569 void *realloc(void *p, size_t size, uint bits = 0)
571 if (!thread_needLock())
573 return reallocNoSync(p, size, bits);
575 else synchronized (gcLock)
577 return reallocNoSync(p, size, bits);
585 private void *reallocNoSync(void *p, size_t size, uint bits = 0)
595 p = mallocNoSync(size, bits);
601 //debug(PRINTF) printf("GC::realloc(p = %x, size = %u)\n", p, size);
604 sentinel_Invariant(p);
605 psize = *sentinel_size(p);
610 Pool *pool = gcx.findPool(p);
614 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
618 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
619 gcx.setBits(pool, biti, bits);
623 bits = gcx.getBits(pool, biti);
627 p2 = mallocNoSync(size, bits);
630 //debug(PRINTF) printf("\tcopying %d bytes\n",size);
631 cstring.memcpy(p2, p, size);
637 psize = gcx.findSize(p); // find allocated size
638 if (psize >= PAGESIZE && size >= PAGESIZE)
640 auto psz = psize / PAGESIZE;
641 auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
645 auto pool = gcx.findPool(p);
646 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
650 synchronized (gcLock)
652 debug (MEMSTOMP) cstring.memset(p + size, 0xF2, psize - size);
653 pool.freePages(pagenum + newsz, psz - newsz);
657 else if (pagenum + newsz <= pool.npages)
659 // Attempt to expand in place
660 synchronized (gcLock)
662 for (size_t i = pagenum + psz; 1;)
664 if (i == pagenum + newsz)
666 debug (MEMSTOMP) cstring.memset(p + psize, 0xF0, size - psize);
667 cstring.memset(&pool.pagetable[pagenum + psz], B_PAGEPLUS, newsz - psz);
670 if (i == pool.ncommitted)
672 auto u = pool.extendPages(pagenum + newsz - pool.ncommitted);
678 if (pool.pagetable[i] != B_FREE)
685 if (psize < size || // if new size is bigger
686 psize > size * 2) // or less than half
690 Pool *pool = gcx.findPool(p);
694 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
698 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
699 gcx.setBits(pool, biti, bits);
703 bits = gcx.getBits(pool, biti);
707 p2 = mallocNoSync(size, bits);
710 //debug(PRINTF) printf("\tcopying %d bytes\n",size);
711 cstring.memcpy(p2, p, size);
721 * Attempt to in-place enlarge the memory block pointed to by p by at least
722 * minbytes beyond its current capacity, up to a maximum of maxsize. This
723 * does not attempt to move the memory block (like realloc() does).
726 * 0 if could not extend p,
727 * total size of entire memory block if successful.
// Grow the block at p in place by at least minsize bytes (up to maxsize).
// Returns the new total block size, or 0 if in-place growth is impossible.
// Never moves the block (unlike realloc).
729 size_t extend(void* p, size_t minsize, size_t maxsize)
731 if (!thread_needLock())
733 return extendNoSync(p, minsize, maxsize);
735 else synchronized (gcLock)
737 return extendNoSync(p, minsize, maxsize);
// Unsynchronized worker for extend(); caller holds gcLock if needed.
745 private size_t extendNoSync(void* p, size_t minsize, size_t maxsize)
748 assert( minsize <= maxsize );
752 //debug(PRINTF) printf("GC::extend(p = %x, minsize = %u, maxsize = %u)\n", p, minsize, maxsize);
757 auto psize = gcx.findSize(p); // find allocated size
758 if (psize < PAGESIZE)
759 return 0; // cannot extend buckets
// Work in whole pages: current size, minimum and maximum page counts.
761 auto psz = psize / PAGESIZE;
762 auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
763 auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
765 auto pool = gcx.findPool(p);
766 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
// Count how many free pages directly follow the block, up to maxsz.
769 for (sz = 0; sz < maxsz; sz++)
771 auto i = pagenum + psz + sz;
772 if (i == pool.ncommitted)
774 if (pool.pagetable[i] != B_FREE)
// Hit the commit boundary before reaching minsz: try committing more pages.
783 else if (pagenum + psz + sz == pool.ncommitted)
785 auto u = pool.extendPages(minsz - sz);
792 debug (MEMSTOMP) cstring.memset(p + psize, 0xF0, (psz + sz) * PAGESIZE - psize);
// Claim the follow-on pages as continuation pages of this large block.
793 cstring.memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
796 return (psz + sz) * PAGESIZE;
803 size_t reserve(size_t size)
810 if (!thread_needLock())
812 return reserveNoSync(size);
814 else synchronized (gcLock)
816 return reserveNoSync(size);
824 private size_t reserveNoSync(size_t size)
829 return gcx.reserve(size);
// (Locking wrapper for GC.free; the signature line precedes this view.)
843 if (!thread_needLock())
845 return freeNoSync(p);
847 else synchronized (gcLock)
849 return freeNoSync(p);
// Unsynchronized worker: return block p to the GC. Large (B_PAGE) blocks
// give their pages back to the pool; bin-sized blocks go on the free list.
857 private void freeNoSync(void *p)
866 // Find which page it is in
867 pool = gcx.findPool(p);
868 if (!pool) // if not one of ours
870 sentinel_Invariant(p);
// Page index and 16-byte-granularity bit index of p within the pool.
872 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
873 biti = cast(size_t)(p - pool.baseAddr) / 16;
// Drop all attribute bits; the block no longer needs finalization etc.
874 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
876 bin = cast(Bins)pool.pagetable[pagenum];
877 if (bin == B_PAGE) // if large alloc
// Count the B_PAGEPLUS continuation pages that belong to this block.
884 while (++n < pool.ncommitted && pool.pagetable[n] == B_PAGEPLUS)
886 debug (MEMSTOMP) cstring.memset(p, 0xF2, npages * PAGESIZE);
887 pool.freePages(pagenum, npages);
890 { // Add to free list
891 List *list = cast(List*)p;
893 debug (MEMSTOMP) cstring.memset(p, 0xF2, binsize[bin]);
// Push onto the head of this bin's free list.
895 list.next = gcx.bucket[bin];
896 gcx.bucket[bin] = list;
898 gcx.log_free(sentinel_add(p));
903 * Determine the base address of the block containing p. If p is not a gc
904 * allocated pointer, return null.
906 void* addrOf(void *p)
913 if (!thread_needLock())
915 return addrOfNoSync(p);
917 else synchronized (gcLock)
919 return addrOfNoSync(p);
927 void* addrOfNoSync(void *p)
934 return gcx.findBase(p);
939 * Determine the allocated size of pointer p. If p is an interior pointer
940 * or not a gc allocated pointer, return 0.
// Return the allocated size of the block at p, or 0 for interior or
// foreign pointers. Usual locking wrapper around sizeOfNoSync.
942 size_t sizeOf(void *p)
949 if (!thread_needLock())
951 return sizeOfNoSync(p);
953 else synchronized (gcLock)
955 return sizeOfNoSync(p);
// Unsynchronized worker. Two variants are compiled here (the version/debug
// split lines are elided from this view): a sentinel-aware path and a
// caching path that remembers the last (pointer, size) lookup.
963 private size_t sizeOfNoSync(void *p)
970 size_t size = gcx.findSize(p);
972 // Check for interior pointer
974 // 1) size is a power of 2 for less than PAGESIZE values
975 // 2) base of memory pool is aligned on PAGESIZE boundary
// A pointer into the middle of a bin/page fails this alignment test.
976 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
// Subtract the sentinel guard bytes to report the user-visible size.
978 return size ? size - SENTINEL_EXTRA : 0;
// Fast path: repeat query for the same pointer hits the one-entry cache.
982 if (p == gcx.p_cache)
983 return gcx.size_cache;
985 size_t size = gcx.findSize(p);
987 // Check for interior pointer
989 // 1) size is a power of 2 for less than PAGESIZE values
990 // 2) base of memory pool is aligned on PAGESIZE boundary
991 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
996 gcx.size_cache = size;
1005 * Determine the base address of the block containing p. If p is not a gc
1006 * allocated pointer, return null.
1008 BlkInfo query(void *p)
1016 if (!thread_needLock())
1018 return queryNoSync(p);
1020 else synchronized (gcLock)
1022 return queryNoSync(p);
1030 BlkInfo queryNoSync(void *p)
1034 return gcx.getInfo(p);
1039 * Verify that pointer p:
1040 * 1) belongs to this memory pool
1041 * 2) points to the start of an allocated piece of memory
1042 * 3) is not on a free list
1051 if (!thread_needLock())
1055 else synchronized (gcLock)
1065 private void checkNoSync(void *p)
1069 sentinel_Invariant(p);
1077 p = sentinel_sub(p);
1078 pool = gcx.findPool(p);
1080 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1081 bin = cast(Bins)pool.pagetable[pagenum];
1082 assert(bin <= B_PAGE);
1083 size = binsize[bin];
1084 assert((cast(size_t)p & (size - 1)) == 0);
1090 // Check that p is not on a free list
1093 for (list = gcx.bucket[bin]; list; list = list.next)
1095 assert(cast(void*)list != p);
// Record the bottom of the main thread's stack (the end the scanner stops
// at). On a down-growing stack the "bottom" is the numerically highest
// address, so we keep the maximum; otherwise the minimum.
1106 private void setStackBottom(void *p)
1108 version (STACKGROWSDOWN)
1110 //p = (void *)((uint *)p + 4);
1111 if (p > gcx.stackBottom)
1113 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
// NOTE(review): this branch assigns without the cast(char*) used in the
// other branch — presumably stackBottom accepts both; confirm declaration.
1114 gcx.stackBottom = p;
1119 //p = (void *)((uint *)p - 4);
1120 if (p < gcx.stackBottom)
1122 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1123 gcx.stackBottom = cast(char*)p;
1130 * add p to list of roots
1132 void addRoot(void *p)
1139 if (!thread_needLock())
1143 else synchronized (gcLock)
1151 * remove p from list of roots
1153 void removeRoot(void *p)
1160 if (!thread_needLock())
1164 else synchronized (gcLock)
1172 * add range to scan for roots
1174 void addRange(void *p, size_t sz)
1181 //debug(PRINTF) printf("+GC.addRange(pbot = x%x, ptop = x%x)\n", pbot, ptop);
1182 if (!thread_needLock())
1184 gcx.addRange(p, p + sz);
1186 else synchronized (gcLock)
1188 gcx.addRange(p, p + sz);
1190 //debug(PRINTF) printf("-GC.addRange()\n");
1197 void removeRange(void *p)
1204 if (!thread_needLock())
1208 else synchronized (gcLock)
1216 * do full garbage collection
1220 debug(PRINTF) printf("GC.fullCollect()\n");
1222 if (!thread_needLock())
1224 gcx.fullcollectshell();
1226 else synchronized (gcLock)
1228 gcx.fullcollectshell();
1236 debug(PRINTF) printf("poolsize = %x, usedsize = %x, freelistsize = %x\n",
1237 stats.poolsize, stats.usedsize, stats.freelistsize);
1245 * do full garbage collection ignoring roots
1247 void fullCollectNoStack()
1249 if (!thread_needLock())
1252 gcx.fullcollectshell();
1255 else synchronized (gcLock)
1258 gcx.fullcollectshell();
1265 * minimize free space usage
1269 if (!thread_needLock())
1273 else synchronized (gcLock)
1281 * Retrieve statistics about garbage collection.
1282 * Useful for debugging and tuning.
1284 void getStats(out GCStats stats)
1286 if (!thread_needLock())
1288 getStatsNoSync(stats);
1290 else synchronized (gcLock)
1292 getStatsNoSync(stats);
1300 private void getStatsNoSync(out GCStats stats)
1309 //debug(PRINTF) printf("getStats()\n");
1310 cstring.memset(&stats, 0, GCStats.sizeof);
1312 for (n = 0; n < gcx.npools; n++)
1313 { Pool *pool = gcx.pooltable[n];
1315 psize += pool.ncommitted * PAGESIZE;
1316 for (size_t j = 0; j < pool.ncommitted; j++)
1318 Bins bin = cast(Bins)pool.pagetable[j];
1321 else if (bin == B_PAGE)
1323 else if (bin < B_PAGE)
1328 for (n = 0; n < B_PAGE; n++)
1330 //debug(PRINTF) printf("bin %d\n", n);
1331 for (List *list = gcx.bucket[n]; list; list = list.next)
1333 //debug(PRINTF) printf("\tlist %x\n", list);
1334 flsize += binsize[n];
1338 usize = bsize - flsize;
1340 stats.poolsize = psize;
1341 stats.usedsize = bsize - flsize;
1342 stats.freelistsize = flsize;
1347 /* ============================ Gcx =============================== */
1351 COMMITSIZE = (4096*16),
1352 POOLSIZE = (4096*256),
1366 B_PAGE, // start of large alloc
1367 B_PAGEPLUS, // continuation of large alloc
1368 B_FREE, // free page
1369 B_UNCOMMITTED, // memory not committed for this page
// Byte size of each allocation bin, indexed by Bins value (16 bytes up to
// one 4096-byte page, doubling at each step).
1390 const uint binsize[B_MAX] = [ 16,32,64,128,256,512,1024,2048,4096 ];
// Complement masks matching binsize: ANDing an offset with notbinsize[bin]
// rounds it down to the start of its allocation block (see findBase/mark).
1391 const uint notbinsize[B_MAX] = [ ~(16u-1),~(32u-1),~(64u-1),~(128u-1),~(256u-1),
1392 ~(512u-1),~(1024u-1),~(2048u-1),~(4096u-1) ];
1394 /* ============================ Gcx =============================== */
1399 debug (THREADINVARIANT)
1402 void thread_Invariant()
1404 if (self != pthread_self())
1405 printf("thread_Invariant(): gcx = %x, self = %x, pthread_self() = %x\n", this, self, pthread_self());
1406 assert(self == pthread_self());
1411 void thread_Invariant() { }
1425 uint noStack; // !=0 means don't scan stack
1426 uint log; // turn on logging
1430 int disabled; // turn off collections if >0
1432 byte *minAddr; // min(baseAddr)
1433 byte *maxAddr; // max(topAddr)
1438 List *bucket[B_MAX]; // free list for each size
1444 (cast(byte*)this)[0 .. Gcx.sizeof] = 0;
1445 stackBottom = cast(char*)&dummy;
1447 debug (THREADINVARIANT)
1448 self = pthread_self();
1449 //printf("gcx = %p, self = %x\n", this, self);
1458 for (size_t i = 0; i < npools; i++)
1459 { Pool *pool = pooltable[i];
1465 cstdlib.free(pooltable);
1468 cstdlib.free(roots);
1471 cstdlib.free(ranges);
1475 void Invariant() { }
1482 //printf("Gcx.invariant(): this = %p\n", this);
1485 // Assure we're called on the right thread
1486 debug (THREADINVARIANT) assert(self == pthread_self());
1488 for (i = 0; i < npools; i++)
1489 { Pool *pool = pooltable[i];
1494 assert(minAddr == pool.baseAddr);
1498 assert(pool.opCmp(pooltable[i + 1]) < 0);
1500 else if (i + 1 == npools)
1502 assert(maxAddr == pool.topAddr);
1508 assert(rootdim != 0);
1509 assert(nroots <= rootdim);
1514 assert(rangedim != 0);
1515 assert(nranges <= rangedim);
1517 for (i = 0; i < nranges; i++)
1519 assert(ranges[i].pbot);
1520 assert(ranges[i].ptop);
1521 assert(ranges[i].pbot <= ranges[i].ptop);
1525 for (i = 0; i < B_PAGE; i++)
1527 for (List *list = bucket[i]; list; list = list.next)
1538 void addRoot(void *p)
1540 if (nroots == rootdim)
1542 size_t newdim = rootdim * 2 + 16;
1545 newroots = cast(void**)cstdlib.malloc(newdim * newroots[0].sizeof);
1547 onOutOfMemoryError();
1549 { cstring.memcpy(newroots, roots, nroots * newroots[0].sizeof);
1550 cstdlib.free(roots);
1563 void removeRoot(void *p)
1565 for (size_t i = nroots; i--;)
1570 cstring.memmove(roots + i, roots + i + 1, (nroots - i) * roots[0].sizeof);
1581 void addRange(void *pbot, void *ptop)
1583 debug(PRINTF) printf("Thread %x ", pthread_self());
1584 debug(PRINTF) printf("%x.Gcx::addRange(%x, %x), nranges = %d\n", this, pbot, ptop, nranges);
1585 if (nranges == rangedim)
1587 size_t newdim = rangedim * 2 + 16;
1590 newranges = cast(Range*)cstdlib.malloc(newdim * newranges[0].sizeof);
1592 onOutOfMemoryError();
1594 { cstring.memcpy(newranges, ranges, nranges * newranges[0].sizeof);
1595 cstdlib.free(ranges);
1600 ranges[nranges].pbot = pbot;
1601 ranges[nranges].ptop = ptop;
1609 void removeRange(void *pbot)
1611 debug(PRINTF) printf("Thread %x ", pthread_self());
1612 debug(PRINTF) printf("%x.Gcx.removeRange(%x), nranges = %d\n", this, pbot, nranges);
1613 for (size_t i = nranges; i--;)
1615 if (ranges[i].pbot == pbot)
1618 cstring.memmove(ranges + i, ranges + i + 1, (nranges - i) * ranges[0].sizeof);
1622 debug(PRINTF) printf("Wrong thread\n");
1624 // This is a fatal error, but ignore it.
1625 // The problem is that we can get a Close() call on a thread
1626 // other than the one the range was allocated on.
1632 * Find Pool that pointer is in.
1633 * Return null if not in a Pool.
1634 * Assume pooltable[] is sorted.
// Return the Pool containing p, or null if p is outside every pool.
// pooltable[] is kept sorted by address, so a quick min/max range check
// gates a scan over the pools.
1636 Pool *findPool(void *p)
1638 if (p >= minAddr && p < maxAddr)
// Single-pool fast path: the range check above already proves membership.
1642 return pooltable[0];
// Linear scan; pools are address-sorted, so the first pool whose topAddr
// exceeds p is the only candidate.
1645 for (size_t i = 0; i < npools; i++)
1648 pool = pooltable[i];
1649 if (p < pool.topAddr)
1650 { if (pool.baseAddr <= p)
// Return the start address of the allocation containing p,
// or null if p is not a GC-managed pointer.
1664 void* findBase(void *p)
1671 size_t offset = cast(size_t)(p - pool.baseAddr);
1672 size_t pn = offset / PAGESIZE;
1673 Bins bin = cast(Bins)pool.pagetable[pn];
1675 // Adjust bit to be at start of allocated memory block
// Small bins: mask the offset down to the bin boundary.
1678 return pool.baseAddr + (offset & notbinsize[bin]);
1680 else if (bin == B_PAGEPLUS)
// Continuation page of a large block: walk back to its B_PAGE start.
1683 { --pn, offset -= PAGESIZE;
1684 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
// Round the offset down to the page boundary of the block's first page.
1686 return pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1690 // we are in a B_FREE or B_UNCOMMITTED page
1690 // we are in a B_FREE or B_UNCOMMITTED page
1699 * Find size of pointer p.
1700 * Returns 0 if not a gc'd pointer
1702 size_t findSize(void *p)
1713 pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1714 bin = cast(Bins)pool.pagetable[pagenum];
1715 size = binsize[bin];
1717 { size_t npages = pool.ncommitted;
1721 pt = &pool.pagetable[0];
1722 for (i = pagenum + 1; i < npages; i++)
1724 if (pt[i] != B_PAGEPLUS)
1727 size = (i - pagenum) * PAGESIZE;
1737 BlkInfo getInfo(void* p)
1745 size_t offset = cast(size_t)(p - pool.baseAddr);
1746 size_t pn = offset / PAGESIZE;
1747 Bins bin = cast(Bins)pool.pagetable[pn];
1749 ////////////////////////////////////////////////////////////////////
1751 ////////////////////////////////////////////////////////////////////
1755 info.base = pool.baseAddr + (offset & notbinsize[bin]);
1757 else if (bin == B_PAGEPLUS)
1760 { --pn, offset -= PAGESIZE;
1761 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1763 info.base = pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1765 // fix bin for use by size calc below
1766 bin = cast(Bins)pool.pagetable[pn];
1769 ////////////////////////////////////////////////////////////////////
1771 ////////////////////////////////////////////////////////////////////
1773 info.size = binsize[bin];
1775 { size_t npages = pool.ncommitted;
1779 pt = &pool.pagetable[0];
1780 for (i = pn + 1; i < npages; i++)
1782 if (pt[i] != B_PAGEPLUS)
1785 info.size = (i - pn) * PAGESIZE;
1788 ////////////////////////////////////////////////////////////////////
1790 ////////////////////////////////////////////////////////////////////
1792 info.attr = getBits(pool, cast(size_t)(offset / 16));
1799 * Compute bin for size.
1801 static Bins findBin(size_t size)
1810 else if (size <= 32)
1845 * Allocate a new pool of at least size bytes.
1846 * Sort it into pooltable[].
1847 * Mark all memory in the pool as B_FREE.
1848 * Return the actual number of bytes reserved or 0 on error.
1850 size_t reserve(size_t size)
1852 size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
1853 Pool* pool = newPool(npages);
1855 if (!pool || pool.extendPages(npages) == OPFAIL)
1857 return pool.ncommitted * PAGESIZE;
1862 * Minimizes physical memory usage by returning free pools to the OS.
1871 for (n = 0; n < npools; n++)
1873 pool = pooltable[n];
1874 ncommitted = pool.ncommitted;
1875 for (pn = 0; pn < ncommitted; pn++)
1877 if (cast(Bins)pool.pagetable[pn] != B_FREE)
1880 if (pn < ncommitted)
1887 cstring.memmove(pooltable + n,
1889 (--npools - n) * (Pool*).sizeof);
1890 minAddr = pooltable[0].baseAddr;
1891 maxAddr = pooltable[npools - 1].topAddr;
1897 * Allocate a chunk of memory that is larger than a page.
1898 * Return null if out of memory.
// Allocate a run of whole pages for a block larger than the biggest bin.
// Strategy: try existing pools; on failure collect, and if the collection
// freed little, grow the heap with a new pool instead of retrying.
// Returns null on failure (caller raises the OOM error).
1900 void *bigAlloc(size_t size)
1910 npages = (size + PAGESIZE - 1) / PAGESIZE;
1914 // This code could use some refinement when repeatedly
1915 // allocating very large arrays.
// First pass: look for npages contiguous free pages in any existing pool.
1917 for (n = 0; n < npools; n++)
1919 pool = pooltable[n];
1920 pn = pool.allocPages(npages);
// No room anywhere: collect, then decide whether the reclaim was worth it.
1934 freedpages = fullcollectshell();
// Heuristic: if at least a quarter of a pool's worth of pages per pool
// came back, retry allocation from the freed space; otherwise grow.
1935 if (freedpages >= npools * ((POOLSIZE / PAGESIZE) / 4))
1939 // Release empty pools to prevent bloat
1941 // Allocate new pool
1942 pool = newPool(npages);
1947 pn = pool.allocPages(npages);
1948 assert(pn != OPFAIL);
1951 // Release empty pools to prevent bloat
1953 // Allocate new pool
1954 pool = newPool(npages);
1957 pn = pool.allocPages(npages);
1958 assert(pn != OPFAIL);
// Mark the first page B_PAGE and the rest B_PAGEPLUS continuations.
1968 pool.pagetable[pn] = B_PAGE;
1970 cstring.memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
1971 p = pool.baseAddr + pn * PAGESIZE;
// Zero the slack past the requested size so stale data there is not
// later misread as live pointers.
1972 cstring.memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
1973 debug (MEMSTOMP) cstring.memset(p, 0xF1, size);
1974 //debug(PRINTF) printf("\tp = %x\n", p);
1978 return null; // let mallocNoSync handle the error
1983 * Allocate a new pool with at least npages in it.
1984 * Sort it into pooltable[].
1985 * Return null if failed.
// Create a new Pool holding at least npages pages, insert it into the
// address-sorted pooltable[], and refresh the cached minAddr/maxAddr
// bounds. Returns null on failure.
1987 Pool *newPool(size_t npages)
1990 Pool** newpooltable;
1994 //debug(PRINTF) printf("************Gcx::newPool(npages = %d)****************\n", npages);
1996 // Round up to COMMITSIZE pages
1997 npages = (npages + (COMMITSIZE/PAGESIZE) - 1) & ~(COMMITSIZE/PAGESIZE - 1);
1999 // Minimum of POOLSIZE
2000 if (npages < POOLSIZE/PAGESIZE)
2001 npages = POOLSIZE/PAGESIZE;
2002 else if (npages > POOLSIZE/PAGESIZE)
2003 { // Give us 150% of requested size, so there's room to extend
2004 auto n = npages + (npages >> 1);
// Guard against size_t overflow when converting pages back to bytes.
2005 if (n < size_t.max/PAGESIZE)
2009 // Allocate successively larger pools up to 8 megs
2015 n = 8; // cap pool size at 8 megs
2016 n *= (POOLSIZE / PAGESIZE);
2021 pool = cast(Pool *)cstdlib.calloc(1, Pool.sizeof);
2024 pool.initialize(npages);
2028 newnpools = npools + 1;
2029 newpooltable = cast(Pool **)cstdlib.realloc(pooltable, newnpools * (Pool *).sizeof);
2033 // Sort pool into newpooltable[]
// Find the insertion point that keeps the table address-sorted, then
// shift the tail up one slot to make room.
2034 for (i = 0; i < npools; i++)
2036 if (pool.opCmp(newpooltable[i]) < 0)
2039 cstring.memmove(newpooltable + i + 1, newpooltable + i, (npools - i) * (Pool *).sizeof);
2040 newpooltable[i] = pool;
2042 pooltable = newpooltable;
// Cached bounds used by findPool()'s fast range check.
2045 minAddr = pooltable[0].baseAddr;
2046 maxAddr = pooltable[npools - 1].topAddr;
2058 * Allocate a page of bin's.
2062 int allocPage(Bins bin)
2070 //debug(PRINTF) printf("Gcx::allocPage(bin = %d)\n", bin);
2071 for (n = 0; n < npools; n++)
2073 pool = pooltable[n];
2074 pn = pool.allocPages(1);
2081 pool.pagetable[pn] = cast(ubyte)bin;
2083 // Convert page to free list
2084 size_t size = binsize[bin];
2085 List **b = &bucket[bin];
2087 p = pool.baseAddr + pn * PAGESIZE;
2088 ptop = p + PAGESIZE;
2089 for (; p < ptop; p += size)
2091 (cast(List *)p).next = *b;
2099 * Search a range of memory values and mark any pointers into the GC pool.
// Scan the word range [pbot, ptop) for values that look like pointers into
// the GC heap; mark each referenced block and, unless it is NO_SCAN, queue
// it (via the scan bit) for its own interior scan.
2101 void mark(void *pbot, void *ptop)
2103 void **p1 = cast(void **)pbot;
2104 void **p2 = cast(void **)ptop;
2108 //printf("marking range: %p -> %p\n", pbot, ptop);
2109 for (; p1 < p2; p1++)
2112 byte *p = cast(byte *)(*p1);
2114 //if (log) debug(PRINTF) printf("\tmark %x\n", p);
// Cheap rejection: value must fall inside the pools' address span.
2115 if (p >= minAddr && p < maxAddr)
// One-entry page cache: skip pointers into the same large page we just
// processed (only B_PAGE/B_PAGEPLUS pages are cached, see below).
2117 if ((cast(size_t)p & ~(PAGESIZE-1)) == pcache)
2123 size_t offset = cast(size_t)(p - pool.baseAddr);
2125 size_t pn = offset / PAGESIZE;
2126 Bins bin = cast(Bins)pool.pagetable[pn];
2128 //debug(PRINTF) printf("\t\tfound pool %x, base=%x, pn = %d, bin = %d, biti = x%x\n", pool, pool.baseAddr, pn, bin, biti);
2130 // Adjust bit to be at start of allocated memory block
// Small bins: round the offset down to the block start; /16 gives biti.
2133 biti = (offset & notbinsize[bin]) >> 4;
2134 //debug(PRINTF) printf("\t\tbiti = x%x\n", biti);
2136 else if (bin == B_PAGEPLUS)
// Interior pointer into a large block: walk back to its B_PAGE page.
2140 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
2141 biti = pn * (PAGESIZE / 16);
2145 // Don't mark bits in B_FREE or B_UNCOMMITTED pages
2149 if (bin >= B_PAGE) // Cache B_PAGE and B_PAGEPLUS lookups
2150 pcache = cast(size_t)p & ~(PAGESIZE-1);
2152 //debug(PRINTF) printf("\t\tmark(x%x) = %d\n", biti, pool.mark.test(biti));
2153 if (!pool.mark.test(biti))
2155 //if (log) debug(PRINTF) printf("\t\tmarking %x\n", p);
2156 pool.mark.set(biti);
// Pointer-bearing blocks get queued for scanning; NO_SCAN blocks don't.
2157 if (!pool.noscan.test(biti))
2159 pool.scan.set(biti);
2162 log_parent(sentinel_add(pool.baseAddr + biti * 16), sentinel_add(pbot));
// Propagate "new work was queued" to the outer mark loop.
2167 anychanges |= changes;
2172 * Return number of full pages free'd.
// Entry shell for a full collection: spills all callee-saved registers to
// the stack (so register-held pointers are visible to the stack scan),
// captures the stack pointer, then runs the real collector.
2174 size_t fullcollectshell()
2176 // The purpose of the 'shell' is to ensure all the registers
2177 // get put on the stack so they'll be scanned
// GCC builtin that forces every register to be saved to the stack frame.
2182 __builtin_unwind_init();
// Non-GDC X86 path: dump the general-purpose registers into locals.
2189 uint eax,ecx,edx,ebx,ebp,esi,edi;
2202 else version (X86_64)
// X86-64 path: spill all 16 GPRs into stack locals via inline asm.
2204 ulong rax,rbx,rcx,rdx,rbp,rsi,rdi,rsp,r8,r9,r10,r11,r12,r13,r14,r15;
2207 movq rax[RBP], RAX ;
2208 movq rbx[RBP], RBX ;
2209 movq rcx[RBP], RCX ;
2210 movq rdx[RBP], RDX ;
2211 movq rbp[RBP], RBP ;
2212 movq rsi[RBP], RSI ;
2213 movq rdi[RBP], RDI ;
2214 movq rsp[RBP], RSP ;
2217 movq r10[RBP], R10 ;
2218 movq r11[RBP], R11 ;
2219 movq r12[RBP], R12 ;
2220 movq r13[RBP], R13 ;
2221 movq r14[RBP], R14 ;
2222 movq r15[RBP], R15 ;
// No register-spill strategy known for this target: refuse to build.
2227 static assert( false, "Architecture not supported." );
// sp now bounds a stack region containing every live register value.
2238 result = fullcollect(sp);
2261 size_t fullcollect(void *stackTop)
2266 debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
2268 thread_suspendAll();
2274 for (n = 0; n < npools; n++)
2276 pool = pooltable[n];
2279 pool.freebits.zero();
2282 // Mark each free entry, so it doesn't get scanned
2283 for (n = 0; n < B_PAGE; n++)
2285 for (List *list = bucket[n]; list; list = list.next)
2287 pool = findPool(list);
2289 pool.freebits.set(cast(size_t)(cast(byte*)list - pool.baseAddr) / 16);
2293 for (n = 0; n < npools; n++)
2295 pool = pooltable[n];
2296 pool.mark.copy(&pool.freebits);
2299 rt_scanStaticData( &mark );
2301 version (MULTI_THREADED)
2305 // Scan stacks and registers for each paused thread
2306 thread_scanAll( &mark, stackTop );
2313 // Scan stack for main thread
2314 debug(PRINTF) printf(" scan stack bot = %x, top = %x\n", stackTop, stackBottom);
2315 version (STACKGROWSDOWN)
2316 mark(stackTop, stackBottom);
2318 mark(stackBottom, stackTop);
2323 debug(COLLECT_PRINTF) printf("scan roots[]\n");
2324 mark(roots, roots + nroots);
2327 debug(COLLECT_PRINTF) printf("scan ranges[]\n");
2329 for (n = 0; n < nranges; n++)
2331 debug(COLLECT_PRINTF) printf("\t%x .. %x\n", ranges[n].pbot, ranges[n].ptop);
2332 mark(ranges[n].pbot, ranges[n].ptop);
2336 debug(COLLECT_PRINTF) printf("\tscan heap\n");
2340 for (n = 0; n < npools; n++)
2346 pool = pooltable[n];
2348 bbase = pool.scan.base();
2349 btop = bbase + pool.scan.nwords;
2350 for (b = bbase; b < btop;)
2364 o = pool.baseAddr + (b - bbase) * 32 * 16;
2365 if (!(bitm & 0xFFFF))
2370 for (; bitm; o += 16, bitm >>= 1)
2375 pn = cast(size_t)(o - pool.baseAddr) / PAGESIZE;
2376 bin = cast(Bins)pool.pagetable[pn];
2379 mark(o, o + binsize[bin]);
2381 else if (bin == B_PAGE || bin == B_PAGEPLUS)
2383 if (bin == B_PAGEPLUS)
2385 while (pool.pagetable[pn - 1] != B_PAGE)
2389 while (pn + u < pool.ncommitted && pool.pagetable[pn + u] == B_PAGEPLUS)
2391 mark(o, o + u * PAGESIZE);
2400 // Free up everything not marked
2401 debug(COLLECT_PRINTF) printf("\tfree'ing\n");
2402 size_t freedpages = 0;
2404 for (n = 0; n < npools; n++)
2409 pool = pooltable[n];
2410 bbase = pool.mark.base();
2411 ncommitted = pool.ncommitted;
2412 for (pn = 0; pn < ncommitted; pn++, bbase += PAGESIZE / (32 * 16))
2414 Bins bin = cast(Bins)pool.pagetable[pn];
2421 auto size = binsize[bin];
2423 p = pool.baseAddr + pn * PAGESIZE;
2424 ptop = p + PAGESIZE;
2425 biti = pn * (PAGESIZE/16);
2426 bitstride = size / 16;
2428 version(none) // BUG: doesn't work because freebits() must also be cleared
2430 // If free'd entire page
2431 if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 && bbase[3] == 0 &&
2432 bbase[4] == 0 && bbase[5] == 0 && bbase[6] == 0 && bbase[7] == 0)
2434 for (; p < ptop; p += size, biti += bitstride)
2436 if (pool.finals.nbits && pool.finals.testClear(biti))
2437 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2438 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
2440 List *list = cast(List *)p;
2441 //debug(PRINTF) printf("\tcollecting %x\n", list);
2442 log_free(sentinel_add(list));
2444 debug (MEMSTOMP) cstring.memset(p, 0xF3, size);
2446 pool.pagetable[pn] = B_FREE;
2448 //debug(PRINTF) printf("freeing entire page %d\n", pn);
2452 for (; p < ptop; p += size, biti += bitstride)
2454 if (!pool.mark.test(biti))
2456 sentinel_Invariant(sentinel_add(p));
2458 pool.freebits.set(biti);
2459 if (pool.finals.nbits && pool.finals.testClear(biti))
2460 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2461 clrBits(pool, biti, BlkAttr.ALL_BITS);
2463 List *list = cast(List *)p;
2464 debug(PRINTF) printf("\tcollecting %x\n", list);
2465 log_free(sentinel_add(list));
2467 debug (MEMSTOMP) cstring.memset(p, 0xF3, size);
2473 else if (bin == B_PAGE)
2474 { size_t biti = pn * (PAGESIZE / 16);
2476 if (!pool.mark.test(biti))
2477 { byte *p = pool.baseAddr + pn * PAGESIZE;
2479 sentinel_Invariant(sentinel_add(p));
2480 if (pool.finals.nbits && pool.finals.testClear(biti))
2481 rt_finalize(sentinel_add(p), false/*noStack > 0*/);
2482 clrBits(pool, biti, BlkAttr.ALL_BITS);
2484 debug(COLLECT_PRINTF) printf("\tcollecting big %x\n", p);
2485 log_free(sentinel_add(p));
2486 pool.pagetable[pn] = B_FREE;
2488 debug (MEMSTOMP) cstring.memset(p, 0xF3, PAGESIZE);
2489 while (pn + 1 < ncommitted && pool.pagetable[pn + 1] == B_PAGEPLUS)
2492 pool.pagetable[pn] = B_FREE;
2497 cstring.memset(p, 0xF3, PAGESIZE);
2508 // Free complete pages, rebuild free list
2509 debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
2510 size_t recoveredpages = 0;
2511 for (n = 0; n < npools; n++)
2515 pool = pooltable[n];
2516 ncommitted = pool.ncommitted;
2517 for (pn = 0; pn < ncommitted; pn++)
2519 Bins bin = cast(Bins)pool.pagetable[pn];
2525 size_t size = binsize[bin];
2526 size_t bitstride = size / 16;
2527 size_t bitbase = pn * (PAGESIZE / 16);
2528 size_t bittop = bitbase + (PAGESIZE / 16);
2532 for (biti = bitbase; biti < bittop; biti += bitstride)
2533 { if (!pool.freebits.test(biti))
2536 pool.pagetable[pn] = B_FREE;
2541 p = pool.baseAddr + pn * PAGESIZE;
2542 for (u = 0; u < PAGESIZE; u += size)
2543 { biti = bitbase + u / 16;
2544 if (pool.freebits.test(biti))
2547 list = cast(List *)(p + u);
2548 if (list.next != bucket[bin]) // avoid unnecessary writes
2549 list.next = bucket[bin];
2557 debug(COLLECT_PRINTF) printf("recovered pages = %d\n", recoveredpages);
2558 debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, npools);
2560 return freedpages + recoveredpages;
2567 uint getBits(Pool* pool, size_t biti)
// Collect the BlkAttr flags recorded for the block at bit index `biti`
// of `pool`. FINALIZE lives in pool.finals (allocated lazily -- hence the
// nbits guard), NO_SCAN in pool.noscan. NO_MOVE support is stubbed out.
// (The declaration and return of the `bits` accumulator are elided in
// this listing.)
2576 if (pool.finals.nbits &&
2577 pool.finals.test(biti))
2578 bits |= BlkAttr.FINALIZE;
2579 if (pool.noscan.test(biti))
2580 bits |= BlkAttr.NO_SCAN;
2581 // if (pool.nomove.nbits &&
2582 // pool.nomove.test(biti))
2583 // bits |= BlkAttr.NO_MOVE;
2591 void setBits(Pool* pool, size_t biti, uint mask)
// Set the requested BlkAttr flags for the block at bit index `biti`.
// The finals bitmap is allocated lazily here, on the first FINALIZE
// request, sized to match the pool's mark bitmap; noscan is always
// allocated up front (see Pool.initialize), so it is set directly.
2598 if (mask & BlkAttr.FINALIZE)
2600 if (!pool.finals.nbits)
2601 pool.finals.alloc(pool.mark.nbits);
2602 pool.finals.set(biti);
2604 if (mask & BlkAttr.NO_SCAN)
2606 pool.noscan.set(biti);
2608 // if (mask & BlkAttr.NO_MOVE)
2610 // if (!pool.nomove.nbits)
2611 // pool.nomove.alloc(pool.mark.nbits);
2612 // pool.nomove.set(biti);
2620 void clrBits(Pool* pool, size_t biti, uint mask)
// Clear the requested BlkAttr flags for the block at bit index `biti`.
// The finals bitmap may never have been allocated (it is lazy -- see
// setBits), so FINALIZE is only cleared when pool.finals.nbits != 0.
2627 if (mask & BlkAttr.FINALIZE && pool.finals.nbits)
2628 pool.finals.clear(biti);
2629 if (mask & BlkAttr.NO_SCAN)
2630 pool.noscan.clear(biti);
2631 // if (mask & BlkAttr.NO_MOVE && pool.nomove.nbits)
2632 // pool.nomove.clear(biti);
2636 /***** Leak Detector ******/

// log_init (function header elided in this listing): pre-reserves room
// in the `current` allocation log so early log_malloc calls do not have
// to grow it. This machinery is active only under debug(LOGGING).
2647 //debug(PRINTF) printf("+log_init()\n");
2648 current.reserve(1000);
2650 //debug(PRINTF) printf("-log_init()\n");
2654 void log_malloc(void *p, size_t size)
// Record a fresh allocation (pointer + size) in the `current` log.
// The lines that build and push the log entry are elided in this listing.
2656 //debug(PRINTF) printf("+log_malloc(p = %x, size = %d)\n", p, size);
2669 //debug(PRINTF) printf("-log_malloc()\n");
2673 void log_free(void *p)
// Remove `p` from the `current` allocation log. When `p` was never
// logged as allocated, a debug(PRINTF) warning is emitted instead;
// the removal itself is on lines elided in this listing.
2675 //debug(PRINTF) printf("+log_free(%x)\n", p);
2678 i = current.find(p);
2681 debug(PRINTF) printf("free'ing unallocated memory %x\n", p);
2685 //debug(PRINTF) printf("-log_free()\n");
// log_collect (header elided in this listing): after a collection cycle,
// report every logged pointer that is new since the previous cycle, dump
// the surviving "root" entries (those whose recorded parent lies outside
// any GC pool), then snapshot `current` into `prev` for the next cycle.
// Compiled in only under debug(LOGGING).
2691 //debug(PRINTF) printf("+log_collect()\n");
2692 // Print everything in current that is not in prev
2694 debug(PRINTF) printf("New pointers this cycle: --------------------------------\n");
2696 for (size_t i = 0; i < current.dim; i++)
2700 j = prev.find(current.data[i].p);
2702 current.data[i].print();
2707 debug(PRINTF) printf("All roots this cycle: --------------------------------\n");
2708 for (size_t i = 0; i < current.dim; i++)
2713 p = current.data[i].p;
2714 if (!findPool(current.data[i].parent))
2716 j = prev.find(current.data[i].p);
2718 debug(PRINTF) printf("N");
2720 debug(PRINTF) printf(" ");
2721 current.data[i].print();
2725 debug(PRINTF) printf("Used = %d-------------------------------------------------\n", used);
// BUGFIX: was `prev.copy(¤t);` -- a mis-encoded `&current`
// (the `&curren` prefix was swallowed as the HTML entity for the
// currency sign). Restored the address-of expression.
2726 prev.copy(&current);
2728 debug(PRINTF) printf("-log_collect()\n");
2732 void log_parent(void *p, void *parent)
// Record `parent` as the referencing pointer for logged allocation `p`,
// so the leak report can show who kept each block alive. When `p` is not
// in the `current` log, a debug(PRINTF) warning is printed and the pool /
// bin / bit-index of `p` is dumped to help diagnose the mismatch.
2734 //debug(PRINTF) printf("+log_parent()\n");
2737 i = current.find(p);
2740 debug(PRINTF) printf("parent'ing unallocated memory %x, parent = %x\n", p, parent);
2744 size_t offset = cast(size_t)(p - pool.baseAddr);
2746 size_t pn = offset / PAGESIZE;
2747 Bins bin = cast(Bins)pool.pagetable[pn];
2748 biti = (offset & notbinsize[bin]);
2749 debug(PRINTF) printf("\tbin = %d, offset = x%x, biti = x%x\n", bin, offset, biti);
2753 current.data[i].parent = parent;
2755 //debug(PRINTF) printf("-log_parent()\n");
// No-op stand-ins used when debug(LOGGING) is disabled: the GC calls
// these log_* hooks unconditionally, so empty bodies keep release builds
// free of any leak-detector overhead without conditional call sites.
2762 void log_malloc(void *p, size_t size) { }
2763 void log_free(void *p) { }
2764 void log_collect() { }
2765 void log_parent(void *p, void *parent) { }
2770 /* ============================ Pool =============================== */

// Per-pool GC bitmaps: one bit per 16-byte unit of the pool (they are
// allocated with poolsize/16 bits in initialize()). `finals` is the
// exception -- it is allocated lazily by setBits() on the first FINALIZE.
2777 GCBits mark; // entries already scanned, or should not be scanned
2778 GCBits scan; // entries that need to be scanned
2779 GCBits freebits; // entries that are on the free list
2780 GCBits finals; // entries that need finalizer run on them
2781 GCBits noscan; // entries that should not be scanned
2784 size_t ncommitted; // ncommitted <= npages
2788 void initialize(size_t npages)
// Reserve `npages` pages of address space for this pool and set up the
// bookkeeping. Memory is only mapped (reserved) here: every page starts
// out B_UNCOMMITTED and is committed on demand by extendPages().
// (The os_mem_map failure branch is partly elided in this listing.)
2792 //debug(PRINTF) printf("Pool::Pool(%u)\n", npages);
2793 poolsize = npages * PAGESIZE;
2794 assert(poolsize >= POOLSIZE);
2795 baseAddr = cast(byte *)os_mem_map(poolsize);
2797 // Some of the code depends on page alignment of memory pools
2798 assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);
2802 //debug(PRINTF) printf("GC fail: poolsize = x%x, errno = %d\n", poolsize, errno);
2803 //debug(PRINTF) printf("message = '%s'\n", sys_errlist[errno]);
2809 topAddr = baseAddr + poolsize;
// One bit per 16 bytes of pool space. NOTE: `finals` is deliberately
// NOT allocated here -- setBits() allocates it on first use.
2811 mark.alloc(cast(size_t)poolsize / 16);
2812 scan.alloc(cast(size_t)poolsize / 16);
2813 freebits.alloc(cast(size_t)poolsize / 16);
2814 noscan.alloc(cast(size_t)poolsize / 16);
2816 pagetable = cast(ubyte*)cstdlib.malloc(npages);
2818 onOutOfMemoryError();
2819 cstring.memset(pagetable, B_UNCOMMITTED, npages);
2821 this.npages = npages;
// Pool teardown (part of the destructor; header and guards elided in
// this listing): decommit whatever pages were committed, unmap the whole
// reserved range, and free the C-allocated page table. Each OS call's
// result is asserted to be 0 (success).
2834 result = os_mem_decommit(baseAddr, 0, ncommitted * PAGESIZE);
2835 assert(result == 0);
2841 result = os_mem_unmap(baseAddr, npages * PAGESIZE);
2842 assert(result == 0);
2850 cstdlib.free(pagetable);
// Explicit no-op; the real checks live in the invariant block below.
2860 void Invariant() { }

// invariant block (contract checks; several lines elided in this
// listing): verifies topAddr is consistent with baseAddr + npages pages,
// that no more pages are committed than were reserved, and that every
// page-table entry is a valid Bins value. The per-bitmap Invariant()
// calls are commented out.
2867 //freebits.Invariant();
2868 //finals.Invariant();
2869 //noscan.Invariant();
2873 //if (baseAddr + npages * PAGESIZE != topAddr)
2874 //printf("baseAddr = %p, npages = %d, topAddr = %p\n", baseAddr, npages, topAddr);
2875 assert(baseAddr + npages * PAGESIZE == topAddr);
2876 assert(ncommitted <= npages);
2879 for (size_t i = 0; i < npages; i++)
2880 { Bins bin = cast(Bins)pagetable[i];
2882 assert(bin < B_MAX);
2888 * Allocate n pages from Pool.
2889 * Returns OPFAIL on failure.
2891 size_t allocPages(size_t n)
// Scan the committed portion of the page table for a run of n
// consecutive B_FREE pages (the run-counting lines are elided in this
// listing); when no such run exists, fall through to committing fresh
// pages via extendPages(n).
2896 //debug(PRINTF) printf("Pool::allocPages(n = %d)\n", n);
2898 for (i = 0; i < ncommitted; i++)
2900 if (pagetable[i] == B_FREE)
2903 { //debug(PRINTF) printf("\texisting pn = %d\n", i - n + 1);
2910 return extendPages(n);
2914 * Extend Pool by n pages.
2915 * Returns OPFAIL on failure.
2917 size_t extendPages(size_t n)
2919 //debug(PRINTF) printf("Pool::extendPages(n = %d)\n", n);
2920 if (ncommitted + n <= npages)
// Round the request up to a whole number of COMMITSIZE/PAGESIZE-page
// chunks, then clamp to what remains uncommitted in the pool.
2924 tocommit = (n + (COMMITSIZE/PAGESIZE) - 1) & ~(COMMITSIZE/PAGESIZE - 1);
2925 if (ncommitted + tocommit > npages)
2926 tocommit = npages - ncommitted;
2927 //debug(PRINTF) printf("\tlooking to commit %d more pages\n", tocommit);
2929 if (os_mem_commit(baseAddr, ncommitted * PAGESIZE, tocommit * PAGESIZE) == 0)
// Newly committed pages start out free.
2931 cstring.memset(pagetable + ncommitted, B_FREE, tocommit);
2932 auto i = ncommitted;
2933 ncommitted += tocommit;
// Walk back over already-committed trailing free pages -- presumably so
// the returned start index covers the whole free run (the return
// statement itself is elided in this listing; confirm against full file).
2935 while (i && pagetable[i - 1] == B_FREE)
2940 //debug(PRINTF) printf("\tfailed to commit %d pages\n", tocommit);
2948 * Free npages pages starting with pagenum.
2950 void freePages(size_t pagenum, size_t npages)
// Only the page-table bookkeeping is updated on this line; no decommit
// happens in the visible code.
2952 cstring.memset(&pagetable[pagenum], B_FREE, npages);
2957 * Used for sorting pooltable[]
// Pool comparison (opCmp-style; the signature and the `< 0` return are
// elided in this listing): orders pools by baseAddr. The visible return
// yields 1 when this pool's baseAddr is greater, 0 when equal.
2961 if (baseAddr < p2.baseAddr)
2964 return cast(int)(baseAddr > p2.baseAddr);
2969 /* ============================ SENTINEL =============================== */

// Guard layout written by sentinel_init around each allocation:
//   [size_t size][size_t SENTINEL_PRE][user data ...][ubyte SENTINEL_POST]
// SENTINEL_EXTRA = two leading size_t words + one trailing guard byte.
2974 const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
2975 const ubyte SENTINEL_POST = 0xF5; // 8 bits
2976 const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;
// Accessors relative to the user pointer p: the stored user size sits two
// words before p, the pre-guard one word before, and the post-guard byte
// immediately after the user data (p + *sentinel_size(p)).
2979 size_t* sentinel_size(void *p) { return &(cast(size_t *)p)[-2]; }
2980 size_t* sentinel_pre(void *p) { return &(cast(size_t *)p)[-1]; }
2981 ubyte* sentinel_post(void *p) { return &(cast(ubyte *)p)[*sentinel_size(p)]; }
2984 void sentinel_init(void *p, size_t size)
// Stamp the guards around a fresh allocation: record the user size, then
// write the pre- and post-guard patterns that sentinel_Invariant checks.
2986 *sentinel_size(p) = size;
2987 *sentinel_pre(p) = SENTINEL_PRE;
2988 *sentinel_post(p) = SENTINEL_POST;
2992 void sentinel_Invariant(void *p)
// Assert both guard patterns are intact; a failed assert indicates a
// buffer underrun (pre) or overrun (post) on this allocation.
2994 assert(*sentinel_pre(p) == SENTINEL_PRE);
2995 assert(*sentinel_post(p) == SENTINEL_POST);
2999 void *sentinel_add(void *p)
// Convert a raw block pointer to the user pointer by skipping the two
// size_t header words (size + SENTINEL_PRE).
3001 return p + 2 * size_t.sizeof;
3005 void *sentinel_sub(void *p)
// Inverse of sentinel_add: recover the raw block pointer from a user
// pointer by backing up over the two header words.
3007 return p - 2 * size_t.sizeof;
// Non-SENTINEL build: the guards occupy no space and the helpers are
// no-ops (their empty bodies are elided in this listing), so callers
// need no conditional code around sentinel usage.
3012 const uint SENTINEL_EXTRA = 0;
3015 void sentinel_init(void *p, size_t size)
3020 void sentinel_Invariant(void *p)
3025 void *sentinel_add(void *p)
3031 void *sentinel_sub(void *p)