1 /**
2  * This module contains the garbage collector implementation.
3  *
4  * Copyright: Copyright (C) 2001-2007 Digital Mars, www.digitalmars.com.
5  *            All rights reserved.
6  * License:
7  *  This software is provided 'as-is', without any express or implied
8  *  warranty. In no event will the authors be held liable for any damages
9  *  arising from the use of this software.
10  *
11  *  Permission is granted to anyone to use this software for any purpose,
12  *  including commercial applications, and to alter it and redistribute it
13  *  freely, in both source and binary form, subject to the following
14  *  restrictions:
15  *
16  *  o  The origin of this software must not be misrepresented; you must not
17  *     claim that you wrote the original software. If you use this software
18  *     in a product, an acknowledgment in the product documentation would be
19  *     appreciated but is not required.
20  *  o  Altered source versions must be plainly marked as such, and must not
21  *     be misrepresented as being the original software.
22  *  o  This notice may not be removed or altered from any source
23  *     distribution.
24  * Authors:   Walter Bright, David Friedman, Sean Kelly
25  */
26
27 module gc.gc;
28
29 // D Programming Language Garbage Collector implementation
30
31 /************** Debugging ***************************/
32
33 //debug = PRINTF;               // turn on printf's
34 //debug = COLLECT_PRINTF;       // turn on printf's
35 //debug = LOGGING;              // log allocations / frees
36 //debug = MEMSTOMP;             // stomp on memory
37 //debug = SENTINEL;             // add underrun/overrun protection
38 //debug = PTRCHECK;             // more pointer checking
39 //debug = PTRCHECK2;            // thorough but slow pointer checking
40
41 /*************** Configuration *********************/
42
43 version = STACKGROWSDOWN;       // growing the stack means subtracting from the stack pointer
44                                 // (use for Intel X86 CPUs)
45                                 // else growing the stack means adding to the stack pointer
46 version = MULTI_THREADED;       // produce multithreaded version
47
48 /***************************************************/
49
50 import gc.bits;
51 import gc.stats;
52 import gc.alloc;
53 import gc.libc;
54
55
56 version (GNU)
57 {
58     // BUG: The following import will likely not work, since the gcc
59     //      subdirectory is elsewhere.  Instead, perhaps the functions
60     //      could be declared directly or some other resolution could
61     //      be found.
62     import gcc.builtins; // for __builtin_unwind_init
63 }
64
65
66 struct BlkInfo
67 {
68     void*  base;
69     size_t size;
70     uint   attr;
71 }
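/* Illustrative sketch (hypothetical caller, not prescribed by this module):
 * GC.query(), defined below, fills in one of these records for any pointer
 * the collector owns:
 *
 *     BlkInfo info = gc.query(p);
 *     // when p is GC memory: info.base <= p < info.base + info.size
 *
 * assuming `gc` is an initialized GC instance and `p` some pointer.
 */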
72
73 private
74 {
75     enum BlkAttr : uint
76     {
77         FINALIZE = 0b0000_0001,
78         NO_SCAN  = 0b0000_0010,
79         NO_MOVE  = 0b0000_0100,
80         ALL_BITS = 0b1111_1111
81     }
82
83     extern (C) void* rt_stackBottom();
84     extern (C) void* rt_stackTop();
85
86     extern (C) void rt_finalize( void* p, bool det = true );
87
88     alias void delegate( void*, void* ) scanFn;
89
90     extern (C) void rt_scanStaticData( scanFn scan );
91
92     version (MULTI_THREADED)
93     {
94         extern (C) bool thread_needLock();
95         extern (C) void thread_suspendAll();
96         extern (C) void thread_resumeAll();
97
98         extern (C) void thread_scanAll( scanFn fn, void* curStackTop = null );
99     }
100
101     extern (C) void onOutOfMemoryError();
102
103     enum
104     {
105         OPFAIL = ~cast(size_t)0
106     }
107 }
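/* Illustrative sketch (hypothetical caller inside the runtime): the BlkAttr
 * bits are or'd together when passed as the `bits` argument to GC.malloc(),
 * e.g. an object that needs its finalizer run but contains no pointers could
 * be requested as
 *
 *     auto p = gc.malloc(n, BlkAttr.FINALIZE | BlkAttr.NO_SCAN);
 *
 * assuming `gc` is an initialized GC instance and `n` the payload size.
 */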
108
109
110 alias GC gc_t;
111
112
113 /* ======================= Leak Detector =========================== */
114
115
116 debug (LOGGING)
117 {
118     struct Log
119     {
120         void*  p;
121         size_t size;
122         size_t line;
123         char*  file;
124         void*  parent;
125
126         void print()
127         {
128             printf("    p = %x, size = %d, parent = %x ", p, size, parent);
129             if (file)
130             {
131                 printf("%s(%u)", file, line);
132             }
133             printf("\n");
134         }
135     }
136
137
138     struct LogArray
139     {
140         size_t dim;
141         size_t allocdim;
142         Log *data;
143
144         void Dtor()
145         {
146             if (data)
147                 .free(data);
148             data = null;
149         }
150
151         void reserve(size_t nentries)
152         {
153             assert(dim <= allocdim);
154             if (allocdim - dim < nentries)
155             {
156                 allocdim = (dim + nentries) * 2;
157                 assert(dim + nentries <= allocdim);
158                 if (!data)
159                 {
160                     data = cast(Log*) .malloc(allocdim * Log.sizeof);
161                     if (!data && allocdim)
162                         onOutOfMemoryError();
163                 }
164                 else
165                 {   Log *newdata;
166
167                     newdata = cast(Log*) .malloc(allocdim * Log.sizeof);
168                     if (!newdata && allocdim)
169                         onOutOfMemoryError();
170                     .memcpy(newdata, data, dim * Log.sizeof);
171                     .free(data);
172                     data = newdata;
173                 }
174             }
175         }
176
177
178         void push(Log log)
179         {
180             reserve(1);
181             data[dim++] = log;
182         }
183
184         void remove(size_t i)
185         {
186             // shift the tail down over entry i (dim - i - 1 entries follow it)
187             .memmove(data + i, data + i + 1, (dim - i - 1) * Log.sizeof);
187             dim--;
188         }
189
190
191         size_t find(void *p)
192         {
193             for (size_t i = 0; i < dim; i++)
194             {
195                 if (data[i].p == p)
196                     return i;
197             }
198             return OPFAIL; // not found
199         }
200
201
202         void copy(LogArray *from)
203         {
204             reserve(from.dim - dim);
205             assert(from.dim <= allocdim);
206             .memcpy(data, from.data, from.dim * Log.sizeof);
207             dim = from.dim;
208         }
209     }
210 }
211
212
213 /* ============================ GC =============================== */
214
215
216 class GCLock { }                // just a dummy so we can get a global lock
217
218
219 const uint GCVERSION = 1;       // increment every time we change interface
220                                 // to GC.
221
222 class GC
223 {
224     // For passing to debug code
225     static size_t line;
226     static char*  file;
227
228     uint gcversion = GCVERSION;
229
230     Gcx *gcx;                   // implementation
231     static ClassInfo gcLock;    // global lock
232
233
234     void initialize()
235     {
236         gcLock = GCLock.classinfo;
237         gcx = cast(Gcx*) .calloc(1, Gcx.sizeof);
238         if (!gcx)
239             onOutOfMemoryError();
240         gcx.initialize();
241         setStackBottom(rt_stackBottom());
242     }
243
244
245     void Dtor()
246     {
247         if (gcx)
248         {
249             gcx.Dtor();
250             .free(gcx);
251             gcx = null;
252         }
253     }
254
255
256     /**
257      * Undo one disable() call; automatic collections resume once every disable() has been matched.
258      */
259     void enable()
260     {
261         if (!thread_needLock())
262         {
263             assert(gcx.disabled > 0);
264             gcx.disabled--;
265         }
266         else synchronized (gcLock)
267         {
268             assert(gcx.disabled > 0);
269             gcx.disabled--;
270         }
271     }
272
273
274     /**
275      * Suppress collections triggered by allocation until enable() is called once for each disable().
276      */
277     void disable()
278     {
279         if (!thread_needLock())
280         {
281             gcx.disabled++;
282         }
283         else synchronized (gcLock)
284         {
285             gcx.disabled++;
286         }
287     }
288
289
290     /**
291      * Return the attribute bits (BlkAttr) set on the block containing p, or 0 if p is null or not GC memory.
292      */
293     uint getAttr(void* p)
294     {
295         if (!p)
296         {
297             return 0;
298         }
299
300         uint go()
301         {
302             Pool* pool = gcx.findPool(p);
303             uint  oldb = 0;
304
305             if (pool)
306             {
307                 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
308
309                 oldb = gcx.getBits(pool, biti);
310             }
311             return oldb;
312         }
313
314         if (!thread_needLock())
315         {
316             return go();
317         }
318         else synchronized (gcLock)
319         {
320             return go();
321         }
322     }
323
324
325     /**
326      * Set the attribute bits in mask on the block containing p and return the previous bits.
327      */
328     uint setAttr(void* p, uint mask)
329     {
330         if (!p)
331         {
332             return 0;
333         }
334
335         uint go()
336         {
337             Pool* pool = gcx.findPool(p);
338             uint  oldb = 0;
339
340             if (pool)
341             {
342                 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
343
344                 oldb = gcx.getBits(pool, biti);
345                 gcx.setBits(pool, biti, mask);
346             }
347             return oldb;
348         }
349
350         if (!thread_needLock())
351         {
352             return go();
353         }
354         else synchronized (gcLock)
355         {
356             return go();
357         }
358     }
359
360
361     /**
362      * Clear the attribute bits in mask on the block containing p and return the previous bits.
363      */
364     uint clrAttr(void* p, uint mask)
365     {
366         if (!p)
367         {
368             return 0;
369         }
370
371         uint go()
372         {
373             Pool* pool = gcx.findPool(p);
374             uint  oldb = 0;
375
376             if (pool)
377             {
378                 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
379
380                 oldb = gcx.getBits(pool, biti);
381                 gcx.clrBits(pool, biti, mask);
382             }
383             return oldb;
384         }
385
386         if (!thread_needLock())
387         {
388             return go();
389         }
390         else synchronized (gcLock)
391         {
392             return go();
393         }
394     }
395
396
397     /**
398      * Allocate size bytes of GC memory with the given attribute bits; returns null if size is 0.
399      */
400     void *malloc(size_t size, uint bits = 0)
401     {
402         if (!size)
403         {
404             return null;
405         }
406
407         if (!thread_needLock())
408         {
409             return mallocNoSync(size, bits);
410         }
411         else synchronized (gcLock)
412         {
413             return mallocNoSync(size, bits);
414         }
415     }
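    /* Illustrative usage (a sketch; `gc` and the buffer size are assumed, not
     * taken from this file): a buffer that will never contain pointers can be
     * allocated with NO_SCAN so the marking phase skips its contents:
     *
     *     void* buf = gc.malloc(1024, BlkAttr.NO_SCAN);
     */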
416
417
418     //
419     //
420     //
421     private void *mallocNoSync(size_t size, uint bits = 0)
422     {
423         assert(size != 0);
424
425         void *p = null;
426         Bins bin;
427
428         //debug(PRINTF) printf("GC::malloc(size = %d, gcx = %p)\n", size, gcx);
429         assert(gcx);
430
431         size += SENTINEL_EXTRA;
432
433         // Compute size bin
434         // Cache previous binsize lookup - Dave Fladebo.
435         static size_t lastsize = -1;
436         static Bins lastbin;
437         if (size == lastsize)
438             bin = lastbin;
439         else
440         {
441             bin = gcx.findBin(size);
442             lastsize = size;
443             lastbin = bin;
444         }
445
446         if (bin < B_PAGE)
447         {
448             p = gcx.bucket[bin];
449             if (p is null)
450             {
451                 if (!gcx.allocPage(bin) && !gcx.disabled)   // try to find a new page
452                 {
453                     if (!thread_needLock())
454                     {
455                         /* Then we haven't locked it yet. Be sure
456                          * to lock for a collection, since a finalizer
457                          * may start a new thread.
458                          */
459                         synchronized (gcLock)
460                         {
461                             gcx.fullcollectshell();
462                         }
463                     }
464                     else if (!gcx.fullcollectshell())       // collect to find a new page
465                     {
466                         //gcx.newPool(1);
467                     }
468                 }
469                 if (!gcx.bucket[bin] && !gcx.allocPage(bin))
470                 {   int result;
471
472                     gcx.newPool(1);         // allocate new pool to find a new page
473                     result = gcx.allocPage(bin);
474                     if (!result)
475                         onOutOfMemoryError();
476                 }
477                 p = gcx.bucket[bin];
478             }
479
480             // Return next item from free list
481             gcx.bucket[bin] = (cast(List*)p).next;
482             if( !(bits & BlkAttr.NO_SCAN) )
483                 .memset(p + size, 0, binsize[bin] - size);
484             //debug(PRINTF) printf("\tmalloc => %x\n", p);
485             debug (MEMSTOMP) .memset(p, 0xF0, size);
486         }
487         else
488         {
489             p = gcx.bigAlloc(size);
490             if (!p)
491                 onOutOfMemoryError();
492         }
493         size -= SENTINEL_EXTRA;
494         p = sentinel_add(p);
495         sentinel_init(p, size);
496         gcx.log_malloc(p, size);
497
498         if (bits)
499         {
500             Pool *pool = gcx.findPool(p);
501             assert(pool);
502
503             gcx.setBits(pool, cast(size_t)(p - pool.baseAddr) / 16, bits);
504         }
505         return p;
506     }
507
508
509     /**
510      * Allocate size bytes of zero-initialized GC memory with the given attribute bits.
511      */
512     void *calloc(size_t size, uint bits = 0)
513     {
514         if (!size)
515         {
516             return null;
517         }
518
519         if (!thread_needLock())
520         {
521             return callocNoSync(size, bits);
522         }
523         else synchronized (gcLock)
524         {
525             return callocNoSync(size, bits);
526         }
527     }
528
529
530     //
531     //
532     //
533     private void *callocNoSync(size_t size, uint bits = 0)
534     {
535         assert(size != 0);
536
537         //debug(PRINTF) printf("calloc: %x len %d\n", p, len);
538         void *p = mallocNoSync(size, bits);
539         .memset(p, 0, size);
540         return p;
541     }
542
543
544     /**
545      * Resize the block p to size bytes, shrinking or extending in place or copying to a new block as needed; frees p if size is 0.
546      */
547     void *realloc(void *p, size_t size, uint bits = 0)
548     {
549         if (!thread_needLock())
550         {
551             return reallocNoSync(p, size, bits);
552         }
553         else synchronized (gcLock)
554         {
555             return reallocNoSync(p, size, bits);
556         }
557     }
558
559
560     //
561     //
562     //
563     private void *reallocNoSync(void *p, size_t size, uint bits = 0)
564     {
565         if (!size)
566         {   if (p)
567             {   freeNoSync(p);
568                 p = null;
569             }
570         }
571         else if (!p)
572         {
573             p = mallocNoSync(size, bits);
574         }
575         else
576         {   void *p2;
577             size_t psize;
578
579             //debug(PRINTF) printf("GC::realloc(p = %x, size = %u)\n", p, size);
580             version (SENTINEL)
581             {
582                 sentinel_Invariant(p);
583                 psize = *sentinel_size(p);
584                 if (psize != size)
585                 {
586                     if (psize)
587                     {
588                         Pool *pool = gcx.findPool(p);
589
590                         if (pool)
591                         {
592                             auto biti = cast(size_t)(p - pool.baseAddr) / 16;
593
594                             if (bits)
595                             {
596                                 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
597                                 gcx.setBits(pool, biti, bits);
598                             }
599                             else
600                             {
601                                 bits = gcx.getBits(pool, biti);
602                             }
603                         }
604                     }
605                     p2 = mallocNoSync(size, bits);
606                     if (psize < size)
607                         size = psize;
608                     //debug(PRINTF) printf("\tcopying %d bytes\n",size);
609                     .memcpy(p2, p, size);
610                     p = p2;
611                 }
612             }
613             else
614             {
615                 psize = gcx.findSize(p);        // find allocated size
616                 if (psize >= PAGESIZE && size >= PAGESIZE)
617                 {
618                     auto psz = psize / PAGESIZE;
619                     auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
620                     if (newsz == psz)
621                         return p;
622
623                     auto pool = gcx.findPool(p);
624                     auto pagenum = (p - pool.baseAddr) / PAGESIZE;
625
626                     if (newsz < psz)
627                     {   // Shrink in place
628                         synchronized (gcLock)
629                         {
630                             debug (MEMSTOMP) .memset(p + size, 0xF2, psize - size);
631                             pool.freePages(pagenum + newsz, psz - newsz);
632                         }
633                         return p;
634                     }
635                     else if (pagenum + newsz <= pool.npages)
636                     {
637                         // Attempt to expand in place
638                         synchronized (gcLock)
639                         {
640                             for (size_t i = pagenum + psz; 1;)
641                             {
642                                 if (i == pagenum + newsz)
643                                 {
644                                     debug (MEMSTOMP) .memset(p + psize, 0xF0, size - psize);
645                                     .memset(&pool.pagetable[pagenum + psz], B_PAGEPLUS, newsz - psz);
646                                     return p;
647                                 }
648                                 if (i == pool.ncommitted)
649                                 {
650                                     auto u = pool.extendPages(pagenum + newsz - pool.ncommitted);
651                                     if (u == OPFAIL)
652                                         break;
653                                     i = pagenum + newsz;
654                                     continue;
655                                 }
656                                 if (pool.pagetable[i] != B_FREE)
657                                     break;
658                                 i++;
659                             }
660                         }
661                     }
662                 }
663                 if (psize < size ||             // if new size is bigger
664                     psize > size * 2)           // or less than half
665                 {
666                     if (psize)
667                     {
668                         Pool *pool = gcx.findPool(p);
669
670                         if (pool)
671                         {
672                             auto biti = cast(size_t)(p - pool.baseAddr) / 16;
673
674                             if (bits)
675                             {
676                                 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
677                                 gcx.setBits(pool, biti, bits);
678                             }
679                             else
680                             {
681                                 bits = gcx.getBits(pool, biti);
682                             }
683                         }
684                     }
685                     p2 = mallocNoSync(size, bits);
686                     if (psize < size)
687                         size = psize;
688                     //debug(PRINTF) printf("\tcopying %d bytes\n",size);
689                     .memcpy(p2, p, size);
690                     p = p2;
691                 }
692             }
693         }
694         return p;
695     }
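    /* Worked example of the large-block path above (numbers assumed,
     * PAGESIZE == 4096): a block spanning psize == 3 pages reallocated to
     * size == 2 * PAGESIZE gives newsz == 2 < psz == 3, so the trailing page
     * is released in place via pool.freePages(); growing it to 5 * PAGESIZE
     * instead scans the two pagetable entries after the block and only falls
     * back to mallocNoSync() + memcpy() if they are not B_FREE (or cannot be
     * committed).
     */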
696
697
698     /**
699      * Attempt to in-place enlarge the memory block pointed to by p by at least
700      * minbytes beyond its current capacity, up to a maximum of maxsize.  This
701      * does not attempt to move the memory block (like realloc() does).
702      *
703      * Returns:
704      *  0 if could not extend p,
705      *  total size of entire memory block if successful.
706      */
707     size_t extend(void* p, size_t minsize, size_t maxsize)
708     {
709         if (!thread_needLock())
710         {
711             return extendNoSync(p, minsize, maxsize);
712         }
713         else synchronized (gcLock)
714         {
715             return extendNoSync(p, minsize, maxsize);
716         }
717     }
718
719
720     //
721     //
722     //
723     private size_t extendNoSync(void* p, size_t minsize, size_t maxsize)
724     in
725     {
726         assert( minsize <= maxsize );
727     }
728     body
729     {
730         //debug(PRINTF) printf("GC::extend(p = %x, minsize = %u, maxsize = %u)\n", p, minsize, maxsize);
731         version (SENTINEL)
732         {
733             return 0;
734         }
735         auto psize = gcx.findSize(p);   // find allocated size
736         if (psize < PAGESIZE)
737             return 0;                   // cannot extend buckets
738
739         auto psz = psize / PAGESIZE;
740         auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
741         auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
742
743         auto pool = gcx.findPool(p);
744         auto pagenum = (p - pool.baseAddr) / PAGESIZE;
745
746         size_t sz;
747         for (sz = 0; sz < maxsz; sz++)
748         {
749             auto i = pagenum + psz + sz;
750             if (i == pool.ncommitted)
751                 break;
752             if (pool.pagetable[i] != B_FREE)
753             {   if (sz < minsz)
754                     return 0;
755                 break;
756             }
757         }
758         if (sz >= minsz)
759         {
760         }
761         else if (pagenum + psz + sz == pool.ncommitted)
762         {
763             auto u = pool.extendPages(minsz - sz);
764             if (u == OPFAIL)
765                 return 0;
766             sz = minsz;
767         }
768         else
769             return 0;
770         debug (MEMSTOMP) .memset(p + psize, 0xF0, (psz + sz) * PAGESIZE - psize);
771         .memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
772         gcx.p_cache = null;
773         gcx.size_cache = 0;
774         return (psz + sz) * PAGESIZE;
775     }
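    /* Illustrative usage (a sketch; `p` is assumed to be the base of a
     * page-sized or larger GC allocation): request at least one extra page
     * but accept up to four; the result is the new total block size in bytes,
     * or 0 if the following pages could not be claimed:
     *
     *     size_t total = gc.extend(p, PAGESIZE, 4 * PAGESIZE);
     *     if (total == 0)
     *         ; // could not grow in place
     */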
776
777
778     /**
779      * Pre-allocate a pool of at least size bytes; returns the number of bytes actually reserved, or 0 on failure.
780      */
781     size_t reserve(size_t size)
782     {
783         if (!size)
784         {
785             return 0;
786         }
787
788         if (!thread_needLock())
789         {
790             return reserveNoSync(size);
791         }
792         else synchronized (gcLock)
793         {
794             return reserveNoSync(size);
795         }
796     }
797
798
799     //
800     //
801     //
802     private size_t reserveNoSync(size_t size)
803     {
804         assert(size != 0);
805         assert(gcx);
806
807         return gcx.reserve(size);
808     }
809
810
811     /**
812      * Return the block pointed to by p to the free lists (or free its pages) without collecting; pointers the GC does not own are ignored.
813      */
814     void free(void *p)
815     {
816         if (!p)
817         {
818             return;
819         }
820
821         if (!thread_needLock())
822         {
823             return freeNoSync(p);
824         }
825         else synchronized (gcLock)
826         {
827             return freeNoSync(p);
828         }
829     }
830
831
832     //
833     //
834     //
835     private void freeNoSync(void *p)
836     {
837         assert (p);
838
839         Pool*  pool;
840         size_t pagenum;
841         Bins   bin;
842         size_t biti;
843
844         // Find which page it is in
845         pool = gcx.findPool(p);
846         if (!pool)                              // if not one of ours
847             return;                             // ignore
848         sentinel_Invariant(p);
849         p = sentinel_sub(p);
850         pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
851         biti = cast(size_t)(p - pool.baseAddr) / 16;
852         gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
853
854         bin = cast(Bins)pool.pagetable[pagenum];
855         if (bin == B_PAGE)              // if large alloc
856         {   size_t npages;
857             size_t n;
858
859             // Free pages
860             npages = 1;
861             n = pagenum;
862             while (++n < pool.ncommitted && pool.pagetable[n] == B_PAGEPLUS)
863                 npages++;
864             debug (MEMSTOMP) .memset(p, 0xF2, npages * PAGESIZE);
865             pool.freePages(pagenum, npages);
866         }
867         else
868         {   // Add to free list
869             List *list = cast(List*)p;
870
871             debug (MEMSTOMP) .memset(p, 0xF2, binsize[bin]);
872
873             list.next = gcx.bucket[bin];
874             gcx.bucket[bin] = list;
875         }
876         gcx.log_free(sentinel_add(p));
877     }
878
879
880     /**
881      * Determine the base address of the block containing p.  If p is not a gc
882      * allocated pointer, return null.
883      */
884     void* addrOf(void *p)
885     {
886         if (!p)
887         {
888             return null;
889         }
890
891         if (!thread_needLock())
892         {
893             return addrOfNoSync(p);
894         }
895         else synchronized (gcLock)
896         {
897             return addrOfNoSync(p);
898         }
899     }
900
901
902     //
903     //
904     //
905     void* addrOfNoSync(void *p)
906     {
907         if (!p)
908         {
909             return null;
910         }
911
912         return gcx.findBase(p);
913     }
914
915
916     /**
917      * Determine the allocated size of pointer p.  If p is an interior pointer
918      * or not a gc allocated pointer, return 0.
919      */
920     size_t sizeOf(void *p)
921     {
922         if (!p)
923         {
924             return 0;
925         }
926
927         if (!thread_needLock())
928         {
929             return sizeOfNoSync(p);
930         }
931         else synchronized (gcLock)
932         {
933             return sizeOfNoSync(p);
934         }
935     }
936
937
938     //
939     //
940     //
941     private size_t sizeOfNoSync(void *p)
942     {
943         assert (p);
944
945         version (SENTINEL)
946         {
947             p = sentinel_sub(p);
948             size_t size = gcx.findSize(p);
949
950             // Check for interior pointer
951             // This depends on:
952             // 1) size is a power of 2 for less than PAGESIZE values
953             // 2) base of memory pool is aligned on PAGESIZE boundary
954             if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
955                 size = 0;
956             return size ? size - SENTINEL_EXTRA : 0;
957         }
958         else
959         {
960             if (p == gcx.p_cache)
961                 return gcx.size_cache;
962
963             size_t size = gcx.findSize(p);
964
965             // Check for interior pointer
966             // This depends on:
967             // 1) size is a power of 2 for less than PAGESIZE values
968             // 2) base of memory pool is aligned on PAGESIZE boundary
969             if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
970                 size = 0;
971             else
972             {
973                 gcx.p_cache = p;
974                 gcx.size_cache = size;
975             }
976
977             return size;
978         }
979     }
980     }
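    /* Worked example of the interior-pointer test above (assuming a 64-byte
     * bin): block bases are 64-byte aligned, so for a base pointer
     * (cast(size_t)p & (size - 1) & (PAGESIZE - 1)) is 0, while for an
     * interior pointer such as base + 8 it evaluates to 0x8 and the reported
     * size is 0.
     */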
981
982     /**
983      * Retrieve the base address, size and attribute bits of the block containing p.
984      * If p is not a gc allocated pointer, return a zero-initialized BlkInfo.
985      */
986     BlkInfo query(void *p)
987     {
988         if (!p)
989         {
990             BlkInfo i;
991             return  i;
992         }
993
994         if (!thread_needLock())
995         {
996             return queryNoSync(p);
997         }
998         else synchronized (gcLock)
999         {
1000             return queryNoSync(p);
1001         }
1002     }
1003
1004
1005     //
1006     //
1007     //
1008     BlkInfo queryNoSync(void *p)
1009     {
1010         assert(p);
1011
1012         return gcx.getInfo(p);
1013     }
1014
1015
1016     /**
1017      * Verify that pointer p:
1018      *  1) belongs to this memory pool
1019      *  2) points to the start of an allocated piece of memory
1020      *  3) is not on a free list
1021      */
1022     void check(void *p)
1023     {
1024         if (!p)
1025         {
1026             return;
1027         }
1028
1029         if (!thread_needLock())
1030         {
1031             checkNoSync(p);
1032         }
1033         else synchronized (gcLock)
1034         {
1035             checkNoSync(p);
1036         }
1037     }
1038
1039
1040     //
1041     //
1042     //
1043     private void checkNoSync(void *p)
1044     {
1045         assert(p);
1046
1047         sentinel_Invariant(p);
1048         debug (PTRCHECK)
1049         {
1050             Pool*  pool;
1051             size_t pagenum;
1052             Bins   bin;
1053             size_t size;
1054
1055             p = sentinel_sub(p);
1056             pool = gcx.findPool(p);
1057             assert(pool);
1058             pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1059             bin = cast(Bins)pool.pagetable[pagenum];
1060             assert(bin <= B_PAGE);
1061             size = binsize[bin];
1062             assert((cast(size_t)p & (size - 1)) == 0);
1063
1064             debug (PTRCHECK2)
1065             {
1066                 if (bin < B_PAGE)
1067                 {
1068                     // Check that p is not on a free list
1069                     List *list;
1070
1071                     for (list = gcx.bucket[bin]; list; list = list.next)
1072                     {
1073                         assert(cast(void*)list != p);
1074                     }
1075                 }
1076             }
1077         }
1078     }
1079
1080
1081     //
1082     //
1083     //
1084     private void setStackBottom(void *p)
1085     {
1086         version (STACKGROWSDOWN)
1087         {
1088             //p = (void *)((uint *)p + 4);
1089             if (p > gcx.stackBottom)
1090             {
1091                 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1092                 gcx.stackBottom = p;
1093             }
1094         }
1095         else
1096         {
1097             //p = (void *)((uint *)p - 4);
1098             if (p < gcx.stackBottom)
1099             {
1100                 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1101                 gcx.stackBottom = cast(char*)p;
1102             }
1103         }
1104     }
1105
1106
1107     /**
1108      * add p to list of roots
1109      */
1110     void addRoot(void *p)
1111     {
1112         if (!p)
1113         {
1114             return;
1115         }
1116
1117         if (!thread_needLock())
1118         {
1119             gcx.addRoot(p);
1120         }
1121         else synchronized (gcLock)
1122         {
1123             gcx.addRoot(p);
1124         }
1125     }
1126
1127
1128     /**
1129      * remove p from list of roots
1130      */
1131     void removeRoot(void *p)
1132     {
1133         if (!p)
1134         {
1135             return;
1136         }
1137
1138         if (!thread_needLock())
1139         {
1140             gcx.removeRoot(p);
1141         }
1142         else synchronized (gcLock)
1143         {
1144             gcx.removeRoot(p);
1145         }
1146     }
1147
1148
1149     /**
1150      * add range to scan for roots
1151      */
1152     void addRange(void *p, size_t sz)
1153     {
1154         if (!p || !sz)
1155         {
1156             return;
1157         }
1158
1159         //debug(PRINTF) printf("+GC.addRange(pbot = x%x, ptop = x%x)\n", pbot, ptop);
1160         if (!thread_needLock())
1161         {
1162             gcx.addRange(p, p + sz);
1163         }
1164         else synchronized (gcLock)
1165         {
1166             gcx.addRange(p, p + sz);
1167         }
1168         //debug(PRINTF) printf("-GC.addRange()\n");
1169     }
1170
1171
1172     /**
1173      * remove range
1174      */
1175     void removeRange(void *p)
1176     {
1177         if (!p)
1178         {
1179             return;
1180         }
1181
1182         if (!thread_needLock())
1183         {
1184             gcx.removeRange(p);
1185         }
1186         else synchronized (gcLock)
1187         {
1188             gcx.removeRange(p);
1189         }
1190     }
1191
1192
1193     /**
1194      * do full garbage collection
1195      */
1196     void fullCollect()
1197     {
1198         debug(PRINTF) printf("GC.fullCollect()\n");
1199
1200         if (!thread_needLock())
1201         {
1202             gcx.fullcollectshell();
1203         }
1204         else synchronized (gcLock)
1205         {
1206             gcx.fullcollectshell();
1207         }
1208
1209         version (none)
1210         {
1211             GCStats stats;
1212
1213             getStats(stats);
1214             debug(PRINTF) printf("poolsize = %x, usedsize = %x, freelistsize = %x\n",
1215                     stats.poolsize, stats.usedsize, stats.freelistsize);
1216         }
1217
1218         gcx.log_collect();
1219     }
1220
1221
1222     /**
1223      * do full garbage collection ignoring roots
1224      */
1225     void fullCollectNoStack()
1226     {
1227         if (!thread_needLock())
1228         {
1229             gcx.noStack++;
1230             gcx.fullcollectshell();
1231             gcx.noStack--;
1232         }
1233         else synchronized (gcLock)
1234         {
1235             gcx.noStack++;
1236             gcx.fullcollectshell();
1237             gcx.noStack--;
1238         }
1239     }
1240
1241
1242     /**
1243      * minimize free space usage
1244      */
1245     void minimize()
1246     {
1247         if (!thread_needLock())
1248         {
1249             gcx.minimize();
1250         }
1251         else synchronized (gcLock)
1252         {
1253             gcx.minimize();
1254         }
1255     }
1256
1257
1258     /**
1259      * Retrieve statistics about garbage collection.
1260      * Useful for debugging and tuning.
1261      */
1262     void getStats(out GCStats stats)
1263     {
1264         if (!thread_needLock())
1265         {
1266             getStatsNoSync(stats);
1267         }
1268         else synchronized (gcLock)
1269         {
1270             getStatsNoSync(stats);
1271         }
1272     }
1273
1274
1275     //
1276     //
1277     //
1278     private void getStatsNoSync(out GCStats stats)
1279     {
1280         size_t psize = 0;
1281         size_t usize = 0;
1282         size_t flsize = 0;
1283
1284         size_t n;
1285         size_t bsize = 0;
1286
1287         //debug(PRINTF) printf("getStats()\n");
1288         .memset(&stats, 0, GCStats.sizeof);
1289
1290         for (n = 0; n < gcx.npools; n++)
1291         {   Pool *pool = gcx.pooltable[n];
1292
1293             psize += pool.ncommitted * PAGESIZE;
1294             for (size_t j = 0; j < pool.ncommitted; j++)
1295             {
1296                 Bins bin = cast(Bins)pool.pagetable[j];
1297                 if (bin == B_FREE)
1298                     stats.freeblocks++;
1299                 else if (bin == B_PAGE)
1300                     stats.pageblocks++;
1301                 else if (bin < B_PAGE)
1302                     bsize += PAGESIZE;
1303             }
1304         }
1305
1306         for (n = 0; n < B_PAGE; n++)
1307         {
1308             //debug(PRINTF) printf("bin %d\n", n);
1309             for (List *list = gcx.bucket[n]; list; list = list.next)
1310             {
1311                 //debug(PRINTF) printf("\tlist %x\n", list);
1312                 flsize += binsize[n];
1313             }
1314         }
1315
1316         usize = bsize - flsize;
1317
1318         stats.poolsize = psize;
1319         stats.usedsize = bsize - flsize;
1320         stats.freelistsize = flsize;
1321     }
1322 }
1323
1324
1325 /* ============================ Gcx =============================== */
1326
1327 enum
1328 {   PAGESIZE =    4096,
1329     COMMITSIZE = (4096*16),
1330     POOLSIZE =   (4096*256),
1331 }
1332
1333
1334 enum
1335 {
1336     B_16,
1337     B_32,
1338     B_64,
1339     B_128,
1340     B_256,
1341     B_512,
1342     B_1024,
1343     B_2048,
1344     B_PAGE,             // start of large alloc
1345     B_PAGEPLUS,         // continuation of large alloc
1346     B_FREE,             // free page
1347     B_UNCOMMITTED,      // memory not committed for this page
1348     B_MAX
1349 }
1350
1351
1352 alias ubyte Bins;
1353
1354
1355 struct List
1356 {
1357     List *next;
1358 }
1359
1360
1361 struct Range
1362 {
1363     void *pbot;
1364     void *ptop;
1365 }
1366
1367
1368 const uint binsize[B_MAX] = [ 16,32,64,128,256,512,1024,2048,4096 ];
1369 const uint notbinsize[B_MAX] = [ ~(16u-1),~(32u-1),~(64u-1),~(128u-1),~(256u-1),
1370                                 ~(512u-1),~(1024u-1),~(2048u-1),~(4096u-1) ];
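/* Illustrative sketch of how these tables are used (offset value assumed): an
 * offset of 0x1234 into a page of B_64 blocks is rounded down to its block
 * base with
 *
 *     offset & notbinsize[B_64]    // 0x1234 & ~0x3Fu == 0x1200
 *
 * which is the same adjustment findBase(), getInfo() and mark() apply below.
 */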
1371
1372 /* ============================ Gcx =============================== */
1373
1374
1375 struct Gcx
1376 {
1377
1378     void *p_cache;
1379     size_t size_cache;
1380
1381     size_t nroots;
1382     size_t rootdim;
1383     void **roots;
1384
1385     size_t nranges;
1386     size_t rangedim;
1387     Range *ranges;
1388
1389     uint noStack;       // !=0 means don't scan stack
1390     uint log;           // turn on logging
1391     uint anychanges;
1392     void *stackBottom;
1393     uint inited;
1394     int disabled;       // turn off collections if >0
1395
1396     byte *minAddr;      // min(baseAddr)
1397     byte *maxAddr;      // max(topAddr)
1398
1399     size_t npools;
1400     Pool **pooltable;
1401
1402     List *bucket[B_MAX];        // free list for each size
1403
1404
1405     void initialize()
1406     {   int dummy;
1407
1408         (cast(byte*)this)[0 .. Gcx.sizeof] = 0;
1409         stackBottom = cast(char*)&dummy;
1410         log_init();
1411         //printf("gcx = %p, self = %x\n", this, self);
1412         inited = 1;
1413     }
1414
1415
1416     void Dtor()
1417     {
1418         inited = 0;
1419
1420         for (size_t i = 0; i < npools; i++)
1421         {   Pool *pool = pooltable[i];
1422
1423             pool.Dtor();
1424             .free(pool);
1425         }
1426         if (pooltable)
1427             .free(pooltable);
1428
1429         if (roots)
1430             .free(roots);
1431
1432         if (ranges)
1433             .free(ranges);
1434     }
1435
1436
1437     void Invariant() { }
1438
1439
1440     invariant
1441     {
1442         if (inited)
1443         {
1444         //printf("Gcx.invariant(): this = %p\n", this);
1445             size_t i;
1446
1447             for (i = 0; i < npools; i++)
1448             {   Pool *pool = pooltable[i];
1449
1450                 pool.Invariant();
1451                 if (i == 0)
1452                 {
1453                     assert(minAddr == pool.baseAddr);
1454                 }
1455                 if (i + 1 < npools)
1456                 {
1457                     assert(pool.opCmp(pooltable[i + 1]) < 0);
1458                 }
1459                 else if (i + 1 == npools)
1460                 {
1461                     assert(maxAddr == pool.topAddr);
1462                 }
1463             }
1464
1465             if (roots)
1466             {
1467                 assert(rootdim != 0);
1468                 assert(nroots <= rootdim);
1469             }
1470
1471             if (ranges)
1472             {
1473                 assert(rangedim != 0);
1474                 assert(nranges <= rangedim);
1475
1476                 for (i = 0; i < nranges; i++)
1477                 {
1478                     assert(ranges[i].pbot);
1479                     assert(ranges[i].ptop);
1480                     assert(ranges[i].pbot <= ranges[i].ptop);
1481                 }
1482             }
1483
1484             for (i = 0; i < B_PAGE; i++)
1485             {
1486                 for (List *list = bucket[i]; list; list = list.next)
1487                 {
1488                 }
1489             }
1490         }
1491     }
1492
1493
1494     /**
1495      * Append p to the roots[] array, growing it as needed.
1496      */
1497     void addRoot(void *p)
1498     {
1499         if (nroots == rootdim)
1500         {
1501             size_t newdim = rootdim * 2 + 16;
1502             void** newroots;
1503
1504             newroots = cast(void**) .malloc(newdim * newroots[0].sizeof);
1505             if (!newroots)
1506                 onOutOfMemoryError();
1507             if (roots)
1508             {   .memcpy(newroots, roots, nroots * newroots[0].sizeof);
1509                 .free(roots);
1510             }
1511             roots = newroots;
1512             rootdim = newdim;
1513         }
1514         roots[nroots] = p;
1515         nroots++;
1516     }
1517
1518
1519     /**
1520      * Remove p from the roots[] array; asserts if it is not present.
1521      */
1522     void removeRoot(void *p)
1523     {
1524         for (size_t i = nroots; i--;)
1525         {
1526             if (roots[i] == p)
1527             {
1528                 nroots--;
1529                 .memmove(roots + i, roots + i + 1, (nroots - i) * roots[0].sizeof);
1530                 return;
1531             }
1532         }
1533         assert(0);
1534     }
1535
1536
1537     /**
1538      * Append the range [pbot, ptop) to the ranges[] array, growing it as needed.
1539      */
1540     void addRange(void *pbot, void *ptop)
1541     {
1542         debug (PRINTF) printf("%x.Gcx::addRange(%x, %x), nranges = %d\n", this,
1543                 pbot, ptop, nranges);
1544         if (nranges == rangedim)
1545         {
1546             size_t newdim = rangedim * 2 + 16;
1547             Range *newranges;
1548
1549             newranges = cast(Range*) .malloc(newdim * newranges[0].sizeof);
1550             if (!newranges)
1551                 onOutOfMemoryError();
1552             if (ranges)
1553             {   .memcpy(newranges, ranges, nranges * newranges[0].sizeof);
1554                 .free(ranges);
1555             }
1556             ranges = newranges;
1557             rangedim = newdim;
1558         }
1559         ranges[nranges].pbot = pbot;
1560         ranges[nranges].ptop = ptop;
1561         nranges++;
1562     }
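    /* Worked example of the growth policy above: rangedim grows as
     * 0 -> 16 -> 48 -> 112 -> ... (newdim = rangedim * 2 + 16), so repeated
     * addRange() calls cause only a logarithmic number of reallocations.
     */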
1563
1564
1565     /**
1566      * Remove the range starting at pbot from the ranges[] array; an unknown pbot is silently ignored (see the comment below).
1567      */
1568     void removeRange(void *pbot)
1569     {
1570         debug (PRINTF) printf("%x.Gcx.removeRange(%x), nranges = %d\n", this,
1571                 pbot, nranges);
1572         for (size_t i = nranges; i--;)
1573         {
1574             if (ranges[i].pbot == pbot)
1575             {
1576                 nranges--;
1577                 .memmove(ranges + i, ranges + i + 1, (nranges - i) * ranges[0].sizeof);
1578                 return;
1579             }
1580         }
1581         debug(PRINTF) printf("Wrong thread\n");
1582
1583         // This is a fatal error, but ignore it.
1584         // The problem is that we can get a Close() call on a thread
1585         // other than the one the range was allocated on.
1586         //assert(zero);
1587     }
1588
1589
1590     /**
1591      * Find Pool that pointer is in.
1592      * Return null if not in a Pool.
1593      * Assume pooltable[] is sorted.
1594      */
1595     Pool *findPool(void *p)
1596     {
1597         if (p >= minAddr && p < maxAddr)
1598         {
1599             if (npools == 1)
1600             {
1601                 return pooltable[0];
1602             }
1603
1604             for (size_t i = 0; i < npools; i++)
1605             {   Pool *pool;
1606
1607                 pool = pooltable[i];
1608                 if (p < pool.topAddr)
1609                 {   if (pool.baseAddr <= p)
1610                         return pool;
1611                     break;
1612                 }
1613             }
1614         }
1615         return null;
1616     }
1617
1618
1619     /**
1620      * Find base address of block containing pointer p.
1621      * Returns null if not a gc'd pointer
1622      */
1623     void* findBase(void *p)
1624     {
1625         Pool *pool;
1626
1627         pool = findPool(p);
1628         if (pool)
1629         {
1630             size_t offset = cast(size_t)(p - pool.baseAddr);
1631             size_t pn = offset / PAGESIZE;
1632             Bins   bin = cast(Bins)pool.pagetable[pn];
1633
1634             // Adjust bit to be at start of allocated memory block
1635             if (bin <= B_PAGE)
1636             {
1637                 return pool.baseAddr + (offset & notbinsize[bin]);
1638             }
1639             else if (bin == B_PAGEPLUS)
1640             {
1641                 do
1642                 {   --pn, offset -= PAGESIZE;
1643                 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1644
1645                 return pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1646             }
1647             else
1648             {
1649                 // we are in a B_FREE or B_UNCOMMITTED page
1650                 return null;
1651             }
1652         }
1653         return null;
1654     }
1655
1656
1657     /**
1658      * Find size of pointer p.
1659      * Returns 0 if not a gc'd pointer
1660      */
1661     size_t findSize(void *p)
1662     {
1663         Pool*  pool;
1664         size_t size = 0;
1665
1666         pool = findPool(p);
1667         if (pool)
1668         {
1669             size_t pagenum;
1670             Bins   bin;
1671
1672             pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1673             bin = cast(Bins)pool.pagetable[pagenum];
1674             size = binsize[bin];
1675             if (bin == B_PAGE)
1676             {   size_t npages = pool.ncommitted;
1677                 ubyte* pt;
1678                 size_t i;
1679
1680                 pt = &pool.pagetable[0];
1681                 for (i = pagenum + 1; i < npages; i++)
1682                 {
1683                     if (pt[i] != B_PAGEPLUS)
1684                         break;
1685                 }
1686                 size = (i - pagenum) * PAGESIZE;
1687             }
1688         }
1689         return size;
1690     }
1691
1692
1693     /**
1694      * Return the base address, size and attribute bits of the block containing p.
1695      */
1696     BlkInfo getInfo(void* p)
1697     {
1698         Pool*   pool;
1699         BlkInfo info;
1700
1701         pool = findPool(p);
1702         if (pool)
1703         {
1704             size_t offset = cast(size_t)(p - pool.baseAddr);
1705             size_t pn = offset / PAGESIZE;
1706             Bins   bin = cast(Bins)pool.pagetable[pn];
1707
1708             ////////////////////////////////////////////////////////////////////
1709             // findAddr
1710             ////////////////////////////////////////////////////////////////////
1711
1712             if (bin <= B_PAGE)
1713             {
1714                 info.base = pool.baseAddr + (offset & notbinsize[bin]);
1715             }
1716             else if (bin == B_PAGEPLUS)
1717             {
1718                 do
1719                 {   --pn, offset -= PAGESIZE;
1720                 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1721
1722                 info.base = pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1723
1724                 // fix bin for use by size calc below
1725                 bin = cast(Bins)pool.pagetable[pn];
1726             }
1727
1728             ////////////////////////////////////////////////////////////////////
1729             // findSize
1730             ////////////////////////////////////////////////////////////////////
1731
1732             info.size = binsize[bin];
1733             if (bin == B_PAGE)
1734             {   size_t npages = pool.ncommitted;
1735                 ubyte* pt;
1736                 size_t i;
1737
1738                 pt = &pool.pagetable[0];
1739                 for (i = pn + 1; i < npages; i++)
1740                 {
1741                     if (pt[i] != B_PAGEPLUS)
1742                         break;
1743                 }
1744                 info.size = (i - pn) * PAGESIZE;
1745             }
1746
1747             ////////////////////////////////////////////////////////////////////
1748             // getBits
1749             ////////////////////////////////////////////////////////////////////
1750
1751             info.attr = getBits(pool, cast(size_t)(offset / 16));
1752         }
1753         return info;
1754     }
1755
1756
1757     /**
1758      * Compute bin for size.
1759      */
1760     static Bins findBin(size_t size)
1761     {   Bins bin;
1762
1763         if (size <= 256)
1764         {
1765             if (size <= 64)
1766             {
1767                 if (size <= 16)
1768                     bin = B_16;
1769                 else if (size <= 32)
1770                     bin = B_32;
1771                 else
1772                     bin = B_64;
1773             }
1774             else
1775             {
1776                 if (size <= 128)
1777                     bin = B_128;
1778                 else
1779                     bin = B_256;
1780             }
1781         }
1782         else
1783         {
1784             if (size <= 1024)
1785             {
1786                 if (size <= 512)
1787                     bin = B_512;
1788                 else
1789                     bin = B_1024;
1790             }
1791             else
1792             {
1793                 if (size <= 2048)
1794                     bin = B_2048;
1795                 else
1796                     bin = B_PAGE;
1797             }
1798         }
1799         return bin;
1800     }
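    /* Illustrative values (these follow directly from the comparisons above):
     *
     *     findBin(1)    == B_16       findBin(100)  == B_128
     *     findBin(16)   == B_16       findBin(2048) == B_2048
     *     findBin(17)   == B_32       findBin(2049) == B_PAGE
     */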
1801
1802
1803     /**
1804      * Allocate a new pool of at least size bytes.
1805      * Sort it into pooltable[].
1806      * Mark all memory in the pool as B_FREE.
1807      * Return the actual number of bytes reserved or 0 on error.
1808      */
1809     size_t reserve(size_t size)
1810     {
1811         size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
1812         Pool*  pool = newPool(npages);
1813
1814         if (!pool || pool.extendPages(npages) == OPFAIL)
1815             return 0;
1816         return pool.ncommitted * PAGESIZE;
1817     }
1818
1819
1820     /**
1821      * Minimizes physical memory usage by returning free pools to the OS.
1822      */
1823     void minimize()
1824     {
1825         size_t n;
1826         size_t pn;
1827         Pool*  pool;
1828         size_t ncommitted;
1829
1830         for (n = 0; n < npools; n++)
1831         {
1832             pool = pooltable[n];
1833             ncommitted = pool.ncommitted;
1834             for (pn = 0; pn < ncommitted; pn++)
1835             {
1836                 if (cast(Bins)pool.pagetable[pn] != B_FREE)
1837                     break;
1838             }
1839             if (pn < ncommitted)
1840             {
1841                 // this pool still has allocated pages; just check the next one
1842                 continue;
1843             }
1844             pool.Dtor();
1845             .free(pool);
1846             .memmove(pooltable + n,
1847                             pooltable + n + 1,
1848                             (--npools - n) * (Pool*).sizeof);
1849             minAddr = pooltable[0].baseAddr;
1850             maxAddr = pooltable[npools - 1].topAddr;
1851         }
1852     }
1853
1854
1855     /**
1856      * Allocate a chunk of memory that is larger than a page.
1857      * Return null if out of memory.
1858      */
1859     void *bigAlloc(size_t size)
1860     {
1861         Pool*  pool;
1862         size_t npages;
1863         size_t n;
1864         size_t pn;
1865         size_t freedpages;
1866         void*  p;
1867         int    state;
1868
1869         npages = (size + PAGESIZE - 1) / PAGESIZE;
1870
1871         for (state = 0; ; )
1872         {
1873             // This code could use some refinement when repeatedly
1874             // allocating very large arrays.
1875
1876             for (n = 0; n < npools; n++)
1877             {
1878                 pool = pooltable[n];
1879                 pn = pool.allocPages(npages);
1880                 if (pn != OPFAIL)
1881                     goto L1;
1882             }
1883
1884             // Failed
1885             switch (state)
1886             {
1887             case 0:
1888                 if (disabled)
1889                 {   state = 1;
1890                     continue;
1891                 }
1892                 // Try collecting
1893                 freedpages = fullcollectshell();
1894                 if (freedpages >= npools * ((POOLSIZE / PAGESIZE) / 4))
1895                 {   state = 1;
1896                     continue;
1897                 }
1898                 // Release empty pools to prevent bloat
1899                 minimize();
1900                 // Allocate new pool
1901                 pool = newPool(npages);
1902                 if (!pool)
1903                 {   state = 2;
1904                     continue;
1905                 }
1906                 pn = pool.allocPages(npages);
1907                 assert(pn != OPFAIL);
1908                 goto L1;
1909             case 1:
1910                 // Release empty pools to prevent bloat
1911                 minimize();
1912                 // Allocate new pool
1913                 pool = newPool(npages);
1914                 if (!pool)
1915                     goto Lnomemory;
1916                 pn = pool.allocPages(npages);
1917                 assert(pn != OPFAIL);
1918                 goto L1;
1919             case 2:
1920                 goto Lnomemory;
1921             default:
1922                 assert(false);
1923             }
1924         }
1925
1926       L1:
1927         pool.pagetable[pn] = B_PAGE;
1928         if (npages > 1)
1929             .memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
1930         p = pool.baseAddr + pn * PAGESIZE;
1931         .memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
1932         debug (MEMSTOMP) .memset(p, 0xF1, size);
1933         //debug(PRINTF) printf("\tp = %x\n", p);
1934         return p;
1935
1936       Lnomemory:
1937         return null; // let mallocNoSync handle the error
1938     }
1939
1940
1941     /**
1942      * Allocate a new pool with at least npages in it.
1943      * Sort it into pooltable[].
1944      * Return null if failed.
1945      */
1946     Pool *newPool(size_t npages)
1947     {
1948         Pool*  pool;
1949         Pool** newpooltable;
1950         size_t newnpools;
1951         size_t i;
1952
1953         //debug(PRINTF) printf("************Gcx::newPool(npages = %d)****************\n", npages);
1954
1955         // Round up to COMMITSIZE pages
1956         npages = (npages + (COMMITSIZE/PAGESIZE) - 1) & ~(COMMITSIZE/PAGESIZE - 1);
1957
1958         // Minimum of POOLSIZE
1959         if (npages < POOLSIZE/PAGESIZE)
1960             npages = POOLSIZE/PAGESIZE;
1961         else if (npages > POOLSIZE/PAGESIZE)
1962         {   // Give us 150% of requested size, so there's room to extend
1963             auto n = npages + (npages >> 1);
1964             if (n < size_t.max/PAGESIZE)
1965                 npages = n;
1966         }
1967
1968         // Allocate successively larger pools up to 8 megs
1969         if (npools)
1970         {   size_t n;
1971
1972             n = npools;
1973             if (n > 8)
1974                 n = 8;                  // cap pool size at 8 megs
1975             n *= (POOLSIZE / PAGESIZE);
1976             if (npages < n)
1977                 npages = n;
1978         }
1979
1980         pool = cast(Pool *) .calloc(1, Pool.sizeof);
1981         if (pool)
1982         {
1983             pool.initialize(npages);
1984             if (!pool.baseAddr)
1985                 goto Lerr;
1986
1987             newnpools = npools + 1;
1988             newpooltable = cast(Pool **) .realloc(pooltable, newnpools * (Pool *).sizeof);
1989             if (!newpooltable)
1990                 goto Lerr;
1991
1992             // Sort pool into newpooltable[]
1993             for (i = 0; i < npools; i++)
1994             {
1995                 if (pool.opCmp(newpooltable[i]) < 0)
1996                      break;
1997             }
1998             .memmove(newpooltable + i + 1, newpooltable + i, (npools - i) * (Pool *).sizeof);
1999             newpooltable[i] = pool;
2000
2001             pooltable = newpooltable;
2002             npools = newnpools;
2003
2004             minAddr = pooltable[0].baseAddr;
2005             maxAddr = pooltable[npools - 1].topAddr;
2006         }
2007         return pool;
2008
2009       Lerr:
2010         pool.Dtor();
2011         .free(pool);
2012         return null;
2013     }
2014
2015
2016     /**
2017      * Allocate a page of entries for the given bin and thread them onto its free list.
2018      * Returns:
2019      *  0 on failure, 1 on success
2020      */
2021     int allocPage(Bins bin)
2022     {
2023         Pool*  pool;
2024         size_t n;
2025         size_t pn;
2026         byte*  p;
2027         byte*  ptop;
2028
2029         //debug(PRINTF) printf("Gcx::allocPage(bin = %d)\n", bin);
2030         for (n = 0; n < npools; n++)
2031         {
2032             pool = pooltable[n];
2033             pn = pool.allocPages(1);
2034             if (pn != OPFAIL)
2035                 goto L1;
2036         }
2037         return 0;               // failed
2038
2039       L1:
2040         pool.pagetable[pn] = cast(ubyte)bin;
2041
2042         // Convert page to free list
2043         size_t size = binsize[bin];
2044         List **b = &bucket[bin];
2045
2046         p = pool.baseAddr + pn * PAGESIZE;
2047         ptop = p + PAGESIZE;
2048         for (; p < ptop; p += size)
2049         {
2050             (cast(List *)p).next = *b;
2051             *b = cast(List *)p;
2052         }
2053         return 1;
2054     }
2055
2056
2057     /**
2058      * Search a range of memory values and mark any pointers into the GC pool.
2059      */
2060     void mark(void *pbot, void *ptop)
2061     {
2062         void **p1 = cast(void **)pbot;
2063         void **p2 = cast(void **)ptop;
2064         size_t pcache = 0;
2065         uint changes = 0;
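             // Every pointer-sized word in the range is treated as a potential
             // pointer.  pcache remembers the page of the last pointer seen into a
             // B_PAGE/B_PAGEPLUS block, so repeated pointers into the same large
             // block skip the pool lookup.  Mark/scan bits are kept at 16-byte
             // granularity, so a pool offset maps to bit index offset / 16.
             //
             // Illustrative example (assuming a 16-byte bin, i.e. binsize[bin] == 16):
             // a pointer at pool offset 0x1238 yields
             //     biti = (0x1238 & notbinsize[bin]) >> 4 == 0x123.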
2066
2067         //printf("marking range: %p -> %p\n", pbot, ptop);
2068         for (; p1 < p2; p1++)
2069         {
2070             Pool *pool;
2071             byte *p = cast(byte *)(*p1);
2072
2073             //if (log) debug(PRINTF) printf("\tmark %x\n", p);
2074             if (p >= minAddr && p < maxAddr)
2075             {
2076                 if ((cast(size_t)p & ~(PAGESIZE-1)) == pcache)
2077                     continue;
2078
2079                 pool = findPool(p);
2080                 if (pool)
2081                 {
2082                     size_t offset = cast(size_t)(p - pool.baseAddr);
2083                     size_t biti;
2084                     size_t pn = offset / PAGESIZE;
2085                     Bins   bin = cast(Bins)pool.pagetable[pn];
2086
2087                     //debug(PRINTF) printf("\t\tfound pool %x, base=%x, pn = %d, bin = %d, biti = x%x\n", pool, pool.baseAddr, pn, bin, biti);
2088
2089                     // Adjust bit to be at start of allocated memory block
2090                     if (bin <= B_PAGE)
2091                     {
2092                         biti = (offset & notbinsize[bin]) >> 4;
2093                         //debug(PRINTF) printf("\t\tbiti = x%x\n", biti);
2094                     }
2095                     else if (bin == B_PAGEPLUS)
2096                     {
2097                         do
2098                         {   --pn;
2099                         } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
2100                         biti = pn * (PAGESIZE / 16);
2101                     }
2102                     else
2103                     {
2104                         // Don't mark bits in B_FREE or B_UNCOMMITTED pages
2105                         continue;
2106                     }
2107
2108                     if (bin >= B_PAGE) // Cache B_PAGE and B_PAGEPLUS lookups
2109                         pcache = cast(size_t)p & ~(PAGESIZE-1);
2110
2111                     //debug(PRINTF) printf("\t\tmark(x%x) = %d\n", biti, pool.mark.test(biti));
2112                     if (!pool.mark.test(biti))
2113                     {
2114                         //if (log) debug(PRINTF) printf("\t\tmarking %x\n", p);
2115                         pool.mark.set(biti);
2116                         if (!pool.noscan.test(biti))
2117                         {
2118                             pool.scan.set(biti);
2119                             changes = 1;
2120                         }
2121                         log_parent(sentinel_add(pool.baseAddr + biti * 16), sentinel_add(pbot));
2122                     }
2123                 }
2124             }
2125         }
2126         anychanges |= changes;
2127     }
2128
2129
2130     /**
2131      * Return the number of full pages freed.
2132      */
2133     size_t fullcollectshell()
2134     {
2135         // The purpose of the 'shell' is to ensure all the registers
2136         // get put on the stack so they'll be scanned
2137         void *sp;
2138         size_t result;
2139         version (GNU)
2140         {
2141             __builtin_unwind_init();
2142             sp = & sp;
2143         }
2144         else version(LDC)
2145         {
2146             version(X86)
2147             {
2148                 uint eax,ecx,edx,ebx,ebp,esi,edi;
2149                 asm
2150                 {
2151                     mov eax[EBP], EAX      ;
2152                     mov ecx[EBP], ECX      ;
2153                     mov edx[EBP], EDX      ;
2154                     mov ebx[EBP], EBX      ;
2155                     mov ebp[EBP], EBP      ;
2156                     mov esi[EBP], ESI      ;
2157                     mov edi[EBP], EDI      ;
2158                     mov sp[EBP],ESP     ;
2159                 }
2160             }
2161             else version (X86_64)
2162             {
2163                 ulong rax,rbx,rcx,rdx,rbp,rsi,rdi,rsp,r8,r9,r10,r11,r12,r13,r14,r15;
2164                 asm
2165                 {
2166                     movq rax[RBP], RAX      ;
2167                     movq rbx[RBP], RBX      ;
2168                     movq rcx[RBP], RCX      ;
2169                     movq rdx[RBP], RDX      ;
2170                     movq rbp[RBP], RBP      ;
2171                     movq rsi[RBP], RSI      ;
2172                     movq rdi[RBP], RDI      ;
2173                     movq rsp[RBP], RSP      ;
2174                     movq  r8[RBP], R8       ;
2175                     movq  r9[RBP], R9       ;
2176                     movq r10[RBP], R10      ;
2177                     movq r11[RBP], R11      ;
2178                     movq r12[RBP], R12      ;
2179                     movq r13[RBP], R13      ;
2180                     movq r14[RBP], R14      ;
2181                     movq r15[RBP], R15      ;
2182                 }
2183             }
2184             else
2185             {
2186                 static assert( false, "Architecture not supported." );
2187             }
2188         }
2189         else
2190         {
2191         asm
2192         {
2193             pushad              ;
2194             mov sp[EBP],ESP     ;
2195         }
2196         }
2197         result = fullcollect(sp);
2198         version (GNU)
2199         {
2200             // nothing to do
2201         }
2202         else version(LDC)
2203         {
2204             // nothing to do
2205         }
2206         else
2207         {
2208         asm
2209         {
2210             popad               ;
2211         }
2212         }
2213         return result;
2214     }
2215
2216
2217     /**
2218      * Run a full mark-and-sweep collection; returns the number of pages made available.
2219      */
2220     size_t fullcollect(void *stackTop)
2221     {
2222         size_t n;
2223         Pool*  pool;
2224
2225         debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
2226
2227         thread_suspendAll();
2228
2229         p_cache = null;
2230         size_cache = 0;
2231
2232         anychanges = 0;
2233         for (n = 0; n < npools; n++)
2234         {
2235             pool = pooltable[n];
2236             pool.mark.zero();
2237             pool.scan.zero();
2238             pool.freebits.zero();
2239         }
2240
2241         // Mark each free entry, so it doesn't get scanned
2242         for (n = 0; n < B_PAGE; n++)
2243         {
2244             for (List *list = bucket[n]; list; list = list.next)
2245             {
2246                 pool = findPool(list);
2247                 assert(pool);
2248                 pool.freebits.set(cast(size_t)(cast(byte*)list - pool.baseAddr) / 16);
2249             }
2250         }
2251
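             // Seed the mark bits with the free bits: stray pointers to entries on
             // a free list are then never scanned, and the sweep below will not
             // collect them a second time.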
2252         for (n = 0; n < npools; n++)
2253         {
2254             pool = pooltable[n];
2255             pool.mark.copy(&pool.freebits);
2256         }
2257
2258         rt_scanStaticData( &mark );
2259
2260         version (MULTI_THREADED)
2261         {
2262             if (!noStack)
2263             {
2264                 // Scan stacks and registers for each paused thread
2265                 thread_scanAll( &mark, stackTop );
2266             }
2267         }
2268         else
2269         {
2270             if (!noStack)
2271             {
2272                 // Scan stack for main thread
2273                 debug(PRINTF) printf(" scan stack bot = %x, top = %x\n", stackTop, stackBottom);
2274                 version (STACKGROWSDOWN)
2275                     mark(stackTop, stackBottom);
2276                 else
2277                     mark(stackBottom, stackTop);
2278             }
2279         }
2280
2281         // Scan roots[]
2282         debug(COLLECT_PRINTF) printf("scan roots[]\n");
2283         mark(roots, roots + nroots);
2284
2285         // Scan ranges[]
2286         debug(COLLECT_PRINTF) printf("scan ranges[]\n");
2287         //log++;
2288         for (n = 0; n < nranges; n++)
2289         {
2290             debug(COLLECT_PRINTF) printf("\t%x .. %x\n", ranges[n].pbot, ranges[n].ptop);
2291             mark(ranges[n].pbot, ranges[n].ptop);
2292         }
2293         //log--;
2294
2295         debug(COLLECT_PRINTF) printf("\tscan heap\n");
2296         while (anychanges)
2297         {
2298             anychanges = 0;
2299             for (n = 0; n < npools; n++)
2300             {
2301                 uint *bbase;
2302                 uint *b;
2303                 uint *btop;
2304
2305                 pool = pooltable[n];
2306
2307                 bbase = pool.scan.base();
2308                 btop = bbase + pool.scan.nwords;
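                     // Walk the scan bits one 32-bit word at a time; each word
                     // covers 32 * 16 == 512 bytes of the pool.  A word is cleared
                     // before its objects are scanned, and mark() raises anychanges
                     // whenever it queues new objects, so the enclosing while loop
                     // repeats until the scan set is empty.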
2309                 for (b = bbase; b < btop;)
2310                 {   Bins   bin;
2311                     size_t pn;
2312                     size_t u;
2313                     size_t bitm;
2314                     byte*  o;
2315
2316                     bitm = *b;
2317                     if (!bitm)
2318                     {   b++;
2319                         continue;
2320                     }
2321                     *b = 0;
2322
2323                     o = pool.baseAddr + (b - bbase) * 32 * 16;
2324                     if (!(bitm & 0xFFFF))
2325                     {
2326                         bitm >>= 16;
2327                         o += 16 * 16;
2328                     }
2329                     for (; bitm; o += 16, bitm >>= 1)
2330                     {
2331                         if (!(bitm & 1))
2332                             continue;
2333
2334                         pn = cast(size_t)(o - pool.baseAddr) / PAGESIZE;
2335                         bin = cast(Bins)pool.pagetable[pn];
2336                         if (bin < B_PAGE)
2337                         {
2338                             mark(o, o + binsize[bin]);
2339                         }
2340                         else if (bin == B_PAGE || bin == B_PAGEPLUS)
2341                         {
2342                             if (bin == B_PAGEPLUS)
2343                             {
2344                                 while (pool.pagetable[pn - 1] != B_PAGE)
2345                                     pn--;
2346                             }
2347                             u = 1;
2348                             while (pn + u < pool.ncommitted && pool.pagetable[pn + u] == B_PAGEPLUS)
2349                                 u++;
2350                             mark(o, o + u * PAGESIZE);
2351                         }
2352                     }
2353                 }
2354             }
2355         }
2356
2357         thread_resumeAll();
2358
2359         // Free up everything not marked
2360         debug(COLLECT_PRINTF) printf("\tfree'ing\n");
2361         size_t freedpages = 0;
2362         size_t freed = 0;
2363         for (n = 0; n < npools; n++)
2364         {   size_t pn;
2365             size_t ncommitted;
2366             uint*  bbase;
2367
2368             pool = pooltable[n];
2369             bbase = pool.mark.base();
2370             ncommitted = pool.ncommitted;
2371             for (pn = 0; pn < ncommitted; pn++, bbase += PAGESIZE / (32 * 16))
2372             {
2373                 Bins bin = cast(Bins)pool.pagetable[pn];
2374
2375                 if (bin < B_PAGE)
2376                 {   byte* p;
2377                     byte* ptop;
2378                     size_t biti;
2379                     size_t bitstride;
2380                     auto   size = binsize[bin];
2381
2382                     p = pool.baseAddr + pn * PAGESIZE;
2383                     ptop = p + PAGESIZE;
2384                     biti = pn * (PAGESIZE/16);
2385                     bitstride = size / 16;
2386
2387     version(none) // BUG: doesn't work because freebits() must also be cleared
2388     {
2389                     // If free'd entire page
2390                     if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 && bbase[3] == 0 &&
2391                         bbase[4] == 0 && bbase[5] == 0 && bbase[6] == 0 && bbase[7] == 0)
2392                     {
2393                         for (; p < ptop; p += size, biti += bitstride)
2394                         {
2395                             if (pool.finals.nbits && pool.finals.testClear(biti))
2396                                 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2397                             clrBits(pool, biti, BlkAttr.ALL_BITS);
2398
2399                             List *list = cast(List *)p;
2400                             //debug(PRINTF) printf("\tcollecting %x\n", list);
2401                             log_free(sentinel_add(list));
2402
2403                             debug (MEMSTOMP) .memset(p, 0xF3, size);
2404                         }
2405                         pool.pagetable[pn] = B_FREE;
2406                         freed += PAGESIZE;
2407                         //debug(PRINTF) printf("freeing entire page %d\n", pn);
2408                         continue;
2409                     }
2410     }
2411                     for (; p < ptop; p += size, biti += bitstride)
2412                     {
2413                         if (!pool.mark.test(biti))
2414                         {
2415                             sentinel_Invariant(sentinel_add(p));
2416
2417                             pool.freebits.set(biti);
2418                             if (pool.finals.nbits && pool.finals.testClear(biti))
2419                                 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2420                             clrBits(pool, biti, BlkAttr.ALL_BITS);
2421
2422                             List *list = cast(List *)p;
2423                             debug(PRINTF) printf("\tcollecting %x\n", list);
2424                             log_free(sentinel_add(list));
2425
2426                             debug (MEMSTOMP) .memset(p, 0xF3, size);
2427
2428                             freed += size;
2429                         }
2430                     }
2431                 }
2432                 else if (bin == B_PAGE)
2433                 {   size_t biti = pn * (PAGESIZE / 16);
2434
2435                     if (!pool.mark.test(biti))
2436                     {   byte *p = pool.baseAddr + pn * PAGESIZE;
2437
2438                         sentinel_Invariant(sentinel_add(p));
2439                         if (pool.finals.nbits && pool.finals.testClear(biti))
2440                             rt_finalize(sentinel_add(p), false/*noStack > 0*/);
2441                         clrBits(pool, biti, BlkAttr.ALL_BITS);
2442
2443                         debug(COLLECT_PRINTF) printf("\tcollecting big %x\n", p);
2444                         log_free(sentinel_add(p));
2445                         pool.pagetable[pn] = B_FREE;
2446                         freedpages++;
2447                         debug (MEMSTOMP) .memset(p, 0xF3, PAGESIZE);
2448                         while (pn + 1 < ncommitted && pool.pagetable[pn + 1] == B_PAGEPLUS)
2449                         {
2450                             pn++;
2451                             pool.pagetable[pn] = B_FREE;
2452                             freedpages++;
2453
2454                             debug (MEMSTOMP)
2455                             {   p += PAGESIZE;
2456                                 .memset(p, 0xF3, PAGESIZE);
2457                             }
2458                         }
2459                     }
2460                 }
2461             }
2462         }
2463
2464         // Clear the free list buckets; they are rebuilt from the free bits below
2465         bucket[] = null;
2466
2467         // Free complete pages, rebuild free list
2468         debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
2469         size_t recoveredpages = 0;
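             // For each small-bin page: if every entry has its free bit set, the
             // whole page is returned to the pool as B_FREE; otherwise its free
             // entries are threaded back onto bucket[bin].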
2470         for (n = 0; n < npools; n++)
2471         {   size_t pn;
2472             size_t ncommitted;
2473
2474             pool = pooltable[n];
2475             ncommitted = pool.ncommitted;
2476             for (pn = 0; pn < ncommitted; pn++)
2477             {
2478                 Bins   bin = cast(Bins)pool.pagetable[pn];
2479                 size_t biti;
2480                 size_t u;
2481
2482                 if (bin < B_PAGE)
2483                 {
2484                     size_t size = binsize[bin];
2485                     size_t bitstride = size / 16;
2486                     size_t bitbase = pn * (PAGESIZE / 16);
2487                     size_t bittop = bitbase + (PAGESIZE / 16);
2488                     byte*  p;
2489
2490                     biti = bitbase;
2491                     for (biti = bitbase; biti < bittop; biti += bitstride)
2492                     {   if (!pool.freebits.test(biti))
2493                             goto Lnotfree;
2494                     }
2495                     pool.pagetable[pn] = B_FREE;
2496                     recoveredpages++;
2497                     continue;
2498
2499                  Lnotfree:
2500                     p = pool.baseAddr + pn * PAGESIZE;
2501                     for (u = 0; u < PAGESIZE; u += size)
2502                     {   biti = bitbase + u / 16;
2503                         if (pool.freebits.test(biti))
2504                         {   List *list;
2505
2506                             list = cast(List *)(p + u);
2507                             if (list.next != bucket[bin])       // avoid unnecessary writes
2508                                 list.next = bucket[bin];
2509                             bucket[bin] = list;
2510                         }
2511                     }
2512                 }
2513             }
2514         }
2515
2516         debug(COLLECT_PRINTF) printf("recovered pages = %d\n", recoveredpages);
2517         debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, npools);
2518
2519         return freedpages + recoveredpages;
2520     }
2521
2522
2523     /**
2524      * Return the BlkAttr bits set on the block at bit index biti in pool.
2525      */
2526     uint getBits(Pool* pool, size_t biti)
2527     in
2528     {
2529         assert( pool );
2530     }
2531     body
2532     {
2533         uint bits;
2534
2535         if (pool.finals.nbits &&
2536             pool.finals.test(biti))
2537             bits |= BlkAttr.FINALIZE;
2538         if (pool.noscan.test(biti))
2539             bits |= BlkAttr.NO_SCAN;
2540 //        if (pool.nomove.nbits &&
2541 //            pool.nomove.test(biti))
2542 //            bits |= BlkAttr.NO_MOVE;
2543         return bits;
2544     }
2545
2546
2547     /**
2548      * Set the BlkAttr bits given by mask on the block at bit index biti in pool.
2549      */
2550     void setBits(Pool* pool, size_t biti, uint mask)
2551     in
2552     {
2553         assert( pool );
2554     }
2555     body
2556     {
2557         if (mask & BlkAttr.FINALIZE)
2558         {
2559             if (!pool.finals.nbits)
2560                 pool.finals.alloc(pool.mark.nbits);
2561             pool.finals.set(biti);
2562         }
2563         if (mask & BlkAttr.NO_SCAN)
2564         {
2565             pool.noscan.set(biti);
2566         }
2567 //        if (mask & BlkAttr.NO_MOVE)
2568 //        {
2569 //            if (!pool.nomove.nbits)
2570 //                pool.nomove.alloc(pool.mark.nbits);
2571 //            pool.nomove.set(biti);
2572 //        }
2573     }
2574
2575
2576     /**
2577      * Clear the BlkAttr bits given by mask on the block at bit index biti in pool.
2578      */
2579     void clrBits(Pool* pool, size_t biti, uint mask)
2580     in
2581     {
2582         assert( pool );
2583     }
2584     body
2585     {
2586         if (mask & BlkAttr.FINALIZE && pool.finals.nbits)
2587             pool.finals.clear(biti);
2588         if (mask & BlkAttr.NO_SCAN)
2589             pool.noscan.clear(biti);
2590 //        if (mask & BlkAttr.NO_MOVE && pool.nomove.nbits)
2591 //            pool.nomove.clear(biti);
2592     }
2593
2594
2595     /***** Leak Detector ******/
2596
2597
2598     debug (LOGGING)
2599     {
2600         LogArray current;
2601         LogArray prev;
2602
2603
2604         void log_init()
2605         {
2606             //debug(PRINTF) printf("+log_init()\n");
2607             current.reserve(1000);
2608             prev.reserve(1000);
2609             //debug(PRINTF) printf("-log_init()\n");
2610         }
2611
2612
2613         void log_malloc(void *p, size_t size)
2614         {
2615             //debug(PRINTF) printf("+log_malloc(p = %x, size = %d)\n", p, size);
2616             Log log;
2617
2618             log.p = p;
2619             log.size = size;
2620             log.line = GC.line;
2621             log.file = GC.file;
2622             log.parent = null;
2623
2624             GC.line = 0;
2625             GC.file = null;
2626
2627             current.push(log);
2628             //debug(PRINTF) printf("-log_malloc()\n");
2629         }
2630
2631
2632         void log_free(void *p)
2633         {
2634             //debug(PRINTF) printf("+log_free(%x)\n", p);
2635             size_t i;
2636
2637             i = current.find(p);
2638             if (i == OPFAIL)
2639             {
2640                 debug(PRINTF) printf("free'ing unallocated memory %x\n", p);
2641             }
2642             else
2643                 current.remove(i);
2644             //debug(PRINTF) printf("-log_free()\n");
2645         }
2646
2647
2648         void log_collect()
2649         {
2650             //debug(PRINTF) printf("+log_collect()\n");
2651             // Print everything in current that is not in prev
2652
2653             debug(PRINTF) printf("New pointers this cycle: --------------------------------\n");
2654             size_t used = 0;
2655             for (size_t i = 0; i < current.dim; i++)
2656             {
2657                 size_t j;
2658
2659                 j = prev.find(current.data[i].p);
2660                 if (j == OPFAIL)
2661                     current.data[i].print();
2662                 else
2663                     used++;
2664             }
2665
2666             debug(PRINTF) printf("All roots this cycle: --------------------------------\n");
2667             for (size_t i = 0; i < current.dim; i++)
2668             {
2669                 void *p;
2670                 size_t j;
2671
2672                 p = current.data[i].p;
2673                 if (!findPool(current.data[i].parent))
2674                 {
2675                     j = prev.find(current.data[i].p);
2676                     if (j == OPFAIL)
2677                         debug(PRINTF) printf("N");
2678                     else
2679                         debug(PRINTF) printf(" ");
2680                     current.data[i].print();
2681                 }
2682             }
2683
2684             debug(PRINTF) printf("Used = %d-------------------------------------------------\n", used);
2685             prev.copy(&current);
2686
2687             debug(PRINTF) printf("-log_collect()\n");
2688         }
2689
2690
2691         void log_parent(void *p, void *parent)
2692         {
2693             //debug(PRINTF) printf("+log_parent()\n");
2694             size_t i;
2695
2696             i = current.find(p);
2697             if (i == OPFAIL)
2698             {
2699                 debug(PRINTF) printf("parent'ing unallocated memory %x, parent = %x\n", p, parent);
2700                 Pool *pool;
2701                 pool = findPool(p);
2702                 assert(pool);
2703                 size_t offset = cast(size_t)(p - pool.baseAddr);
2704                 size_t biti;
2705                 size_t pn = offset / PAGESIZE;
2706                 Bins bin = cast(Bins)pool.pagetable[pn];
2707                 biti = (offset & notbinsize[bin]);
2708                 debug(PRINTF) printf("\tbin = %d, offset = x%x, biti = x%x\n", bin, offset, biti);
2709             }
2710             else
2711             {
2712                 current.data[i].parent = parent;
2713             }
2714             //debug(PRINTF) printf("-log_parent()\n");
2715         }
2716
2717     }
2718     else
2719     {
2720         void log_init() { }
2721         void log_malloc(void *p, size_t size) { }
2722         void log_free(void *p) { }
2723         void log_collect() { }
2724         void log_parent(void *p, void *parent) { }
2725     }
2726 }
2727
2728
2729 /* ============================ Pool  =============================== */
2730
2731
2732 struct Pool
2733 {
2734     byte* baseAddr;
2735     byte* topAddr;
2736     GCBits mark;        // entries already scanned, or should not be scanned
2737     GCBits scan;        // entries that need to be scanned
2738     GCBits freebits;    // entries that are on the free list
2739     GCBits finals;      // entries that need finalizer run on them
2740     GCBits noscan;      // entries that should not be scanned
2741
2742     size_t npages;
2743     size_t ncommitted;    // ncommitted <= npages
2744     ubyte* pagetable;
2745
2746
2747     void initialize(size_t npages)
2748     {
2749         size_t poolsize;
2750
2751         //debug(PRINTF) printf("Pool::Pool(%u)\n", npages);
2752         poolsize = npages * PAGESIZE;
2753         assert(poolsize >= POOLSIZE);
2754         baseAddr = cast(byte *)os_mem_map(poolsize);
2755
2756         // Some of the code depends on page alignment of memory pools
2757         assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);
2758
2759         if (!baseAddr)
2760         {
2761             //debug(PRINTF) printf("GC fail: poolsize = x%x, errno = %d\n", poolsize, errno);
2762             //debug(PRINTF) printf("message = '%s'\n", sys_errlist[errno]);
2763
2764             npages = 0;
2765             poolsize = 0;
2766         }
2767         //assert(baseAddr);
2768         topAddr = baseAddr + poolsize;
2769
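             // Each bit set carries one bit per 16 bytes of pool memory.  finals is
             // not allocated here; setBits() allocates it lazily the first time a
             // FINALIZE attribute is requested.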
2770         mark.alloc(cast(size_t)poolsize / 16);
2771         scan.alloc(cast(size_t)poolsize / 16);
2772         freebits.alloc(cast(size_t)poolsize / 16);
2773         noscan.alloc(cast(size_t)poolsize / 16);
2774
2775         pagetable = cast(ubyte*) .malloc(npages);
2776         if (!pagetable)
2777             onOutOfMemoryError();
2778         .memset(pagetable, B_UNCOMMITTED, npages);
2779
2780         this.npages = npages;
2781         ncommitted = 0;
2782     }
2783
2784
2785     void Dtor()
2786     {
2787         if (baseAddr)
2788         {
2789             int result;
2790
2791             if (ncommitted)
2792             {
2793                 result = os_mem_decommit(baseAddr, 0, ncommitted * PAGESIZE);
2794                 assert(result == 0);
2795                 ncommitted = 0;
2796             }
2797
2798             if (npages)
2799             {
2800                 result = os_mem_unmap(baseAddr, npages * PAGESIZE);
2801                 assert(result == 0);
2802                 npages = 0;
2803             }
2804
2805             baseAddr = null;
2806             topAddr = null;
2807         }
2808         if (pagetable)
2809             .free(pagetable);
2810
2811         mark.Dtor();
2812         scan.Dtor();
2813         freebits.Dtor();
2814         finals.Dtor();
2815         noscan.Dtor();
2816     }
2817
2818
2819     void Invariant() { }
2820
2821
2822     invariant
2823     {
2824         //mark.Invariant();
2825         //scan.Invariant();
2826         //freebits.Invariant();
2827         //finals.Invariant();
2828         //noscan.Invariant();
2829
2830         if (baseAddr)
2831         {
2832             //if (baseAddr + npages * PAGESIZE != topAddr)
2833                 //printf("baseAddr = %p, npages = %d, topAddr = %p\n", baseAddr, npages, topAddr);
2834             assert(baseAddr + npages * PAGESIZE == topAddr);
2835             assert(ncommitted <= npages);
2836         }
2837
2838         for (size_t i = 0; i < npages; i++)
2839         {   Bins bin = cast(Bins)pagetable[i];
2840
2841             assert(bin < B_MAX);
2842         }
2843     }
2844
2845
2846     /**
2847      * Allocate n consecutive pages from the Pool.
2848      * Returns the index of the first page, or OPFAIL on failure.
2849      */
2850     size_t allocPages(size_t n)
2851     {
2852         size_t i;
2853         size_t n2;
2854
2855         //debug(PRINTF) printf("Pool::allocPages(n = %d)\n", n);
2856         n2 = n;
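             // n2 counts how many more consecutive B_FREE pages are still needed;
             // it is reset whenever the run of free pages is broken.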
2857         for (i = 0; i < ncommitted; i++)
2858         {
2859             if (pagetable[i] == B_FREE)
2860             {
2861                 if (--n2 == 0)
2862                 {   //debug(PRINTF) printf("\texisting pn = %d\n", i - n + 1);
2863                     return i - n + 1;
2864                 }
2865             }
2866             else
2867                 n2 = n;
2868         }
2869         return extendPages(n);
2870     }
2871
2872     /**
2873      * Commit at least n more pages to the Pool.
2874      * Returns the index of the first page of the resulting free run, or OPFAIL on failure.
2875      */
2876     size_t extendPages(size_t n)
2877     {
2878         //debug(PRINTF) printf("Pool::extendPages(n = %d)\n", n);
2879         if (ncommitted + n <= npages)
2880         {
2881             size_t tocommit;
2882
2883             tocommit = (n + (COMMITSIZE/PAGESIZE) - 1) & ~(COMMITSIZE/PAGESIZE - 1);
2884             if (ncommitted + tocommit > npages)
2885                 tocommit = npages - ncommitted;
2886             //debug(PRINTF) printf("\tlooking to commit %d more pages\n", tocommit);
2887             //fflush(stdout);
2888             if (os_mem_commit(baseAddr, ncommitted * PAGESIZE, tocommit * PAGESIZE) == 0)
2889             {
2890                 .memset(pagetable + ncommitted, B_FREE, tocommit);
2891                 auto i = ncommitted;
2892                 ncommitted += tocommit;
2893
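                     // Back up over any free pages immediately preceding the newly
                     // committed block, so the returned index starts a free run of
                     // at least n pages.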
2894                 while (i && pagetable[i - 1] == B_FREE)
2895                     i--;
2896
2897                 return i;
2898             }
2899             //debug(PRINTF) printf("\tfailed to commit %d pages\n", tocommit);
2900         }
2901
2902         return OPFAIL;
2903     }
2904
2905
2906     /**
2907      * Free npages pages starting with pagenum.
2908      */
2909     void freePages(size_t pagenum, size_t npages)
2910     {
2911         .memset(&pagetable[pagenum], B_FREE, npages);
2912     }
2913
2914
2915     /**
2916      * Used for sorting pooltable[]
2917      */
2918     int opCmp(Pool *p2)
2919     {
2920         if (baseAddr < p2.baseAddr)
2921             return -1;
2922         else
2923             return cast(int)(baseAddr > p2.baseAddr);
2924     }
2925 }
2926
2927
2928 /* ============================ SENTINEL =============================== */
2929
2930
2931 version (SENTINEL)
2932 {
2933     const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
2934     const ubyte SENTINEL_POST = 0xF5;           // 8 bits
2935     const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;
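         // Layout of a sentinel-wrapped allocation (a sketch inferred from the
         // accessors below):
         //
         //   [ size_t size | size_t SENTINEL_PRE | user data ... | ubyte SENTINEL_POST ]
         //                                         ^ pointer returned by sentinel_add()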
2936
2937
2938     size_t* sentinel_size(void *p)  { return &(cast(size_t *)p)[-2]; }
2939     size_t* sentinel_pre(void *p)   { return &(cast(size_t *)p)[-1]; }
2940     ubyte* sentinel_post(void *p) { return &(cast(ubyte *)p)[*sentinel_size(p)]; }
2941
2942
2943     void sentinel_init(void *p, size_t size)
2944     {
2945         *sentinel_size(p) = size;
2946         *sentinel_pre(p) = SENTINEL_PRE;
2947         *sentinel_post(p) = SENTINEL_POST;
2948     }
2949
2950
2951     void sentinel_Invariant(void *p)
2952     {
2953         assert(*sentinel_pre(p) == SENTINEL_PRE);
2954         assert(*sentinel_post(p) == SENTINEL_POST);
2955     }
2956
2957
2958     void *sentinel_add(void *p)
2959     {
2960         return p + 2 * size_t.sizeof;
2961     }
2962
2963
2964     void *sentinel_sub(void *p)
2965     {
2966         return p - 2 * size_t.sizeof;
2967     }
2968 }
2969 else
2970 {
2971     const uint SENTINEL_EXTRA = 0;
2972
2973
2974     void sentinel_init(void *p, size_t size)
2975     {
2976     }
2977
2978
2979     void sentinel_Invariant(void *p)
2980     {
2981     }
2982
2983
2984     void *sentinel_add(void *p)
2985     {
2986         return p;
2987     }
2988
2989
2990     void *sentinel_sub(void *p)
2991     {
2992         return p;
2993     }
2994 }
2995