1 /**
2  * This module contains the garbage collector implementation.
3  *
4  * Copyright: Copyright (C) 2001-2007 Digital Mars, www.digitalmars.com.
5  *            All rights reserved.
6  * License:
7  *  This software is provided 'as-is', without any express or implied
8  *  warranty. In no event will the authors be held liable for any damages
9  *  arising from the use of this software.
10  *
11  *  Permission is granted to anyone to use this software for any purpose,
12  *  including commercial applications, and to alter it and redistribute it
13  *  freely, in both source and binary form, subject to the following
14  *  restrictions:
15  *
16  *  o  The origin of this software must not be misrepresented; you must not
17  *     claim that you wrote the original software. If you use this software
18  *     in a product, an acknowledgment in the product documentation would be
19  *     appreciated but is not required.
20  *  o  Altered source versions must be plainly marked as such, and must not
21  *     be misrepresented as being the original software.
22  *  o  This notice may not be removed or altered from any source
23  *     distribution.
24  * Authors:   Walter Bright, David Friedman, Sean Kelly
25  */
26
27 module gc.gc;
28
29 // D Programming Language Garbage Collector implementation
30
31 /************** Debugging ***************************/
32
33 //debug = PRINTF;               // turn on printf's
34 //debug = COLLECT_PRINTF;       // turn on printf's
35 //debug = LOGGING;              // log allocations / frees
36 //debug = MEMSTOMP;             // stomp on memory
37 //debug = SENTINEL;             // add underrun/overrun protection
38 //debug = PTRCHECK;             // more pointer checking
39 //debug = PTRCHECK2;            // thorough but slow pointer checking
40
41 /*************** Configuration *********************/
42
43 version = STACKGROWSDOWN;       // growing the stack means subtracting from the stack pointer
44                                 // (use for Intel X86 CPUs)
45                                 // else growing the stack means adding to the stack pointer
46
47 /***************************************************/
48
49 import gc.bits;
50 import gc.stats;
51 import gc.alloc;
52 import gc.libc;
53
54
55 version (GNU)
56 {
57     // BUG: The following import will likely not work, since the gcc
58     //      subdirectory is elsewhere.  Instead, perhaps the functions
59     //      could be declared directly or some other resolution could
60     //      be found.
61     import gcc.builtins; // for __builtin_unwind_init
62 }
63
64
65 struct BlkInfo
66 {
67     void*  base;
68     size_t size;
69     uint   attr;
70 }
71
72 private
73 {
74     enum BlkAttr : uint
75     {
76         FINALIZE = 0b0000_0001,
77         NO_SCAN  = 0b0000_0010,
78         NO_MOVE  = 0b0000_0100,
79         ALL_BITS = 0b1111_1111
80     }
81
82     extern (C) void* rt_stackBottom();
83     extern (C) void* rt_stackTop();
84
85     extern (C) void rt_finalize( void* p, bool det = true );
86
87     alias void delegate(Object) DEvent;
88     extern (C) void rt_attachDisposeEvent(Object h, DEvent e);
89     extern (C) bool rt_detachDisposeEvent(Object h, DEvent e);
90
91
92     alias void delegate( void*, void* ) scanFn;
93
94     extern (C) void rt_scanStaticData( scanFn scan );
95
96     extern (C) bool thread_needLock();
97     extern (C) void thread_suspendAll();
98     extern (C) void thread_resumeAll();
99
100     extern (C) void thread_scanAll( scanFn fn, void* curStackTop = null );
101
102     extern (C) void onOutOfMemoryError();
103
104     enum
105     {
106         OPFAIL = ~cast(size_t)0
107     }
108 }
109
110
111 alias GC gc_t;
112
113
114 /* ======================= Leak Detector =========================== */
115
116
117 debug (LOGGING)
118 {
119     struct Log
120     {
121         void*  p;
122         size_t size;
123         size_t line;
124         char*  file;
125         void*  parent;
126
127         void print()
128         {
129             printf("    p = %x, size = %d, parent = %x ", p, size, parent);
130             if (file)
131             {
132                 printf("%s(%u)", file, line);
133             }
134             printf("\n");
135         }
136     }
137
138
139     struct LogArray
140     {
141         size_t dim;
142         size_t allocdim;
143         Log *data;
144
145         void Dtor()
146         {
147             if (data)
148                 .free(data);
149             data = null;
150         }
151
152         void reserve(size_t nentries)
153         {
154             assert(dim <= allocdim);
155             if (allocdim - dim < nentries)
156             {
157                 allocdim = (dim + nentries) * 2;
158                 assert(dim + nentries <= allocdim);
159                 if (!data)
160                 {
161                     data = cast(Log*) .malloc(allocdim * Log.sizeof);
162                     if (!data && allocdim)
163                         onOutOfMemoryError();
164                 }
165                 else
166                 {   Log *newdata;
167
168                     newdata = cast(Log*) .malloc(allocdim * Log.sizeof);
169                     if (!newdata && allocdim)
170                         onOutOfMemoryError();
171                     .memcpy(newdata, data, dim * Log.sizeof);
172                     .free(data);
173                     data = newdata;
174                 }
175             }
176         }
177
178
179         void push(Log log)
180         {
181             reserve(1);
182             data[dim++] = log;
183         }
184
185         void remove(size_t i)
186         {
187             .memmove(data + i, data + i + 1, (dim - i) * Log.sizeof);
188             dim--;
189         }
190
191
192         size_t find(void *p)
193         {
194             for (size_t i = 0; i < dim; i++)
195             {
196                 if (data[i].p == p)
197                     return i;
198             }
199             return OPFAIL; // not found
200         }
201
202
203         void copy(LogArray *from)
204         {
205             reserve(from.dim - dim);
206             assert(from.dim <= allocdim);
207             .memcpy(data, from.data, from.dim * Log.sizeof);
208             dim = from.dim;
209         }
210     }
211 }
212
213
214 /* ============================ GC =============================== */
215
216
217 class GCLock { }                // just a dummy so we can get a global lock
218
219
220 const uint GCVERSION = 1;       // increment every time we change interface
221                                 // to GC.
222
223 class GC
224 {
225     // For passing to debug code
226     static size_t line;
227     static char*  file;
228
229     uint gcversion = GCVERSION;
230
231     Gcx *gcx;                   // implementation
232     static ClassInfo gcLock;    // global lock
233
234
235     void initialize()
236     {
237         gcLock = GCLock.classinfo;
238         gcx = cast(Gcx*) .calloc(1, Gcx.sizeof);
239         if (!gcx)
240             onOutOfMemoryError();
241         gcx.initialize();
242         setStackBottom(rt_stackBottom());
243     }
244
245
246     void Dtor()
247     {
248         if (gcx)
249         {
250             gcx.Dtor();
251             .free(gcx);
252             gcx = null;
253         }
254     }
255
256
257     /**
258      * Re-enable automatic collections, undoing one prior call to disable().
259      */
260     void enable()
261     {
262         if (!thread_needLock())
263         {
264             assert(gcx.disabled > 0);
265             gcx.disabled--;
266         }
267         else synchronized (gcLock)
268         {
269             assert(gcx.disabled > 0);
270             gcx.disabled--;
271         }
272     }
273
274
275     /**
276      * Disable automatic collections; calls nest, and each must be matched by enable().
277      */
278     void disable()
279     {
280         if (!thread_needLock())
281         {
282             gcx.disabled++;
283         }
284         else synchronized (gcLock)
285         {
286             gcx.disabled++;
287         }
288     }
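    // Illustrative sketch (not part of the original source): disable()/enable()
    // nest, so a collection-free region can be written as below; `gc` stands
    // for an assumed, already-initialized GC instance.
    version (none)
    {
        void noCollectRegion(GC gc)
        {
            gc.disable();               // defer collections
            scope (exit) gc.enable();   // re-enable even if an exception is thrown
            // ... allocate a burst of short-lived objects here ...
        }
    }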
289
290
291     /**
292      * Return the attribute bits (BlkAttr) of the block containing p, or 0 if p is not a GC pointer.
293      */
294     uint getAttr(void* p)
295     {
296         if (!p)
297         {
298             return 0;
299         }
300
301         uint go()
302         {
303             Pool* pool = gcx.findPool(p);
304             uint  oldb = 0;
305
306             if (pool)
307             {
308                 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
309
310                 oldb = gcx.getBits(pool, biti);
311             }
312             return oldb;
313         }
314
315         if (!thread_needLock())
316         {
317             return go();
318         }
319         else synchronized (gcLock)
320         {
321             return go();
322         }
323     }
324
325
326     /**
327      * Set the given attribute bits on the block containing p; returns the previous bits.
328      */
329     uint setAttr(void* p, uint mask)
330     {
331         if (!p)
332         {
333             return 0;
334         }
335
336         uint go()
337         {
338             Pool* pool = gcx.findPool(p);
339             uint  oldb = 0;
340
341             if (pool)
342             {
343                 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
344
345                 oldb = gcx.getBits(pool, biti);
346                 gcx.setBits(pool, biti, mask);
347             }
348             return oldb;
349         }
350
351         if (!thread_needLock())
352         {
353             return go();
354         }
355         else synchronized (gcLock)
356         {
357             return go();
358         }
359     }
360
361
362     /**
363      * Clear the given attribute bits on the block containing p; returns the previous bits.
364      */
365     uint clrAttr(void* p, uint mask)
366     {
367         if (!p)
368         {
369             return 0;
370         }
371
372         uint go()
373         {
374             Pool* pool = gcx.findPool(p);
375             uint  oldb = 0;
376
377             if (pool)
378             {
379                 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
380
381                 oldb = gcx.getBits(pool, biti);
382                 gcx.clrBits(pool, biti, mask);
383             }
384             return oldb;
385         }
386
387         if (!thread_needLock())
388         {
389             return go();
390         }
391         else synchronized (gcLock)
392         {
393             return go();
394         }
395     }
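    // Illustrative sketch (not part of the original source): typical use of the
    // attribute API above.  `gc` and `p` are assumed to be a live GC instance
    // and a pointer it allocated.
    version (none)
    {
        void attrExample(GC gc, void* p)
        {
            // Tell the collector the block holds no pointers, then undo it.
            gc.setAttr(p, BlkAttr.NO_SCAN);      // returns the previous bits
            assert(gc.getAttr(p) & BlkAttr.NO_SCAN);
            gc.clrAttr(p, BlkAttr.NO_SCAN);
        }
    }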
396
397
398     /**
399      * Allocate size bytes of GC-managed memory with the given BlkAttr bits.
400      */
401     void *malloc(size_t size, uint bits = 0)
402     {
403         if (!size)
404         {
405             return null;
406         }
407
408         if (!thread_needLock())
409         {
410             return mallocNoSync(size, bits);
411         }
412         else synchronized (gcLock)
413         {
414             return mallocNoSync(size, bits);
415         }
416     }
417
418
419     //
420     //
421     //
422     private void *mallocNoSync(size_t size, uint bits = 0)
423     {
424         assert(size != 0);
425
426         void *p = null;
427         Bins bin;
428
429         //debug(PRINTF) printf("GC::malloc(size = %d, gcx = %p)\n", size, gcx);
430         assert(gcx);
431
432         size += SENTINEL_EXTRA;
433
434         // Compute size bin
435         // Cache previous binsize lookup - Dave Fladebo.
436         static size_t lastsize = -1;
437         static Bins lastbin;
438         if (size == lastsize)
439             bin = lastbin;
440         else
441         {
442             bin = gcx.findBin(size);
443             lastsize = size;
444             lastbin = bin;
445         }
446
447         if (bin < B_PAGE)
448         {
449             p = gcx.bucket[bin];
450             if (p is null)
451             {
452                 if (!gcx.allocPage(bin) && !gcx.disabled)   // try to find a new page
453                 {
454                     if (!thread_needLock())
455                     {
456                         /* We haven't acquired the lock yet, so be sure
457                          * to lock for the collection, since a finalizer
458                          * may start a new thread.
459                          */
460                         synchronized (gcLock)
461                         {
462                             gcx.fullcollectshell();
463                         }
464                     }
465                     else if (!gcx.fullcollectshell())       // collect to find a new page
466                     {
467                         //gcx.newPool(1);
468                     }
469                 }
470                 if (!gcx.bucket[bin] && !gcx.allocPage(bin))
471                 {   int result;
472
473                     gcx.newPool(1);         // allocate new pool to find a new page
474                     result = gcx.allocPage(bin);
475                     if (!result)
476                         onOutOfMemoryError();
477                 }
478                 p = gcx.bucket[bin];
479             }
480
481             // Return next item from free list
482             gcx.bucket[bin] = (cast(List*)p).next;
483             if( !(bits & BlkAttr.NO_SCAN) )
484                 .memset(p + size, 0, binsize[bin] - size);
485             //debug(PRINTF) printf("\tmalloc => %x\n", p);
486             debug (MEMSTOMP) .memset(p, 0xF0, size);
487         }
488         else
489         {
490             p = gcx.bigAlloc(size);
491             if (!p)
492                 onOutOfMemoryError();
493         }
494         size -= SENTINEL_EXTRA;
495         p = sentinel_add(p);
496         sentinel_init(p, size);
497         gcx.log_malloc(p, size);
498
499         if (bits)
500         {
501             Pool *pool = gcx.findPool(p);
502             assert(pool);
503
504             gcx.setBits(pool, cast(size_t)(p - pool.baseAddr) / 16, bits);
505         }
506         return p;
507     }
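    // Illustrative sketch (not part of the original source): what the small-bin
    // path above amounts to for a caller.  `gc` is an assumed, already
    // initialized GC instance; the sizes ignore SENTINEL_EXTRA.
    version (none)
    {
        void smallBinExample(GC gc)
        {
            // A 20-byte request is served from the 32-byte bin
            // (findBin(20) == B_32), so sizeOf() reports the bin size,
            // not the requested size.
            void* p = gc.malloc(20);
            assert(gc.sizeOf(p) == 32);
        }
    }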
508
509
510     /**
511      * Allocate size bytes of zero-initialized GC memory with the given BlkAttr bits.
512      */
513     void *calloc(size_t size, uint bits = 0)
514     {
515         if (!size)
516         {
517             return null;
518         }
519
520         if (!thread_needLock())
521         {
522             return callocNoSync(size, bits);
523         }
524         else synchronized (gcLock)
525         {
526             return callocNoSync(size, bits);
527         }
528     }
529
530
531     //
532     //
533     //
534     private void *callocNoSync(size_t size, uint bits = 0)
535     {
536         assert(size != 0);
537
538         //debug(PRINTF) printf("calloc: %x len %d\n", p, len);
539         void *p = mallocNoSync(size, bits);
540         .memset(p, 0, size);
541         return p;
542     }
543
544
545     /**
546      * Resize the block pointed to by p to size bytes; the block may be moved.
547      */
548     void *realloc(void *p, size_t size, uint bits = 0)
549     {
550         if (!thread_needLock())
551         {
552             return reallocNoSync(p, size, bits);
553         }
554         else synchronized (gcLock)
555         {
556             return reallocNoSync(p, size, bits);
557         }
558     }
559
560
561     //
562     //
563     //
564     private void *reallocNoSync(void *p, size_t size, uint bits = 0)
565     {
566         if (!size)
567         {   if (p)
568             {   freeNoSync(p);
569                 p = null;
570             }
571         }
572         else if (!p)
573         {
574             p = mallocNoSync(size, bits);
575         }
576         else
577         {   void *p2;
578             size_t psize;
579
580             //debug(PRINTF) printf("GC::realloc(p = %x, size = %u)\n", p, size);
581             version (SENTINEL)
582             {
583                 sentinel_Invariant(p);
584                 psize = *sentinel_size(p);
585                 if (psize != size)
586                 {
587                     if (psize)
588                     {
589                         Pool *pool = gcx.findPool(p);
590
591                         if (pool)
592                         {
593                             auto biti = cast(size_t)(p - pool.baseAddr) / 16;
594
595                             if (bits)
596                             {
597                                 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
598                                 gcx.setBits(pool, biti, bits);
599                             }
600                             else
601                             {
602                                 bits = gcx.getBits(pool, biti);
603                             }
604                         }
605                     }
606                     p2 = mallocNoSync(size, bits);
607                     if (psize < size)
608                         size = psize;
609                     //debug(PRINTF) printf("\tcopying %d bytes\n",size);
610                     .memcpy(p2, p, size);
611                     p = p2;
612                 }
613             }
614             else
615             {
616                 psize = gcx.findSize(p);        // find allocated size
617                 if (psize >= PAGESIZE && size >= PAGESIZE)
618                 {
619                     auto psz = psize / PAGESIZE;
620                     auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
621                     if (newsz == psz)
622                         return p;
623
624                     auto pool = gcx.findPool(p);
625                     auto pagenum = (p - pool.baseAddr) / PAGESIZE;
626
627                     if (newsz < psz)
628                     {   // Shrink in place
629                         synchronized (gcLock)
630                         {
631                             debug (MEMSTOMP) .memset(p + size, 0xF2, psize - size);
632                             pool.freePages(pagenum + newsz, psz - newsz);
633                         }
634                         return p;
635                     }
636                     else if (pagenum + newsz <= pool.npages)
637                     {
638                         // Attempt to expand in place
639                         synchronized (gcLock)
640                         {
641                             for (size_t i = pagenum + psz; 1;)
642                             {
643                                 if (i == pagenum + newsz)
644                                 {
645                                     debug (MEMSTOMP) .memset(p + psize, 0xF0, size - psize);
646                                     .memset(&pool.pagetable[pagenum + psz], B_PAGEPLUS, newsz - psz);
647                                     return p;
648                                 }
649                                 if (i == pool.npages)
650                                 {
651                                     break;
652                                 }
653                                 if (pool.pagetable[i] != B_FREE)
654                                     break;
655                                 i++;
656                             }
657                         }
658                     }
659                 }
660                 if (psize < size ||             // if new size is bigger than the old
661                     psize > size * 2)           // or new size is less than half the old
662                 {
663                     if (psize)
664                     {
665                         Pool *pool = gcx.findPool(p);
666
667                         if (pool)
668                         {
669                             auto biti = cast(size_t)(p - pool.baseAddr) / 16;
670
671                             if (bits)
672                             {
673                                 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
674                                 gcx.setBits(pool, biti, bits);
675                             }
676                             else
677                             {
678                                 bits = gcx.getBits(pool, biti);
679                             }
680                         }
681                     }
682                     p2 = mallocNoSync(size, bits);
683                     if (psize < size)
684                         size = psize;
685                     //debug(PRINTF) printf("\tcopying %d bytes\n",size);
686                     .memcpy(p2, p, size);
687                     p = p2;
688                 }
689             }
690         }
691         return p;
692     }
693
694
695     /**
696      * Attempt to in-place enlarge the memory block pointed to by p by at least
697      * minbytes beyond its current capacity, up to a maximum of maxsize.  This
698      * does not attempt to move the memory block (like realloc() does).
699      *
700      * Returns:
701      *  0 if could not extend p,
702      *  total size of entire memory block if successful.
703      */
704     size_t extend(void* p, size_t minsize, size_t maxsize)
705     {
706         if (!thread_needLock())
707         {
708             return extendNoSync(p, minsize, maxsize);
709         }
710         else synchronized (gcLock)
711         {
712             return extendNoSync(p, minsize, maxsize);
713         }
714     }
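    // Illustrative sketch (not part of the original source): extend() only
    // works on page-sized allocations and never moves the block.  `gc` is an
    // assumed, already initialized GC instance.
    version (none)
    {
        void extendExample(GC gc)
        {
            void* p = gc.malloc(PAGESIZE * 2);
            // Ask for at least one more page, accept up to four more.
            size_t total = gc.extend(p, PAGESIZE, 4 * PAGESIZE);
            // total is 0 if the following pages were busy, otherwise the new
            // full size of the block (at least 3 * PAGESIZE here).
            assert(total == 0 || total >= 3 * PAGESIZE);
        }
    }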
715
716
717     //
718     //
719     //
720     private size_t extendNoSync(void* p, size_t minsize, size_t maxsize)
721     in
722     {
723         assert( minsize <= maxsize );
724     }
725     body
726     {
727         //debug(PRINTF) printf("GC::extend(p = %x, minsize = %u, maxsize = %u)\n", p, minsize, maxsize);
728         version (SENTINEL)
729         {
730             return 0;
731         }
732         auto psize = gcx.findSize(p);   // find allocated size
733         if (psize < PAGESIZE)
734             return 0;                   // cannot extend buckets
735
736         auto psz = psize / PAGESIZE;
737         auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
738         auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
739
740         auto pool = gcx.findPool(p);
741         auto pagenum = (p - pool.baseAddr) / PAGESIZE;
742
743         size_t sz;
744         for (sz = 0; sz < maxsz; sz++)
745         {
746             auto i = pagenum + psz + sz;
747             if (i == pool.npages)
748                 break;
749             if (pool.pagetable[i] != B_FREE)
750             {   if (sz < minsz)
751                     return 0;
752                 break;
753             }
754         }
755         if (sz < minsz)
756             return 0;
757         debug (MEMSTOMP) .memset(p + psize, 0xF0, (psz + sz) * PAGESIZE - psize);
758         .memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
759         gcx.p_cache = null;
760         gcx.size_cache = 0;
761         return (psz + sz) * PAGESIZE;
762     }
763
764
765     /**
766      * Reserve at least size bytes of pool memory; returns the bytes actually reserved, or 0.
767      */
768     size_t reserve(size_t size)
769     {
770         if (!size)
771         {
772             return 0;
773         }
774
775         if (!thread_needLock())
776         {
777             return reserveNoSync(size);
778         }
779         else synchronized (gcLock)
780         {
781             return reserveNoSync(size);
782         }
783     }
784
785
786     //
787     //
788     //
789     private size_t reserveNoSync(size_t size)
790     {
791         assert(size != 0);
792         assert(gcx);
793
794         return gcx.reserve(size);
795     }
796
797
798     /**
799      * Free the block pointed to by p.  Pointers not owned by the GC are ignored.
800      */
801     void free(void *p)
802     {
803         if (!p)
804         {
805             return;
806         }
807
808         if (!thread_needLock())
809         {
810             return freeNoSync(p);
811         }
812         else synchronized (gcLock)
813         {
814             return freeNoSync(p);
815         }
816     }
817
818
819     //
820     //
821     //
822     private void freeNoSync(void *p)
823     {
824         assert (p);
825
826         Pool*  pool;
827         size_t pagenum;
828         Bins   bin;
829         size_t biti;
830
831         // Find which page it is in
832         pool = gcx.findPool(p);
833         if (!pool)                              // if not one of ours
834             return;                             // ignore
835         sentinel_Invariant(p);
836         p = sentinel_sub(p);
837         pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
838         biti = cast(size_t)(p - pool.baseAddr) / 16;
839         gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
840
841         bin = cast(Bins)pool.pagetable[pagenum];
842         if (bin == B_PAGE)              // if large alloc
843         {   size_t npages;
844             size_t n;
845
846             // Free pages
847             npages = 1;
848             n = pagenum;
849             while (++n < pool.npages && pool.pagetable[n] == B_PAGEPLUS)
850                 npages++;
851             debug (MEMSTOMP) .memset(p, 0xF2, npages * PAGESIZE);
852             pool.freePages(pagenum, npages);
853         }
854         else
855         {   // Add to free list
856             List *list = cast(List*)p;
857
858             debug (MEMSTOMP) .memset(p, 0xF2, binsize[bin]);
859
860             list.next = gcx.bucket[bin];
861             gcx.bucket[bin] = list;
862         }
863         gcx.log_free(sentinel_add(p));
864     }
865
866
867     /**
868      * Determine the base address of the block containing p.  If p is not a gc
869      * allocated pointer, return null.
870      */
871     void* addrOf(void *p)
872     {
873         if (!p)
874         {
875             return null;
876         }
877
878         if (!thread_needLock())
879         {
880             return addrOfNoSync(p);
881         }
882         else synchronized (gcLock)
883         {
884             return addrOfNoSync(p);
885         }
886     }
887
888
889     //
890     //
891     //
892     void* addrOfNoSync(void *p)
893     {
894         if (!p)
895         {
896             return null;
897         }
898
899         return gcx.findBase(p);
900     }
901
902
903     /**
904      * Determine the allocated size of pointer p.  If p is an interior pointer
905      * or not a gc allocated pointer, return 0.
906      */
907     size_t sizeOf(void *p)
908     {
909         if (!p)
910         {
911             return 0;
912         }
913
914         if (!thread_needLock())
915         {
916             return sizeOfNoSync(p);
917         }
918         else synchronized (gcLock)
919         {
920             return sizeOfNoSync(p);
921         }
922     }
923
924
925     //
926     //
927     //
928     private size_t sizeOfNoSync(void *p)
929     {
930         assert (p);
931
932         version (SENTINEL)
933         {
934             p = sentinel_sub(p);
935             size_t size = gcx.findSize(p);
936
937             // Check for interior pointer
938             // This depends on:
939             // 1) size is a power of 2 for less than PAGESIZE values
940             // 2) base of memory pool is aligned on PAGESIZE boundary
941             if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
942                 size = 0;
943             return size ? size - SENTINEL_EXTRA : 0;
944         }
945         else
946         {
947             if (p == gcx.p_cache)
948                 return gcx.size_cache;
949
950             size_t size = gcx.findSize(p);
951
952             // Check for interior pointer
953             // This depends on:
954             // 1) size is a power of 2 for less than PAGESIZE values
955             // 2) base of memory pool is aligned on PAGESIZE boundary
956             if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
957                 size = 0;
958             else
959             {
960                 gcx.p_cache = p;
961                 gcx.size_cache = size;
962             }
963
964             return size;
965         }
966     }
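    // Worked example of the interior-pointer check above (sketch, small-bin
    // case only): for a 64-byte bin, size - 1 == 0x3F.  A pointer whose page
    // offset is 0x40 satisfies (p & 0x3F & 0xFFF) == 0, so it is a block start;
    // a pointer at offset 0x44 leaves the low bits 0x04 set, so sizeOf()
    // treats it as an interior pointer and returns 0.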
967
968
969     /**
970      * Retrieve the base address, size and attribute bits of the block
971      * containing p.  If p is not a gc allocated pointer, return an empty BlkInfo.
972      */
973     BlkInfo query(void *p)
974     {
975         if (!p)
976         {
977             BlkInfo i;
978             return  i;
979         }
980
981         if (!thread_needLock())
982         {
983             return queryNoSync(p);
984         }
985         else synchronized (gcLock)
986         {
987             return queryNoSync(p);
988         }
989     }
990
991
992     //
993     //
994     //
995     BlkInfo queryNoSync(void *p)
996     {
997         assert(p);
998
999         return gcx.getInfo(p);
1000     }
1001
1002
1003     /**
1004      * Verify that pointer p:
1005      *  1) belongs to this memory pool
1006      *  2) points to the start of an allocated piece of memory
1007      *  3) is not on a free list
1008      */
1009     void check(void *p)
1010     {
1011         if (!p)
1012         {
1013             return;
1014         }
1015
1016         if (!thread_needLock())
1017         {
1018             checkNoSync(p);
1019         }
1020         else synchronized (gcLock)
1021         {
1022             checkNoSync(p);
1023         }
1024     }
1025
1026
1027     //
1028     //
1029     //
1030     private void checkNoSync(void *p)
1031     {
1032         assert(p);
1033
1034         sentinel_Invariant(p);
1035         debug (PTRCHECK)
1036         {
1037             Pool*  pool;
1038             size_t pagenum;
1039             Bins   bin;
1040             size_t size;
1041
1042             p = sentinel_sub(p);
1043             pool = gcx.findPool(p);
1044             assert(pool);
1045             pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1046             bin = cast(Bins)pool.pagetable[pagenum];
1047             assert(bin <= B_PAGE);
1048             size = binsize[bin];
1049             assert((cast(size_t)p & (size - 1)) == 0);
1050
1051             debug (PTRCHECK2)
1052             {
1053                 if (bin < B_PAGE)
1054                 {
1055                     // Check that p is not on a free list
1056                     List *list;
1057
1058                     for (list = gcx.bucket[bin]; list; list = list.next)
1059                     {
1060                         assert(cast(void*)list != p);
1061                     }
1062                 }
1063             }
1064         }
1065     }
1066
1067
1068     //
1069     //
1070     //
1071     private void setStackBottom(void *p)
1072     {
1073         version (STACKGROWSDOWN)
1074         {
1075             //p = (void *)((uint *)p + 4);
1076             if (p > gcx.stackBottom)
1077             {
1078                 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1079                 gcx.stackBottom = p;
1080             }
1081         }
1082         else
1083         {
1084             //p = (void *)((uint *)p - 4);
1085             if (p < gcx.stackBottom)
1086             {
1087                 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1088                 gcx.stackBottom = cast(char*)p;
1089             }
1090         }
1091     }
1092
1093
1094     /**
1095      * add p to list of roots
1096      */
1097     void addRoot(void *p)
1098     {
1099         if (!p)
1100         {
1101             return;
1102         }
1103
1104         if (!thread_needLock())
1105         {
1106             gcx.addRoot(p);
1107         }
1108         else synchronized (gcLock)
1109         {
1110             gcx.addRoot(p);
1111         }
1112     }
1113
1114
1115     /**
1116      * remove p from list of roots
1117      */
1118     void removeRoot(void *p)
1119     {
1120         if (!p)
1121         {
1122             return;
1123         }
1124
1125         if (!thread_needLock())
1126         {
1127             gcx.removeRoot(p);
1128         }
1129         else synchronized (gcLock)
1130         {
1131             gcx.removeRoot(p);
1132         }
1133     }
1134
1135
1136     /**
1137      * add range to scan for roots
1138      */
1139     void addRange(void *p, size_t sz)
1140     {
1141         if (!p || !sz)
1142         {
1143             return;
1144         }
1145
1146         //debug(PRINTF) printf("+GC.addRange(pbot = x%x, ptop = x%x)\n", pbot, ptop);
1147         if (!thread_needLock())
1148         {
1149             gcx.addRange(p, p + sz);
1150         }
1151         else synchronized (gcLock)
1152         {
1153             gcx.addRange(p, p + sz);
1154         }
1155         //debug(PRINTF) printf("-GC.addRange()\n");
1156     }
1157
1158
1159     /**
1160      * remove range
1161      */
1162     void removeRange(void *p)
1163     {
1164         if (!p)
1165         {
1166             return;
1167         }
1168
1169         if (!thread_needLock())
1170         {
1171             gcx.removeRange(p);
1172         }
1173         else synchronized (gcLock)
1174         {
1175             gcx.removeRange(p);
1176         }
1177     }
1178
1179
1180     /**
1181      * do full garbage collection
1182      */
1183     void fullCollect()
1184     {
1185         debug(PRINTF) printf("GC.fullCollect()\n");
1186
1187         if (!thread_needLock())
1188         {
1189             gcx.fullcollectshell();
1190         }
1191         else synchronized (gcLock)
1192         {
1193             gcx.fullcollectshell();
1194         }
1195
1196         version (none)
1197         {
1198             GCStats stats;
1199
1200             getStats(stats);
1201             debug(PRINTF) printf("poolsize = %x, usedsize = %x, freelistsize = %x\n",
1202                     stats.poolsize, stats.usedsize, stats.freelistsize);
1203         }
1204
1205         gcx.log_collect();
1206     }
1207
1208
1209     /**
1210      * do full garbage collection without scanning the thread stacks
1211      */
1212     void fullCollectNoStack()
1213     {
1214         if (!thread_needLock())
1215         {
1216             gcx.noStack++;
1217             gcx.fullcollectshell();
1218             gcx.noStack--;
1219         }
1220         else synchronized (gcLock)
1221         {
1222             gcx.noStack++;
1223             gcx.fullcollectshell();
1224             gcx.noStack--;
1225         }
1226     }
1227
1228
1229     /**
1230      * minimize free space usage
1231      */
1232     void minimize()
1233     {
1234         if (!thread_needLock())
1235         {
1236             gcx.minimize();
1237         }
1238         else synchronized (gcLock)
1239         {
1240             gcx.minimize();
1241         }
1242     }
1243
1244
1245     /**
1246      * Retrieve statistics about garbage collection.
1247      * Useful for debugging and tuning.
1248      */
1249     void getStats(out GCStats stats)
1250     {
1251         if (!thread_needLock())
1252         {
1253             getStatsNoSync(stats);
1254         }
1255         else synchronized (gcLock)
1256         {
1257             getStatsNoSync(stats);
1258         }
1259     }
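    // Illustrative sketch (not part of the original source): reading the
    // collector statistics declared in gc.stats.  `gc` is an assumed, already
    // initialized GC instance.
    version (none)
    {
        void statsExample(GC gc)
        {
            GCStats stats;
            gc.getStats(stats);
            // usedsize and freelistsize only describe small-bin pages, so
            // together they never exceed the total pool size.
            assert(stats.usedsize + stats.freelistsize <= stats.poolsize);
        }
    }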
1260
1261
1262     //
1263     //
1264     //
1265     private void getStatsNoSync(out GCStats stats)
1266     {
1267         size_t psize = 0;
1268         size_t usize = 0;
1269         size_t flsize = 0;
1270
1271         size_t n;
1272         size_t bsize = 0;
1273
1274         //debug(PRINTF) printf("getStats()\n");
1275         .memset(&stats, 0, GCStats.sizeof);
1276
1277         for (n = 0; n < gcx.npools; n++)
1278         {   Pool *pool = gcx.pooltable[n];
1279
1280             psize += pool.npages * PAGESIZE;
1281             for (size_t j = 0; j < pool.npages; j++)
1282             {
1283                 Bins bin = cast(Bins)pool.pagetable[j];
1284                 if (bin == B_FREE)
1285                     stats.freeblocks++;
1286                 else if (bin == B_PAGE)
1287                     stats.pageblocks++;
1288                 else if (bin < B_PAGE)
1289                     bsize += PAGESIZE;
1290             }
1291         }
1292
1293         for (n = 0; n < B_PAGE; n++)
1294         {
1295             //debug(PRINTF) printf("bin %d\n", n);
1296             for (List *list = gcx.bucket[n]; list; list = list.next)
1297             {
1298                 //debug(PRINTF) printf("\tlist %x\n", list);
1299                 flsize += binsize[n];
1300             }
1301         }
1302
1303         usize = bsize - flsize;
1304
1305         stats.poolsize = psize;
1306         stats.usedsize = usize;
1307         stats.freelistsize = flsize;
1308     }
1309
1310     /******************* weak-reference support *********************/
1311
1312     // call locked if necessary
1313     private T locked(T)(in T delegate() code)
1314     {
1315         if (thread_needLock)
1316             synchronized(gcLock) return code();
1317         else
1318            return code();
1319     }
1320
1321     private struct WeakPointer
1322     {
1323         Object reference;
1324
1325         void ondestroy(Object r)
1326         {
1327             assert(r is reference);
1328             // lock for memory consistency (parallel readers)
1329             // also ensures that weakpointerDestroy can be called while another
1330             // thread is freeing the reference with "delete"
1331             locked!(void)({ reference = null; });
1332         }
1333     }
1334
1335     /**
1336      * Create a weak pointer to the given object.
1337      * Returns a pointer to an opaque struct allocated in C memory.
1338      */
1339     void* weakpointerCreate( Object r )
1340     {
1341         if (r)
1342         {
1343             // must be allocated in C memory
1344             // 1. to hide the reference from the GC
1345             // 2. the GC doesn't scan delegates added by rt_attachDisposeEvent
1346             //    for references
1347             auto wp = cast(WeakPointer*)(libc.malloc(WeakPointer.sizeof));
1348             if (!wp)
1349                 onOutOfMemoryError();
1350             wp.reference = r;
1351             rt_attachDisposeEvent(r, &wp.ondestroy);
1352             return wp;
1353         }
1354         return null;
1355     }
1356
1357     /**
1358      * Destroy a weak pointer returned by weakpointerCreate().
1359      * If null is passed, nothing happens.
1360      */
1361     void weakpointerDestroy( void* p )
1362     {
1363         if (p)
1364         {
1365             auto wp = cast(WeakPointer*)p;
1366             // must be extra careful about the GC or parallel threads
1367             // finalizing the reference at the same time
1368             locked!(void)({
1369                    if (wp.reference)
1370                        rt_detachDisposeEvent(wp.reference, &wp.ondestroy);
1371                   });
1372             .free(wp);
1373         }
1374     }
1375
1376     /**
1377      * Query a weak pointer and return either the object passed to
1378      * weakpointerCreate, or null if it was free'd in the meantime.
1379      * If null is passed, null is returned.
1380      */
1381     Object weakpointerGet( void* p )
1382     {
1383         if (!p)
1384             return null;
1385         // NOTE: could avoid the lock by using Fawzi style GC counters but
1386         // that'd require core.sync.Atomic and lots of care about memory
1387         // consistency; it's an optional optimization, see
1388         // http://dsource.org/projects/tango/browser/trunk/user/tango/core/Lifetime.d?rev=5100#L158
1389         return locked!(Object)({
1390               return (cast(WeakPointer*)p).reference;
1391               });
1392     }
1393
1394 }
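// Illustrative sketch (not part of the original source): the weak-pointer API
// above in use.  `gc` is an assumed, already initialized GC instance.
version (none)
{
    Object weakExample(GC gc, Object o)
    {
        void* wp = gc.weakpointerCreate(o);
        // Returns o while it is alive, null once it has been collected or deleted.
        Object r = gc.weakpointerGet(wp);
        gc.weakpointerDestroy(wp);
        return r;
    }
}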
1395
1396
1397 /* ============================ Gcx =============================== */
1398
1399 enum
1400 {   PAGESIZE =    4096,
1401     POOLSIZE =   (4096*256),
1402 }
1403
1404
1405 enum
1406 {
1407     B_16,
1408     B_32,
1409     B_64,
1410     B_128,
1411     B_256,
1412     B_512,
1413     B_1024,
1414     B_2048,
1415     B_PAGE,             // start of large alloc
1416     B_PAGEPLUS,         // continuation of large alloc
1417     B_FREE,             // free page
1418     B_MAX
1419 }
1420
1421
1422 alias ubyte Bins;
1423
1424
1425 struct List
1426 {
1427     List *next;
1428 }
1429
1430
1431 struct Range
1432 {
1433     void *pbot;
1434     void *ptop;
1435 }
1436
1437
1438 const uint binsize[B_MAX] = [ 16,32,64,128,256,512,1024,2048,4096 ];
1439 const uint notbinsize[B_MAX] = [ ~(16u-1),~(32u-1),~(64u-1),~(128u-1),~(256u-1),
1440                                 ~(512u-1),~(1024u-1),~(2048u-1),~(4096u-1) ];
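// Note (sketch): notbinsize[i] is simply ~(binsize[i] - 1), i.e. the mask that
// rounds a pool offset down to the start of its block.  For the 64-byte bin,
// binsize == 0x40 and notbinsize == ~0x3F, so offset 0x1A4 & notbinsize == 0x180;
// this is the trick findBase() and getInfo() below rely on.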
1441
1442 /* ============================ Gcx =============================== */
1443
1444
1445 struct Gcx
1446 {
1447
1448     void *p_cache;
1449     size_t size_cache;
1450
1451     size_t nroots;
1452     size_t rootdim;
1453     void **roots;
1454
1455     size_t nranges;
1456     size_t rangedim;
1457     Range *ranges;
1458
1459     uint noStack;       // !=0 means don't scan stack
1460     uint log;           // turn on logging
1461     uint anychanges;
1462     void *stackBottom;
1463     uint inited;
1464     int disabled;       // turn off collections if >0
1465
1466     byte *minAddr;      // min(baseAddr)
1467     byte *maxAddr;      // max(topAddr)
1468
1469     size_t npools;
1470     Pool **pooltable;
1471
1472     List *bucket[B_MAX];        // free list for each size
1473
1474
1475     void initialize()
1476     {   int dummy;
1477
1478         (cast(byte*)this)[0 .. Gcx.sizeof] = 0;
1479         stackBottom = cast(char*)&dummy;
1480         log_init();
1481         //printf("gcx = %p, self = %x\n", this, self);
1482         inited = 1;
1483     }
1484
1485
1486     void Dtor()
1487     {
1488         inited = 0;
1489
1490         for (size_t i = 0; i < npools; i++)
1491         {   Pool *pool = pooltable[i];
1492
1493             pool.Dtor();
1494             .free(pool);
1495         }
1496         if (pooltable)
1497             .free(pooltable);
1498
1499         if (roots)
1500             .free(roots);
1501
1502         if (ranges)
1503             .free(ranges);
1504     }
1505
1506
1507     void Invariant() { }
1508
1509
1510     invariant
1511     {
1512         if (inited)
1513         {
1514         //printf("Gcx.invariant(): this = %p\n", this);
1515             size_t i;
1516
1517             for (i = 0; i < npools; i++)
1518             {   Pool *pool = pooltable[i];
1519
1520                 pool.Invariant();
1521                 if (i == 0)
1522                 {
1523                     assert(minAddr == pool.baseAddr);
1524                 }
1525                 if (i + 1 < npools)
1526                 {
1527                     assert(pool.opCmp(pooltable[i + 1]) < 0);
1528                 }
1529                 else if (i + 1 == npools)
1530                 {
1531                     assert(maxAddr == pool.topAddr);
1532                 }
1533             }
1534
1535             if (roots)
1536             {
1537                 assert(rootdim != 0);
1538                 assert(nroots <= rootdim);
1539             }
1540
1541             if (ranges)
1542             {
1543                 assert(rangedim != 0);
1544                 assert(nranges <= rangedim);
1545
1546                 for (i = 0; i < nranges; i++)
1547                 {
1548                     assert(ranges[i].pbot);
1549                     assert(ranges[i].ptop);
1550                     assert(ranges[i].pbot <= ranges[i].ptop);
1551                 }
1552             }
1553
1554             for (i = 0; i < B_PAGE; i++)
1555             {
1556                 for (List *list = bucket[i]; list; list = list.next)
1557                 {
1558                 }
1559             }
1560         }
1561     }
1562
1563
1564     /**
1565      * Append p to the root array, growing it as needed.
1566      */
1567     void addRoot(void *p)
1568     {
1569         if (nroots == rootdim)
1570         {
1571             size_t newdim = rootdim * 2 + 16;
1572             void** newroots;
1573
1574             newroots = cast(void**) .malloc(newdim * newroots[0].sizeof);
1575             if (!newroots)
1576                 onOutOfMemoryError();
1577             if (roots)
1578             {   .memcpy(newroots, roots, nroots * newroots[0].sizeof);
1579                 .free(roots);
1580             }
1581             roots = newroots;
1582             rootdim = newdim;
1583         }
1584         roots[nroots] = p;
1585         nroots++;
1586     }
1587
1588
1589     /**
1590      * Remove p from the root array; asserts if p is not found.
1591      */
1592     void removeRoot(void *p)
1593     {
1594         for (size_t i = nroots; i--;)
1595         {
1596             if (roots[i] == p)
1597             {
1598                 nroots--;
1599                 .memmove(roots + i, roots + i + 1, (nroots - i) * roots[0].sizeof);
1600                 return;
1601             }
1602         }
1603         assert(0);
1604     }
1605
1606
1607     /**
1608      * Add the range [pbot, ptop) to the list of ranges scanned for roots.
1609      */
1610     void addRange(void *pbot, void *ptop)
1611     {
1612         debug (PRINTF) printf("%x.Gcx::addRange(%x, %x), nranges = %d\n", this,
1613                 pbot, ptop, nranges);
1614         if (nranges == rangedim)
1615         {
1616             size_t newdim = rangedim * 2 + 16;
1617             Range *newranges;
1618
1619             newranges = cast(Range*) .malloc(newdim * newranges[0].sizeof);
1620             if (!newranges)
1621                 onOutOfMemoryError();
1622             if (ranges)
1623             {   .memcpy(newranges, ranges, nranges * newranges[0].sizeof);
1624                 .free(ranges);
1625             }
1626             ranges = newranges;
1627             rangedim = newdim;
1628         }
1629         ranges[nranges].pbot = pbot;
1630         ranges[nranges].ptop = ptop;
1631         nranges++;
1632     }
1633
1634
1635     /**
1636      * Remove the range starting at pbot; a range that is not found is ignored.
1637      */
1638     void removeRange(void *pbot)
1639     {
1640         debug (PRINTF) printf("%x.Gcx.removeRange(%x), nranges = %d\n", this,
1641                 pbot, nranges);
1642         for (size_t i = nranges; i--;)
1643         {
1644             if (ranges[i].pbot == pbot)
1645             {
1646                 nranges--;
1647                 .memmove(ranges + i, ranges + i + 1, (nranges - i) * ranges[0].sizeof);
1648                 return;
1649             }
1650         }
1651         debug(PRINTF) printf("Wrong thread\n");
1652
1653         // This is a fatal error, but ignore it.
1654         // The problem is that we can get a Close() call on a thread
1655         // other than the one the range was allocated on.
1656         //assert(zero);
1657     }
1658
1659
1660     /**
1661      * Find Pool that pointer is in.
1662      * Return null if not in a Pool.
1663      * Assume pooltable[] is sorted.
1664      */
1665     Pool *findPool(void *p)
1666     {
1667         if (p >= minAddr && p < maxAddr)
1668         {
1669             if (npools == 1)
1670             {
1671                 return pooltable[0];
1672             }
1673
1674             for (size_t i = 0; i < npools; i++)
1675             {   Pool *pool;
1676
1677                 pool = pooltable[i];
1678                 if (p < pool.topAddr)
1679                 {   if (pool.baseAddr <= p)
1680                         return pool;
1681                     break;
1682                 }
1683             }
1684         }
1685         return null;
1686     }
1687
1688
1689     /**
1690      * Find base address of block containing pointer p.
1691      * Returns null if not a gc'd pointer
1692      */
1693     void* findBase(void *p)
1694     {
1695         Pool *pool;
1696
1697         pool = findPool(p);
1698         if (pool)
1699         {
1700             size_t offset = cast(size_t)(p - pool.baseAddr);
1701             size_t pn = offset / PAGESIZE;
1702             Bins   bin = cast(Bins)pool.pagetable[pn];
1703
1704             // Adjust bit to be at start of allocated memory block
1705             if (bin <= B_PAGE)
1706             {
1707                 return pool.baseAddr + (offset & notbinsize[bin]);
1708             }
1709             else if (bin == B_PAGEPLUS)
1710             {
1711                 do
1712                 {   --pn, offset -= PAGESIZE;
1713                 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1714
1715                 return pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1716             }
1717             else
1718             {
1719                 // we are in a B_FREE page
1720                 return null;
1721             }
1722         }
1723         return null;
1724     }
1725
1726
1727     /**
1728      * Find size of pointer p.
1729      * Returns 0 if not a gc'd pointer
1730      */
1731     size_t findSize(void *p)
1732     {
1733         Pool*  pool;
1734         size_t size = 0;
1735
1736         pool = findPool(p);
1737         if (pool)
1738         {
1739             size_t pagenum;
1740             Bins   bin;
1741
1742             pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1743             bin = cast(Bins)pool.pagetable[pagenum];
1744             size = binsize[bin];
1745             if (bin == B_PAGE)
1746             {
1747                 ubyte* pt;
1748                 size_t i;
1749
1750                 pt = &pool.pagetable[0];
1751                 for (i = pagenum + 1; i < pool.npages; i++)
1752                 {
1753                     if (pt[i] != B_PAGEPLUS)
1754                         break;
1755                 }
1756                 size = (i - pagenum) * PAGESIZE;
1757             }
1758         }
1759         return size;
1760     }
1761
1762
1763     /**
1764      * Get the base address, size and attribute bits of the block containing p.
1765      */
1766     BlkInfo getInfo(void* p)
1767     {
1768         Pool*   pool;
1769         BlkInfo info;
1770
1771         pool = findPool(p);
1772         if (pool)
1773         {
1774             size_t offset = cast(size_t)(p - pool.baseAddr);
1775             size_t pn = offset / PAGESIZE;
1776             Bins   bin = cast(Bins)pool.pagetable[pn];
1777
1778             ////////////////////////////////////////////////////////////////////
1779             // findAddr
1780             ////////////////////////////////////////////////////////////////////
1781
1782             if (bin <= B_PAGE)
1783             {
1784                 info.base = pool.baseAddr + (offset & notbinsize[bin]);
1785             }
1786             else if (bin == B_PAGEPLUS)
1787             {
1788                 do
1789                 {   --pn, offset -= PAGESIZE;
1790                 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1791
1792                 info.base = pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1793
1794                 // fix bin for use by size calc below
1795                 bin = cast(Bins)pool.pagetable[pn];
1796             }
1797
1798             ////////////////////////////////////////////////////////////////////
1799             // findSize
1800             ////////////////////////////////////////////////////////////////////
1801
1802             info.size = binsize[bin];
1803             if (bin == B_PAGE)
1804             {
1805                 ubyte* pt;
1806                 size_t i;
1807
1808                 pt = &pool.pagetable[0];
1809                 for (i = pn + 1; i < pool.npages; i++)
1810                 {
1811                     if (pt[i] != B_PAGEPLUS)
1812                         break;
1813                 }
1814                 info.size = (i - pn) * PAGESIZE;
1815             }
1816
1817             ////////////////////////////////////////////////////////////////////
1818             // getBits
1819             ////////////////////////////////////////////////////////////////////
1820
1821             info.attr = getBits(pool, cast(size_t)(offset / 16));
1822         }
1823         return info;
1824     }
1825
1826
1827     /**
1828      * Compute bin for size.
1829      */
1830     static Bins findBin(size_t size)
1831     {   Bins bin;
1832
1833         if (size <= 256)
1834         {
1835             if (size <= 64)
1836             {
1837                 if (size <= 16)
1838                     bin = B_16;
1839                 else if (size <= 32)
1840                     bin = B_32;
1841                 else
1842                     bin = B_64;
1843             }
1844             else
1845             {
1846                 if (size <= 128)
1847                     bin = B_128;
1848                 else
1849                     bin = B_256;
1850             }
1851         }
1852         else
1853         {
1854             if (size <= 1024)
1855             {
1856                 if (size <= 512)
1857                     bin = B_512;
1858                 else
1859                     bin = B_1024;
1860             }
1861             else
1862             {
1863                 if (size <= 2048)
1864                     bin = B_2048;
1865                 else
1866                     bin = B_PAGE;
1867             }
1868         }
1869         return bin;
1870     }
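    // Illustrative mapping (sketch) of the branches above:
    //   findBin(1)    == B_16       findBin(16)   == B_16
    //   findBin(17)   == B_32       findBin(100)  == B_128
    //   findBin(2048) == B_2048     findBin(2049) == B_PAGE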
1871
1872
1873     /**
1874      * Allocate a new pool of at least size bytes.
1875      * Sort it into pooltable[].
1876      * Mark all memory in the pool as B_FREE.
1877      * Return the actual number of bytes reserved or 0 on error.
1878      */
1879     size_t reserve(size_t size)
1880     {
1881         size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
1882         Pool*  pool = newPool(npages);
1883
1884         if (!pool)
1885             return 0;
1886         return pool.npages * PAGESIZE;
1887     }
1888
1889
1890     /**
1891      * Minimizes physical memory usage by returning free pools to the OS.
1892      */
1893     void minimize()
1894     {
1895         size_t n;
1896         size_t pn;
1897         Pool*  pool;
1898
1899         for (n = 0; n < npools; n++)
1900         {
1901             pool = pooltable[n];
1902             for (pn = 0; pn < pool.npages; pn++)
1903             {
1904                 if (cast(Bins)pool.pagetable[pn] != B_FREE)
1905                     break;
1906             }
1907             if (pn < pool.npages)
1908             {
1909                 // pool still has used pages; the loop increment advances n
1910                 continue;
1911             }
1912             pool.Dtor();
1913             .free(pool);
1914             .memmove(pooltable + n,
1915                             pooltable + n + 1,
1916                             (--npools - n) * (Pool*).sizeof);
1917             minAddr = pooltable[0].baseAddr;
1918             maxAddr = pooltable[npools - 1].topAddr;
1919         }
1920     }
1921
1922
1923     /**
1924      * Allocate a chunk of memory that is larger than a page.
1925      * Return null if out of memory.
1926      */
1927     void *bigAlloc(size_t size)
1928     {
1929         Pool*  pool;
1930         size_t npages;
1931         size_t n;
1932         size_t pn;
1933         size_t freedpages;
1934         void*  p;
1935         int    state;
1936
1937         npages = (size + PAGESIZE - 1) / PAGESIZE;
1938
1939         for (state = 0; ; )
1940         {
1941             // This code could use some refinement when repeatedly
1942             // allocating very large arrays.
1943
1944             for (n = 0; n < npools; n++)
1945             {
1946                 pool = pooltable[n];
1947                 pn = pool.allocPages(npages);
1948                 if (pn != OPFAIL)
1949                     goto L1;
1950             }
1951
1952             // Failed
1953             switch (state)
1954             {
1955             case 0:
1956                 if (disabled)
1957                 {   state = 1;
1958                     continue;
1959                 }
1960                 // Try collecting
1961                 freedpages = fullcollectshell();
1962                 if (freedpages >= npools * ((POOLSIZE / PAGESIZE) / 4))
1963                 {   state = 1;
1964                     continue;
1965                 }
1966                 // Release empty pools to prevent bloat
1967                 minimize();
1968                 // Allocate new pool
1969                 pool = newPool(npages);
1970                 if (!pool)
1971                 {   state = 2;
1972                     continue;
1973                 }
1974                 pn = pool.allocPages(npages);
1975                 assert(pn != OPFAIL);
1976                 goto L1;
1977             case 1:
1978                 // Release empty pools to prevent bloat
1979                 minimize();
1980                 // Allocate new pool
1981                 pool = newPool(npages);
1982                 if (!pool)
1983                     goto Lnomemory;
1984                 pn = pool.allocPages(npages);
1985                 assert(pn != OPFAIL);
1986                 goto L1;
1987             case 2:
1988                 goto Lnomemory;
1989             default:
1990                 assert(false);
1991             }
1992         }
1993
1994       L1:
1995         pool.pagetable[pn] = B_PAGE;
1996         if (npages > 1)
1997             .memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
1998         p = pool.baseAddr + pn * PAGESIZE;
1999         .memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
2000         debug (MEMSTOMP) .memset(p, 0xF1, size);
2001         //debug(PRINTF) printf("\tp = %x\n", p);
2002         return p;
2003
2004       Lnomemory:
2005         return null; // let mallocNoSync handle the error
2006     }
2007
2008
2009     /**
2010      * Allocate a new pool with at least npages in it.
2011      * Sort it into pooltable[].
2012      * Return null if failed.
2013      */
2014     Pool *newPool(size_t npages)
2015     {
2016         Pool*  pool;
2017         Pool** newpooltable;
2018         size_t newnpools;
2019         size_t i;
2020
2021         //debug(PRINTF) printf("************Gcx::newPool(npages = %d)****************\n", npages);
2022
2023         // Minimum of POOLSIZE
2024         if (npages < POOLSIZE/PAGESIZE)
2025             npages = POOLSIZE/PAGESIZE;
2026         else if (npages > POOLSIZE/PAGESIZE)
2027         {   // Give us 150% of requested size, so there's room to extend
2028             auto n = npages + (npages >> 1);
2029             if (n < size_t.max/PAGESIZE)
2030                 npages = n;
2031         }
2032
2033         // Allocate successively larger pools up to 8 megs
2034         if (npools)
2035         {   size_t n;
2036
2037             n = npools;
2038             if (n > 8)
2039                 n = 8;                  // cap pool size at 8 megs
2040             n *= (POOLSIZE / PAGESIZE);
2041             if (npages < n)
2042                 npages = n;
2043         }
2044
2045         pool = cast(Pool *) .calloc(1, Pool.sizeof);
2046         if (pool)
2047         {
2048             pool.initialize(npages);
2049             if (!pool.baseAddr)
2050                 goto Lerr;
2051
2052             newnpools = npools + 1;
2053             newpooltable = cast(Pool **) .realloc(pooltable, newnpools * (Pool *).sizeof);
2054             if (!newpooltable)
2055                 goto Lerr;
2056
2057             // Sort pool into newpooltable[]
2058             for (i = 0; i < npools; i++)
2059             {
2060                 if (pool.opCmp(newpooltable[i]) < 0)
2061                      break;
2062             }
2063             .memmove(newpooltable + i + 1, newpooltable + i, (npools - i) * (Pool *).sizeof);
2064             newpooltable[i] = pool;
2065
2066             pooltable = newpooltable;
2067             npools = newnpools;
2068
2069             minAddr = pooltable[0].baseAddr;
2070             maxAddr = pooltable[npools - 1].topAddr;
2071         }
2072         return pool;
2073
2074       Lerr:
2075         pool.Dtor();
2076         .free(pool);
2077         return null;
2078     }
2079
2080
2081     /**
2082      * Allocate a page of entries for the given bin and thread them onto its free list.
2083      * Returns:
2084      *  0 on failure, 1 on success
2085      */
2086     int allocPage(Bins bin)
2087     {
2088         Pool*  pool;
2089         size_t n;
2090         size_t pn;
2091         byte*  p;
2092         byte*  ptop;
2093
2094         //debug(PRINTF) printf("Gcx::allocPage(bin = %d)\n", bin);
2095         for (n = 0; n < npools; n++)
2096         {
2097             pool = pooltable[n];
2098             pn = pool.allocPages(1);
2099             if (pn != OPFAIL)
2100                 goto L1;
2101         }
2102         return 0;               // failed
2103
2104       L1:
2105         pool.pagetable[pn] = cast(ubyte)bin;
2106
2107         // Convert page to free list
2108         size_t size = binsize[bin];
2109         List **b = &bucket[bin];
2110
2111         p = pool.baseAddr + pn * PAGESIZE;
2112         ptop = p + PAGESIZE;
2113         for (; p < ptop; p += size)
2114         {
2115             (cast(List *)p).next = *b;
2116             *b = cast(List *)p;
2117         }
2118         return 1;
2119     }
2120
2121
2122     /**
2123      * Search a range of memory values and mark any pointers into the GC pool.
2124      */
2125     void mark(void *pbot, void *ptop)
2126     {
2127         void **p1 = cast(void **)pbot;
2128         void **p2 = cast(void **)ptop;
2129         size_t pcache = 0;
2130         uint changes = 0;
2131
2132         //printf("marking range: %p -> %p\n", pbot, ptop);
2133         for (; p1 < p2; p1++)
2134         {
2135             Pool *pool;
2136             byte *p = cast(byte *)(*p1);
2137
2138             //if (log) debug(PRINTF) printf("\tmark %x\n", p);
2139             if (p >= minAddr && p < maxAddr)
2140             {
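                     // Skip the pool lookup when this pointer falls in the
                     // same B_PAGE/B_PAGEPLUS page as the previous one
                     // (pcache is set further down).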
2141                 if ((cast(size_t)p & ~(PAGESIZE-1)) == pcache)
2142                     continue;
2143
2144                 pool = findPool(p);
2145                 if (pool)
2146                 {
2147                     size_t offset = cast(size_t)(p - pool.baseAddr);
2148                     size_t biti;
2149                     size_t pn = offset / PAGESIZE;
2150                     Bins   bin = cast(Bins)pool.pagetable[pn];
2151
2152                     //debug(PRINTF) printf("\t\tfound pool %x, base=%x, pn = %d, bin = %d, biti = x%x\n", pool, pool.baseAddr, pn, bin, biti);
2153
2154                     // Adjust bit to be at start of allocated memory block
2155                     if (bin <= B_PAGE)
2156                     {
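                             // notbinsize[] masks the offset down to the
                             // start of the block; each mark bit covers
                             // 16 bytes, hence >> 4.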
2157                         biti = (offset & notbinsize[bin]) >> 4;
2158                         //debug(PRINTF) printf("\t\tbiti = x%x\n", biti);
2159                     }
2160                     else if (bin == B_PAGEPLUS)
2161                     {
2162                         do
2163                         {   --pn;
2164                         } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
2165                         biti = pn * (PAGESIZE / 16);
2166                     }
2167                     else
2168                     {
2169                         // Don't mark bits in B_FREE pages
2170                         continue;
2171                     }
2172
2173                     if (bin >= B_PAGE) // Cache B_PAGE and B_PAGEPLUS lookups
2174                         pcache = cast(size_t)p & ~(PAGESIZE-1);
2175
2176                     //debug(PRINTF) printf("\t\tmark(x%x) = %d\n", biti, pool.mark.test(biti));
2177                     if (!pool.mark.test(biti))
2178                     {
2179                         //if (log) debug(PRINTF) printf("\t\tmarking %x\n", p);
2180                         pool.mark.set(biti);
2181                         if (!pool.noscan.test(biti))
2182                         {
2183                             pool.scan.set(biti);
2184                             changes = 1;
2185                         }
2186                         log_parent(sentinel_add(pool.baseAddr + biti * 16), sentinel_add(pbot));
2187                     }
2188                 }
2189             }
2190         }
2191         anychanges |= changes;
2192     }
2193
2194
2195     /**
2196      * Return number of full pages free'd.
2197      */
2198     size_t fullcollectshell()
2199     {
2200         // The purpose of the 'shell' is to ensure all the registers
2201         // get put on the stack so they'll be scanned
2202         void *sp;
2203         size_t result;
2204         version (GNU)
2205         {
2206             __builtin_unwind_init();
2207             sp = & sp;
2208         }
2209         else version(LDC)
2210         {
2211             version(X86)
2212             {
2213                 uint eax,ecx,edx,ebx,ebp,esi,edi;
2214                 asm
2215                 {
2216                     mov eax[EBP], EAX      ;
2217                     mov ecx[EBP], ECX      ;
2218                     mov edx[EBP], EDX      ;
2219                     mov ebx[EBP], EBX      ;
2220                     mov ebp[EBP], EBP      ;
2221                     mov esi[EBP], ESI      ;
2222                     mov edi[EBP], EDI      ;
2223                     mov  sp[EBP], ESP      ;
2224                 }
2225             }
2226             else version (X86_64)
2227             {
2228                 ulong rax,rbx,rcx,rdx,rbp,rsi,rdi,r8,r9,r10,r11,r12,r13,r14,r15;
2229                 asm
2230                 {
2231                     movq rax[RBP], RAX      ;
2232                     movq rbx[RBP], RBX      ;
2233                     movq rcx[RBP], RCX      ;
2234                     movq rdx[RBP], RDX      ;
2235                     movq rbp[RBP], RBP      ;
2236                     movq rsi[RBP], RSI      ;
2237                     movq rdi[RBP], RDI      ;
2238                     movq r8 [RBP], R8       ;
2239                     movq r9 [RBP], R9       ;
2240                     movq r10[RBP], R10      ;
2241                     movq r11[RBP], R11      ;
2242                     movq r12[RBP], R12      ;
2243                     movq r13[RBP], R13      ;
2244                     movq r14[RBP], R14      ;
2245                     movq r15[RBP], R15      ;
2246                     movq  sp[RBP], RSP      ;
2247                 }
2248             }
2249             else
2250             {
2251                 static assert( false, "Architecture not supported." );
2252             }
2253         }
2254         else
2255         {
2256             asm
2257             {
2258                 pushad              ;
2259                 mov sp[EBP],ESP     ;
2260             }
2261         }
2262         result = fullcollect(sp);
2263         version (GNU)
2264         {
2265             // nothing to do
2266         }
2267         else version(LDC)
2268         {
2269             // nothing to do
2270         }
2271         else
2272         {
2273             asm
2274             {
2275                 popad               ;
2276             }
2277         }
2278         return result;
2279     }
2280
2281
2282     /**
2283      * Run a full mark-and-sweep collection, scanning from stackTop; returns the number of pages freed or recovered.
2284      */
2285     size_t fullcollect(void *stackTop)
2286     {
2287         size_t n;
2288         Pool*  pool;
2289
2290         debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
2291
2292         thread_suspendAll();
2293
2294         p_cache = null;
2295         size_cache = 0;
2296
2297         anychanges = 0;
2298         for (n = 0; n < npools; n++)
2299         {
2300             pool = pooltable[n];
2301             pool.mark.zero();
2302             pool.scan.zero();
2303             pool.freebits.zero();
2304         }
2305
2306         // Mark each free entry, so it doesn't get scanned
2307         for (n = 0; n < B_PAGE; n++)
2308         {
2309             for (List *list = bucket[n]; list; list = list.next)
2310             {
2311                 pool = findPool(list);
2312                 assert(pool);
2313                 pool.freebits.set(cast(size_t)(cast(byte*)list - pool.baseAddr) / 16);
2314             }
2315         }
2316
2317         for (n = 0; n < npools; n++)
2318         {
2319             pool = pooltable[n];
2320             pool.mark.copy(&pool.freebits);
2321         }
2322
2323         rt_scanStaticData( &mark );
2324
2325         if (!noStack)
2326         {
2327             // Scan stacks and registers for each paused thread
2328             thread_scanAll( &mark, stackTop );
2329         }
2330
2331         // Scan roots[]
2332         debug(COLLECT_PRINTF) printf("scan roots[]\n");
2333         mark(roots, roots + nroots);
2334
2335         // Scan ranges[]
2336         debug(COLLECT_PRINTF) printf("scan ranges[]\n");
2337         //log++;
2338         for (n = 0; n < nranges; n++)
2339         {
2340             debug(COLLECT_PRINTF) printf("\t%x .. %x\n", ranges[n].pbot, ranges[n].ptop);
2341             mark(ranges[n].pbot, ranges[n].ptop);
2342         }
2343         //log--;
2344
2345         debug(COLLECT_PRINTF) printf("\tscan heap\n");
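             // Propagate marks transitively: each set bit in pool.scan is a
             // newly marked block whose contents still need scanning; loop
             // until a pass adds no new marks.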
2346         while (anychanges)
2347         {
2348             anychanges = 0;
2349             for (n = 0; n < npools; n++)
2350             {
2351                 uint *bbase;
2352                 uint *b;
2353                 uint *btop;
2354
2355                 pool = pooltable[n];
2356
2357                 bbase = pool.scan.base();
2358                 btop = bbase + pool.scan.nwords;
2359                 for (b = bbase; b < btop;)
2360                 {   Bins   bin;
2361                     size_t pn;
2362                     size_t u;
2363                     size_t bitm;
2364                     byte*  o;
2365
2366                     bitm = *b;
2367                     if (!bitm)
2368                     {   b++;
2369                         continue;
2370                     }
2371                     *b = 0;
2372
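                         // Each scan word covers 32 bits x 16 bytes = 512
                         // bytes of the pool; skip the low 16 bits in one
                         // step if they are all clear.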
2373                     o = pool.baseAddr + (b - bbase) * 32 * 16;
2374                     if (!(bitm & 0xFFFF))
2375                     {
2376                         bitm >>= 16;
2377                         o += 16 * 16;
2378                     }
2379                     for (; bitm; o += 16, bitm >>= 1)
2380                     {
2381                         if (!(bitm & 1))
2382                             continue;
2383
2384                         pn = cast(size_t)(o - pool.baseAddr) / PAGESIZE;
2385                         bin = cast(Bins)pool.pagetable[pn];
2386                         if (bin < B_PAGE)
2387                         {
2388                             mark(o, o + binsize[bin]);
2389                         }
2390                         else if (bin == B_PAGE || bin == B_PAGEPLUS)
2391                         {
2392                             if (bin == B_PAGEPLUS)
2393                             {
2394                                 while (pool.pagetable[pn - 1] != B_PAGE)
2395                                     pn--;
2396                             }
2397                             u = 1;
2398                             while (pn + u < pool.npages && pool.pagetable[pn + u] == B_PAGEPLUS)
2399                                 u++;
2400                             mark(o, o + u * PAGESIZE);
2401                         }
2402                     }
2403                 }
2404             }
2405         }
2406
2407         thread_resumeAll();
2408
2409         // Free up everything not marked
2410         debug(COLLECT_PRINTF) printf("\tfree'ing\n");
2411         size_t freedpages = 0;
2412         size_t freed = 0;
2413         for (n = 0; n < npools; n++)
2414         {   size_t pn;
2415             uint*  bbase;
2416
2417             pool = pooltable[n];
2418             bbase = pool.mark.base();
2419             for (pn = 0; pn < pool.npages; pn++, bbase += PAGESIZE / (32 * 16))
2420             {
2421                 Bins bin = cast(Bins)pool.pagetable[pn];
2422
2423                 if (bin < B_PAGE)
2424                 {   byte* p;
2425                     byte* ptop;
2426                     size_t biti;
2427                     size_t bitstride;
2428                     auto   size = binsize[bin];
2429
2430                     p = pool.baseAddr + pn * PAGESIZE;
2431                     ptop = p + PAGESIZE;
2432                     biti = pn * (PAGESIZE/16);
2433                     bitstride = size / 16;
2434
2435     version(none) // BUG: doesn't work because freebits() must also be cleared
2436     {
2437                     // If free'd entire page
2438                     if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 && bbase[3] == 0 &&
2439                         bbase[4] == 0 && bbase[5] == 0 && bbase[6] == 0 && bbase[7] == 0)
2440                     {
2441                         for (; p < ptop; p += size, biti += bitstride)
2442                         {
2443                             if (pool.finals.nbits && pool.finals.testClear(biti))
2444                                 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2445                             gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
2446
2447                             List *list = cast(List *)p;
2448                             //debug(PRINTF) printf("\tcollecting %x\n", list);
2449                             log_free(sentinel_add(list));
2450
2451                             debug (MEMSTOMP) .memset(p, 0xF3, size);
2452                         }
2453                         pool.pagetable[pn] = B_FREE;
2454                         freed += PAGESIZE;
2455                         //debug(PRINTF) printf("freeing entire page %d\n", pn);
2456                         continue;
2457                     }
2458     }
2459                     for (; p < ptop; p += size, biti += bitstride)
2460                     {
2461                         if (!pool.mark.test(biti))
2462                         {
2463                             sentinel_Invariant(sentinel_add(p));
2464
2465                             pool.freebits.set(biti);
2466                             if (pool.finals.nbits && pool.finals.testClear(biti))
2467                                 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2468                             clrBits(pool, biti, BlkAttr.ALL_BITS);
2469
2470                             List *list = cast(List *)p;
2471                             debug(PRINTF) printf("\tcollecting %x\n", list);
2472                             log_free(sentinel_add(list));
2473
2474                             debug (MEMSTOMP) .memset(p, 0xF3, size);
2475
2476                             freed += size;
2477                         }
2478                     }
2479                 }
2480                 else if (bin == B_PAGE)
2481                 {   size_t biti = pn * (PAGESIZE / 16);
2482
2483                     if (!pool.mark.test(biti))
2484                     {   byte *p = pool.baseAddr + pn * PAGESIZE;
2485
2486                         sentinel_Invariant(sentinel_add(p));
2487                         if (pool.finals.nbits && pool.finals.testClear(biti))
2488                             rt_finalize(sentinel_add(p), false/*noStack > 0*/);
2489                         clrBits(pool, biti, BlkAttr.ALL_BITS);
2490
2491                         debug(COLLECT_PRINTF) printf("\tcollecting big %x\n", p);
2492                         log_free(sentinel_add(p));
2493                         pool.pagetable[pn] = B_FREE;
2494                         freedpages++;
2495                         debug (MEMSTOMP) .memset(p, 0xF3, PAGESIZE);
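                             // Also free the B_PAGEPLUS continuation pages
                             // that belong to this large allocation.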
2496                         while (pn + 1 < pool.npages && pool.pagetable[pn + 1] == B_PAGEPLUS)
2497                         {
2498                             pn++;
2499                             pool.pagetable[pn] = B_FREE;
2500                             freedpages++;
2501
2502                             debug (MEMSTOMP)
2503                             {   p += PAGESIZE;
2504                                 .memset(p, 0xF3, PAGESIZE);
2505                             }
2506                         }
2507                     }
2508                 }
2509             }
2510         }
2511
2512         // Zero buckets
2513         bucket[] = null;
2514
2515         // Free complete pages, rebuild free list
2516         debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
2517         size_t recoveredpages = 0;
2518         for (n = 0; n < npools; n++)
2519         {   size_t pn;
2520
2521             pool = pooltable[n];
2522             for (pn = 0; pn < pool.npages; pn++)
2523             {
2524                 Bins   bin = cast(Bins)pool.pagetable[pn];
2525                 size_t biti;
2526                 size_t u;
2527
2528                 if (bin < B_PAGE)
2529                 {
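                         // If every entry on this page is free, reclaim the
                         // whole page; otherwise re-thread the free entries
                         // onto the bin's free list (Lnotfree).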
2530                     size_t size = binsize[bin];
2531                     size_t bitstride = size / 16;
2532                     size_t bitbase = pn * (PAGESIZE / 16);
2533                     size_t bittop = bitbase + (PAGESIZE / 16);
2534                     byte*  p;
2535
2536                     biti = bitbase;
2537                     for (biti = bitbase; biti < bittop; biti += bitstride)
2538                     {   if (!pool.freebits.test(biti))
2539                             goto Lnotfree;
2540                     }
2541                     pool.pagetable[pn] = B_FREE;
2542                     recoveredpages++;
2543                     continue;
2544
2545                  Lnotfree:
2546                     p = pool.baseAddr + pn * PAGESIZE;
2547                     for (u = 0; u < PAGESIZE; u += size)
2548                     {   biti = bitbase + u / 16;
2549                         if (pool.freebits.test(biti))
2550                         {   List *list;
2551
2552                             list = cast(List *)(p + u);
2553                             if (list.next != bucket[bin])       // avoid unnecessary writes
2554                                 list.next = bucket[bin];
2555                             bucket[bin] = list;
2556                         }
2557                     }
2558                 }
2559             }
2560         }
2561
2562         debug(COLLECT_PRINTF) printf("recovered pages = %d\n", recoveredpages);
2563         debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, npools);
2564
2565         return freedpages + recoveredpages;
2566     }
2567
2568
2569     /**
2570      * Return the BlkAttr bits set for the block at bit index biti in pool.
2571      */
2572     uint getBits(Pool* pool, size_t biti)
2573     in
2574     {
2575         assert( pool );
2576     }
2577     body
2578     {
2579         uint bits;
2580
2581         if (pool.finals.nbits &&
2582             pool.finals.test(biti))
2583             bits |= BlkAttr.FINALIZE;
2584         if (pool.noscan.test(biti))
2585             bits |= BlkAttr.NO_SCAN;
2586 //        if (pool.nomove.nbits &&
2587 //            pool.nomove.test(biti))
2588 //            bits |= BlkAttr.NO_MOVE;
2589         return bits;
2590     }
2591
2592
2593     /**
2594      * Set the BlkAttr bits given by mask for the block at bit index biti in pool.
2595      */
2596     void setBits(Pool* pool, size_t biti, uint mask)
2597     in
2598     {
2599         assert( pool );
2600     }
2601     body
2602     {
2603         if (mask & BlkAttr.FINALIZE)
2604         {
2605             if (!pool.finals.nbits)
2606                 pool.finals.alloc(pool.mark.nbits);
2607             pool.finals.set(biti);
2608         }
2609         if (mask & BlkAttr.NO_SCAN)
2610         {
2611             pool.noscan.set(biti);
2612         }
2613 //        if (mask & BlkAttr.NO_MOVE)
2614 //        {
2615 //            if (!pool.nomove.nbits)
2616 //                pool.nomove.alloc(pool.mark.nbits);
2617 //            pool.nomove.set(biti);
2618 //        }
2619     }
2620
2621
2622     /**
2623      * Clear the BlkAttr bits given by mask for the block at bit index biti in pool.
2624      */
2625     void clrBits(Pool* pool, size_t biti, uint mask)
2626     in
2627     {
2628         assert( pool );
2629     }
2630     body
2631     {
2632         if (mask & BlkAttr.FINALIZE && pool.finals.nbits)
2633             pool.finals.clear(biti);
2634         if (mask & BlkAttr.NO_SCAN)
2635             pool.noscan.clear(biti);
2636 //        if (mask & BlkAttr.NO_MOVE && pool.nomove.nbits)
2637 //            pool.nomove.clear(biti);
2638     }
2639
2640
2641     /***** Leak Detector ******/
2642
2643
2644     debug (LOGGING)
2645     {
2646         LogArray current;
2647         LogArray prev;
2648
2649
2650         void log_init()
2651         {
2652             //debug(PRINTF) printf("+log_init()\n");
2653             current.reserve(1000);
2654             prev.reserve(1000);
2655             //debug(PRINTF) printf("-log_init()\n");
2656         }
2657
2658
2659         void log_malloc(void *p, size_t size)
2660         {
2661             //debug(PRINTF) printf("+log_malloc(p = %x, size = %d)\n", p, size);
2662             Log log;
2663
2664             log.p = p;
2665             log.size = size;
2666             log.line = GC.line;
2667             log.file = GC.file;
2668             log.parent = null;
2669
2670             GC.line = 0;
2671             GC.file = null;
2672
2673             current.push(log);
2674             //debug(PRINTF) printf("-log_malloc()\n");
2675         }
2676
2677
2678         void log_free(void *p)
2679         {
2680             //debug(PRINTF) printf("+log_free(%x)\n", p);
2681             size_t i;
2682
2683             i = current.find(p);
2684             if (i == OPFAIL)
2685             {
2686                 debug(PRINTF) printf("free'ing unallocated memory %x\n", p);
2687             }
2688             else
2689                 current.remove(i);
2690             //debug(PRINTF) printf("-log_free()\n");
2691         }
2692
2693
2694         void log_collect()
2695         {
2696             //debug(PRINTF) printf("+log_collect()\n");
2697             // Print everything in current that is not in prev
2698
2699             debug(PRINTF) printf("New pointers this cycle: --------------------------------\n");
2700             size_t used = 0;
2701             for (size_t i = 0; i < current.dim; i++)
2702             {
2703                 size_t j;
2704
2705                 j = prev.find(current.data[i].p);
2706                 if (j == OPFAIL)
2707                     current.data[i].print();
2708                 else
2709                     used++;
2710             }
2711
2712             debug(PRINTF) printf("All roots this cycle: --------------------------------\n");
2713             for (size_t i = 0; i < current.dim; i++)
2714             {
2715                 void *p;
2716                 size_t j;
2717
2718                 p = current.data[i].p;
2719                 if (!findPool(current.data[i].parent))
2720                 {
2721                     j = prev.find(current.data[i].p);
2722                     if (j == OPFAIL)
2723                         debug(PRINTF) printf("N");
2724                     else
2725                         debug(PRINTF) printf(" ");
2726                     current.data[i].print();
2727                 }
2728             }
2729
2730             debug(PRINTF) printf("Used = %d-------------------------------------------------\n", used);
2731             prev.copy(&current);
2732
2733             debug(PRINTF) printf("-log_collect()\n");
2734         }
2735
2736
2737         void log_parent(void *p, void *parent)
2738         {
2739             //debug(PRINTF) printf("+log_parent()\n");
2740             size_t i;
2741
2742             i = current.find(p);
2743             if (i == OPFAIL)
2744             {
2745                 debug(PRINTF) printf("parent'ing unallocated memory %x, parent = %x\n", p, parent);
2746                 Pool *pool;
2747                 pool = findPool(p);
2748                 assert(pool);
2749                 size_t offset = cast(size_t)(p - pool.baseAddr);
2750                 size_t biti;
2751                 size_t pn = offset / PAGESIZE;
2752                 Bins bin = cast(Bins)pool.pagetable[pn];
2753                 biti = (offset & notbinsize[bin]);
2754                 debug(PRINTF) printf("\tbin = %d, offset = x%x, biti = x%x\n", bin, offset, biti);
2755             }
2756             else
2757             {
2758                 current.data[i].parent = parent;
2759             }
2760             //debug(PRINTF) printf("-log_parent()\n");
2761         }
2762
2763     }
2764     else
2765     {
2766         void log_init() { }
2767         void log_malloc(void *p, size_t size) { }
2768         void log_free(void *p) { }
2769         void log_collect() { }
2770         void log_parent(void *p, void *parent) { }
2771     }
2772 }
2773
2774
2775 /* ============================ Pool  =============================== */
2776
2777
2778 struct Pool
2779 {
2780     byte* baseAddr;
2781     byte* topAddr;
2782     GCBits mark;        // entries already scanned, or should not be scanned
2783     GCBits scan;        // entries that need to be scanned
2784     GCBits freebits;    // entries that are on the free list
2785     GCBits finals;      // entries that need finalizer run on them
2786     GCBits noscan;      // entries that should not be scanned
2787
2788     size_t npages;
2789     ubyte* pagetable;
2790
2791
2792     void initialize(size_t npages)
2793     {
2794         size_t poolsize;
2795
2796         //debug(PRINTF) printf("Pool::Pool(%u)\n", npages);
2797         poolsize = npages * PAGESIZE;
2798         assert(poolsize >= POOLSIZE);
2799         baseAddr = cast(byte *)os_mem_map(poolsize);
2800
2801         // Some of the code depends on page alignment of memory pools
2802         assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);
2803
2804         if (!baseAddr)
2805         {
2806             //debug(PRINTF) printf("GC fail: poolsize = x%x, errno = %d\n", poolsize, errno);
2807             //debug(PRINTF) printf("message = '%s'\n", sys_errlist[errno]);
2808
2809             npages = 0;
2810             poolsize = 0;
2811         }
2812         //assert(baseAddr);
2813         topAddr = baseAddr + poolsize;
2814
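             // One bit per 16 bytes of pool memory in each bit set.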
2815         mark.alloc(cast(size_t)poolsize / 16);
2816         scan.alloc(cast(size_t)poolsize / 16);
2817         freebits.alloc(cast(size_t)poolsize / 16);
2818         noscan.alloc(cast(size_t)poolsize / 16);
2819
2820         pagetable = cast(ubyte*) .malloc(npages);
2821         if (!pagetable)
2822             onOutOfMemoryError();
2823         .memset(pagetable, B_FREE, npages);
2824
2825         this.npages = npages;
2826     }
2827
2828
2829     void Dtor()
2830     {
2831         if (baseAddr)
2832         {
2833             int result;
2834
2835             if (npages)
2836             {
2837                 result = os_mem_unmap(baseAddr, npages * PAGESIZE);
2838                 assert(result);
2839                 npages = 0;
2840             }
2841
2842             baseAddr = null;
2843             topAddr = null;
2844         }
2845         if (pagetable)
2846             .free(pagetable);
2847
2848         mark.Dtor();
2849         scan.Dtor();
2850         freebits.Dtor();
2851         finals.Dtor();
2852         noscan.Dtor();
2853     }
2854
2855
2856     void Invariant() { }
2857
2858
2859     invariant
2860     {
2861         //mark.Invariant();
2862         //scan.Invariant();
2863         //freebits.Invariant();
2864         //finals.Invariant();
2865         //noscan.Invariant();
2866
2867         if (baseAddr)
2868         {
2869             //if (baseAddr + npages * PAGESIZE != topAddr)
2870                 //printf("baseAddr = %p, npages = %d, topAddr = %p\n", baseAddr, npages, topAddr);
2871             assert(baseAddr + npages * PAGESIZE == topAddr);
2872         }
2873
2874         for (size_t i = 0; i < npages; i++)
2875         {   Bins bin = cast(Bins)pagetable[i];
2876
2877             assert(bin < B_MAX);
2878         }
2879     }
2880
2881
2882     /**
2883      * Allocate n pages from Pool.
2884      * Returns OPFAIL on failure.
2885      */
2886     size_t allocPages(size_t n)
2887     {
2888         size_t i;
2889         size_t n2;
2890
2891         //debug(PRINTF) printf("Pool::allocPages(n = %d)\n", n);
2892         n2 = n;
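             // Linear scan for a run of n consecutive B_FREE pages; n2 counts
             // how many more free pages are needed to complete the current run.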
2893         for (i = 0; i < npages; i++)
2894         {
2895             if (pagetable[i] == B_FREE)
2896             {
2897                 if (--n2 == 0)
2898                 {   //debug(PRINTF) printf("\texisting pn = %d\n", i - n + 1);
2899                     return i - n + 1;
2900                 }
2901             }
2902             else
2903                 n2 = n;
2904         }
2905         return OPFAIL;
2906     }
2907
2908
2909     /**
2910      * Free npages pages starting with pagenum.
2911      */
2912     void freePages(size_t pagenum, size_t npages)
2913     {
2914         .memset(&pagetable[pagenum], B_FREE, npages);
2915     }
2916
2917
2918     /**
2919      * Used for sorting pooltable[]
2920      */
2921     int opCmp(Pool *p2)
2922     {
2923         if (baseAddr < p2.baseAddr)
2924             return -1;
2925         else
2926             return cast(int)(baseAddr > p2.baseAddr);
2927     }
2928 }
2929
2930
2931 /* ============================ SENTINEL =============================== */
2932
2933
2934 version (SENTINEL)
2935 {
2936     const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
2937     const ubyte SENTINEL_POST = 0xF5;           // 8 bits
2938     const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;
2939
2940
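         // Sentinel layout around a user allocation:
         //   [ size_t size | size_t SENTINEL_PRE | user data ... | ubyte SENTINEL_POST ]
         // The allocator (not shown in this section) is expected to reserve
         // SENTINEL_EXTRA additional bytes, hand out sentinel_add(p), record
         // the size with sentinel_init(), and later check the guards with
         // sentinel_Invariant() before freeing sentinel_sub(p).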
2941     size_t* sentinel_size(void *p)  { return &(cast(size_t *)p)[-2]; }
2942     size_t* sentinel_pre(void *p)   { return &(cast(size_t *)p)[-1]; }
2943     ubyte* sentinel_post(void *p) { return &(cast(ubyte *)p)[*sentinel_size(p)]; }
2944
2945
2946     void sentinel_init(void *p, size_t size)
2947     {
2948         *sentinel_size(p) = size;
2949         *sentinel_pre(p) = SENTINEL_PRE;
2950         *sentinel_post(p) = SENTINEL_POST;
2951     }
2952
2953
2954     void sentinel_Invariant(void *p)
2955     {
2956         assert(*sentinel_pre(p) == SENTINEL_PRE);
2957         assert(*sentinel_post(p) == SENTINEL_POST);
2958     }
2959
2960
2961     void *sentinel_add(void *p)
2962     {
2963         return p + 2 * size_t.sizeof;
2964     }
2965
2966
2967     void *sentinel_sub(void *p)
2968     {
2969         return p - 2 * size_t.sizeof;
2970     }
2971 }
2972 else
2973 {
2974     const uint SENTINEL_EXTRA = 0;
2975
2976
2977     void sentinel_init(void *p, size_t size)
2978     {
2979     }
2980
2981
2982     void sentinel_Invariant(void *p)
2983     {
2984     }
2985
2986
2987     void *sentinel_add(void *p)
2988     {
2989         return p;
2990     }
2991
2992
2993     void *sentinel_sub(void *p)
2994     {
2995         return p;
2996     }
2997 }
2998