1 /**
2  * This module contains the garbage collector implementation.
3  *
4  * Copyright: Copyright (C) 2001-2007 Digital Mars, www.digitalmars.com.
5  *            All rights reserved.
6  * License:
7  *  This software is provided 'as-is', without any express or implied
8  *  warranty. In no event will the authors be held liable for any damages
9  *  arising from the use of this software.
10  *
11  *  Permission is granted to anyone to use this software for any purpose,
12  *  including commercial applications, and to alter it and redistribute it
13  *  freely, in both source and binary form, subject to the following
14  *  restrictions:
15  *
16  *  o  The origin of this software must not be misrepresented; you must not
17  *     claim that you wrote the original software. If you use this software
18  *     in a product, an acknowledgment in the product documentation would be
19  *     appreciated but is not required.
20  *  o  Altered source versions must be plainly marked as such, and must not
21  *     be misrepresented as being the original software.
22  *  o  This notice may not be removed or altered from any source
23  *     distribution.
24  * Authors:   Walter Bright, David Friedman, Sean Kelly
25  */
26
27 module rt.gc.cdgc.gc;
28
29 // D Programming Language Garbage Collector implementation
30
31 /************** Debugging ***************************/
32
33 //debug = PRINTF;               // turn on printf's
34 //debug = COLLECT_PRINTF;       // turn on printf's
35 //debug = LOGGING;              // log allocations / frees
36 //debug = MEMSTOMP;             // stomp on memory
37 //debug = SENTINEL;             // add underrun/overrun protection
38 //debug = PTRCHECK;             // more pointer checking
39 //debug = PTRCHECK2;            // thorough but slow pointer checking
40
41 /*************** Configuration *********************/
42
43 version = STACKGROWSDOWN;       // growing the stack means subtracting from the stack pointer
44                                 // (use for Intel X86 CPUs)
45                                 // else growing the stack means adding to the stack pointer
46
47 /***************************************************/
48
49 import rt.gc.cdgc.bits: GCBits;
50 import rt.gc.cdgc.stats: GCStats;
51 import alloc = rt.gc.cdgc.alloc;
52 import libc = rt.gc.cdgc.libc;
53
54
55 version (GNU)
56 {
57     // BUG: The following import will likely not work, since the gcc
58     //      subdirectory is elsewhere.  Instead, perhaps the functions
59     //      could be declared directly or some other resolution could
60     //      be found.
61     static import gcc.builtins; // for __builtin_unwind_init
62 }
63
64
65 struct BlkInfo
66 {
67     void*  base;
68     size_t size;
69     uint   attr;
70 }
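
// Illustrative summary: base is the start address of the containing block,
// size its total allocated size in bytes and attr its BlkAttr bits; see
// Gcx.getInfo() for how these fields are filled in.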
71
72 private
73 {
74     enum BlkAttr : uint
75     {
76         FINALIZE = 0b0000_0001,
77         NO_SCAN  = 0b0000_0010,
78         NO_MOVE  = 0b0000_0100,
79         ALL_BITS = 0b1111_1111
80     }
81
82     extern (C) void* rt_stackBottom();
83     extern (C) void* rt_stackTop();
84
85     extern (C) void rt_finalize( void* p, bool det = true );
86
87     alias void delegate(Object) DEvent;
88     extern (C) void rt_attachDisposeEvent(Object h, DEvent e);
89     extern (C) bool rt_detachDisposeEvent(Object h, DEvent e);
90
91
92     alias void delegate( void*, void* ) scanFn;
93
94     extern (C) void rt_scanStaticData( scanFn scan );
95
96     extern (C) bool thread_needLock();
97     extern (C) void thread_suspendAll();
98     extern (C) void thread_resumeAll();
99
100     extern (C) void thread_scanAll( scanFn fn, void* curStackTop = null );
101
102     extern (C) void onOutOfMemoryError();
103
104     enum
105     {
106         OPFAIL = ~cast(size_t)0
107     }
108 }
109
110
111 alias GC gc_t;
112
113
114 /* ======================= Leak Detector =========================== */
115
116
117 debug (LOGGING)
118 {
119     struct Log
120     {
121         void*  p;
122         size_t size;
123         size_t line;
124         char*  file;
125         void*  parent;
126
127         void print()
128         {
129             printf("    p = %x, size = %d, parent = %x ", p, size, parent);
130             if (file)
131             {
132                 printf("%s(%u)", file, line);
133             }
134             printf("\n");
135         }
136     }
137
138
139     struct LogArray
140     {
141         size_t dim;
142         size_t allocdim;
143         Log *data;
144
145         void Dtor()
146         {
147             if (data)
148                 libc.free(data);
149             data = null;
150         }
151
152         void reserve(size_t nentries)
153         {
154             assert(dim <= allocdim);
155             if (allocdim - dim < nentries)
156             {
157                 allocdim = (dim + nentries) * 2;
158                 assert(dim + nentries <= allocdim);
159                 if (!data)
160                 {
161                     data = cast(Log*) libc.malloc(allocdim * Log.sizeof);
162                     if (!data && allocdim)
163                         onOutOfMemoryError();
164                 }
165                 else
166                 {
167                     Log *newdata = cast(Log*) libc.malloc(
168                             allocdim * Log.sizeof);
169                     if (!newdata && allocdim)
170                         onOutOfMemoryError();
171                     libc.memcpy(newdata, data, dim * Log.sizeof);
172                     libc.free(data);
173                     data = newdata;
174                 }
175             }
176         }
177
178
179         void push(Log log)
180         {
181             reserve(1);
182             data[dim++] = log;
183         }
184
185         void remove(size_t i)
186         {
187             libc.memmove(data + i, data + i + 1, (dim - i - 1) * Log.sizeof);
188             dim--;
189         }
190
191
192         size_t find(void *p)
193         {
194             for (size_t i = 0; i < dim; i++)
195             {
196                 if (data[i].p == p)
197                     return i;
198             }
199             return OPFAIL; // not found
200         }
201
202
203         void copy(LogArray *from)
204         {
205             reserve(from.dim - dim);
206             assert(from.dim <= allocdim);
207             libc.memcpy(data, from.data, from.dim * Log.sizeof);
208             dim = from.dim;
209         }
210     }
211 }
212
213
214 /* ============================ GC =============================== */
215
216
217 class GCLock { }                // just a dummy so we can get a global lock
218
219
220 const uint GCVERSION = 1;       // increment every time we change interface
221                                 // to GC.
222
223 class GC
224 {
225     // For passing to debug code
226     static size_t line;
227     static char*  file;
228
229     uint gcversion = GCVERSION;
230
231     Gcx *gcx;                   // implementation
232     static ClassInfo gcLock;    // global lock
233
234
235     void initialize()
236     {
237         gcLock = GCLock.classinfo;
238         gcx = cast(Gcx*) libc.calloc(1, Gcx.sizeof);
239         if (!gcx)
240             onOutOfMemoryError();
241         gcx.initialize();
242         setStackBottom(rt_stackBottom());
243     }
244
245
246     void Dtor()
247     {
248         if (gcx)
249         {
250             gcx.Dtor();
251             libc.free(gcx);
252             gcx = null;
253         }
254     }
255
256
257     /**
258      * Re-enable collections; each call must balance an earlier call to disable().
259      */
260     void enable()
261     {
262         if (!thread_needLock())
263         {
264             assert(gcx.disabled > 0);
265             gcx.disabled--;
266         }
267         else synchronized (gcLock)
268         {
269             assert(gcx.disabled > 0);
270             gcx.disabled--;
271         }
272     }
273
274
275     /**
276      * Disable collections; calls nest and are undone by matching calls to enable().
277      */
278     void disable()
279     {
280         if (!thread_needLock())
281         {
282             gcx.disabled++;
283         }
284         else synchronized (gcLock)
285         {
286             gcx.disabled++;
287         }
288     }
289
290
291     /**
292      * Return the attribute bits (BlkAttr) of the block containing p, or 0 if p is not a GC pointer.
293      */
294     uint getAttr(void* p)
295     {
296         if (!p)
297         {
298             return 0;
299         }
300
301         uint go()
302         {
303             Pool* pool = gcx.findPool(p);
304             uint  oldb = 0;
305
306             if (pool)
307             {
308                 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
309
310                 oldb = gcx.getBits(pool, biti);
311             }
312             return oldb;
313         }
314
315         if (!thread_needLock())
316         {
317             return go();
318         }
319         else synchronized (gcLock)
320         {
321             return go();
322         }
323     }
324
325
326     /**
327      * Set the attribute bits in mask on the block containing p and return the previous bits.
328      */
329     uint setAttr(void* p, uint mask)
330     {
331         if (!p)
332         {
333             return 0;
334         }
335
336         uint go()
337         {
338             Pool* pool = gcx.findPool(p);
339             uint  oldb = 0;
340
341             if (pool)
342             {
343                 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
344
345                 oldb = gcx.getBits(pool, biti);
346                 gcx.setBits(pool, biti, mask);
347             }
348             return oldb;
349         }
350
351         if (!thread_needLock())
352         {
353             return go();
354         }
355         else synchronized (gcLock)
356         {
357             return go();
358         }
359     }
360
361
362     /**
363      * Clear the attribute bits in mask on the block containing p and return the previous bits.
364      */
365     uint clrAttr(void* p, uint mask)
366     {
367         if (!p)
368         {
369             return 0;
370         }
371
372         uint go()
373         {
374             Pool* pool = gcx.findPool(p);
375             uint  oldb = 0;
376
377             if (pool)
378             {
379                 auto biti = cast(size_t)(p - pool.baseAddr) / 16;
380
381                 oldb = gcx.getBits(pool, biti);
382                 gcx.clrBits(pool, biti, mask);
383             }
384             return oldb;
385         }
386
387         if (!thread_needLock())
388         {
389             return go();
390         }
391         else synchronized (gcLock)
392         {
393             return go();
394         }
395     }
396
397
398     /**
399      * Allocate size bytes of GC-managed memory with the given attribute bits (BlkAttr).
400      */
401     void *malloc(size_t size, uint bits = 0)
402     {
403         if (!size)
404         {
405             return null;
406         }
407
408         if (!thread_needLock())
409         {
410             return mallocNoSync(size, bits);
411         }
412         else synchronized (gcLock)
413         {
414             return mallocNoSync(size, bits);
415         }
416     }
417
418
419     //
420     //
421     //
422     private void *mallocNoSync(size_t size, uint bits = 0)
423     {
424         assert(size != 0);
425
426         void *p = null;
427         Bins bin;
428
429         //debug(PRINTF) printf("GC::malloc(size = %d, gcx = %p)\n", size, gcx);
430         assert(gcx);
431
432         size += SENTINEL_EXTRA;
433
434         // Compute size bin
435         // Cache previous binsize lookup - Dave Fladebo.
436         static size_t lastsize = -1;
437         static Bins lastbin;
438         if (size == lastsize)
439             bin = lastbin;
440         else
441         {
442             bin = gcx.findBin(size);
443             lastsize = size;
444             lastbin = bin;
445         }
446
447         if (bin < B_PAGE)
448         {
449             p = gcx.bucket[bin];
450             if (p is null)
451             {
452                 if (!gcx.allocPage(bin) && !gcx.disabled)   // try to find a new page
453                 {
454                     if (!thread_needLock())
455                     {
456                         /* We haven't acquired the lock yet. Be sure
457                          * to lock for the collection, since a finalizer
458                          * may start a new thread.
459                          */
460                         synchronized (gcLock)
461                         {
462                             gcx.fullcollectshell();
463                         }
464                     }
465                     else if (!gcx.fullcollectshell())       // collect to find a new page
466                     {
467                         //gcx.newPool(1);
468                     }
469                 }
470                 if (!gcx.bucket[bin] && !gcx.allocPage(bin))
471                 {
472                     gcx.newPool(1);         // allocate new pool to find a new page
473                     int result = gcx.allocPage(bin);
474                     if (!result)
475                         onOutOfMemoryError();
476                 }
477                 p = gcx.bucket[bin];
478             }
479
480             // Return next item from free list
481             gcx.bucket[bin] = (cast(List*)p).next;
482             if( !(bits & BlkAttr.NO_SCAN) )
483                 libc.memset(p + size, 0, binsize[bin] - size);
484             //debug(PRINTF) printf("\tmalloc => %x\n", p);
485             debug (MEMSTOMP) libc.memset(p, 0xF0, size);
486         }
487         else
488         {
489             p = gcx.bigAlloc(size);
490             if (!p)
491                 onOutOfMemoryError();
492         }
493         size -= SENTINEL_EXTRA;
494         p = sentinel_add(p);
495         sentinel_init(p, size);
496         gcx.log_malloc(p, size);
497
498         if (bits)
499         {
500             Pool *pool = gcx.findPool(p);
501             assert(pool);
502
503             gcx.setBits(pool, cast(size_t)(p - pool.baseAddr) / 16, bits);
504         }
505         return p;
506     }
507
508
509     /**
510      * Allocate size bytes of zero-initialized GC-managed memory with the given attribute bits.
511      */
512     void *calloc(size_t size, uint bits = 0)
513     {
514         if (!size)
515         {
516             return null;
517         }
518
519         if (!thread_needLock())
520         {
521             return callocNoSync(size, bits);
522         }
523         else synchronized (gcLock)
524         {
525             return callocNoSync(size, bits);
526         }
527     }
528
529
530     //
531     //
532     //
533     private void *callocNoSync(size_t size, uint bits = 0)
534     {
535         assert(size != 0);
536
537         //debug(PRINTF) printf("calloc: %x len %d\n", p, len);
538         void *p = mallocNoSync(size, bits);
539         libc.memset(p, 0, size);
540         return p;
541     }
542
543
544     /**
545      * Resize the block pointed to by p to size bytes; the block may be moved and its contents copied.
546      */
547     void *realloc(void *p, size_t size, uint bits = 0)
548     {
549         if (!thread_needLock())
550         {
551             return reallocNoSync(p, size, bits);
552         }
553         else synchronized (gcLock)
554         {
555             return reallocNoSync(p, size, bits);
556         }
557     }
558
559
560     //
561     //
562     //
563     private void *reallocNoSync(void *p, size_t size, uint bits = 0)
564     {
565         if (!size)
566         {
567             if (p)
568             {
569                 freeNoSync(p);
570                 p = null;
571             }
572         }
573         else if (!p)
574         {
575             p = mallocNoSync(size, bits);
576         }
577         else
578         {
579             void *p2;
580             size_t psize;
581
582             //debug(PRINTF) printf("GC::realloc(p = %x, size = %u)\n", p, size);
583             version (SENTINEL)
584             {
585                 sentinel_Invariant(p);
586                 psize = *sentinel_size(p);
587                 if (psize != size)
588                 {
589                     if (psize)
590                     {
591                         Pool *pool = gcx.findPool(p);
592
593                         if (pool)
594                         {
595                             auto biti = cast(size_t)(p - pool.baseAddr) / 16;
596
597                             if (bits)
598                             {
599                                 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
600                                 gcx.setBits(pool, biti, bits);
601                             }
602                             else
603                             {
604                                 bits = gcx.getBits(pool, biti);
605                             }
606                         }
607                     }
608                     p2 = mallocNoSync(size, bits);
609                     if (psize < size)
610                         size = psize;
611                     //debug(PRINTF) printf("\tcopying %d bytes\n",size);
612                     libc.memcpy(p2, p, size);
613                     p = p2;
614                 }
615             }
616             else
617             {
618                 psize = gcx.findSize(p);        // find allocated size
619                 if (psize >= PAGESIZE && size >= PAGESIZE)
620                 {
621                     auto psz = psize / PAGESIZE;
622                     auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
623                     if (newsz == psz)
624                         return p;
625
626                     auto pool = gcx.findPool(p);
627                     auto pagenum = (p - pool.baseAddr) / PAGESIZE;
628
629                     if (newsz < psz)
630                     {
631                         // Shrink in place
632                         synchronized (gcLock)
633                         {
634                             debug (MEMSTOMP)
635                                 libc.memset(p + size, 0xF2, psize - size);
636                             pool.freePages(pagenum + newsz, psz - newsz);
637                         }
638                         return p;
639                     }
640                     else if (pagenum + newsz <= pool.npages)
641                     {
642                         // Attempt to expand in place
643                         synchronized (gcLock)
644                         {
645                             for (size_t i = pagenum + psz; 1;)
646                             {
647                                 if (i == pagenum + newsz)
648                                 {
649                                     debug (MEMSTOMP)
650                                         libc.memset(p + psize, 0xF0,
651                                                 size - psize);
652                                     libc.memset(&pool.pagetable[pagenum + psz],
653                                             B_PAGEPLUS, newsz - psz);
654                                     return p;
655                                 }
656                                 if (i == pool.npages)
657                                 {
658                                     break;
659                                 }
660                                 if (pool.pagetable[i] != B_FREE)
661                                     break;
662                                 i++;
663                             }
664                         }
665                     }
666                 }
667                 if (psize < size ||             // if new size is bigger
668                     psize > size * 2)           // or new size is less than half the old
669                 {
670                     if (psize)
671                     {
672                         Pool *pool = gcx.findPool(p);
673
674                         if (pool)
675                         {
676                             auto biti = cast(size_t)(p - pool.baseAddr) / 16;
677
678                             if (bits)
679                             {
680                                 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
681                                 gcx.setBits(pool, biti, bits);
682                             }
683                             else
684                             {
685                                 bits = gcx.getBits(pool, biti);
686                             }
687                         }
688                     }
689                     p2 = mallocNoSync(size, bits);
690                     if (psize < size)
691                         size = psize;
692                     //debug(PRINTF) printf("\tcopying %d bytes\n",size);
693                     libc.memcpy(p2, p, size);
694                     p = p2;
695                 }
696             }
697         }
698         return p;
699     }
700
701
702     /**
703      * Attempt to in-place enlarge the memory block pointed to by p by at least
704      * minbytes beyond its current capacity, up to a maximum of maxsize.  This
705      * does not attempt to move the memory block (like realloc() does).
706      *
707      * Returns:
708      *  0 if could not extend p,
709      *  total size of entire memory block if successful.
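     *
     * Example (illustrative sketch; `buf`, `oldsize` and `gc` are hypothetical):
     * ---
     * auto newcap = gc.extend(buf, 1024, 4096);
     * if (newcap == 0)
     *     buf = gc.realloc(buf, oldsize + 1024);  // could not grow in place
     * ---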
710      */
711     size_t extend(void* p, size_t minsize, size_t maxsize)
712     {
713         if (!thread_needLock())
714         {
715             return extendNoSync(p, minsize, maxsize);
716         }
717         else synchronized (gcLock)
718         {
719             return extendNoSync(p, minsize, maxsize);
720         }
721     }
722
723
724     //
725     //
726     //
727     private size_t extendNoSync(void* p, size_t minsize, size_t maxsize)
728     in
729     {
730         assert( minsize <= maxsize );
731     }
732     body
733     {
734         //debug(PRINTF) printf("GC::extend(p = %x, minsize = %u, maxsize = %u)\n", p, minsize, maxsize);
735         version (SENTINEL)
736         {
737             return 0;
738         }
739         auto psize = gcx.findSize(p);   // find allocated size
740         if (psize < PAGESIZE)
741             return 0;                   // cannot extend buckets
742
743         auto psz = psize / PAGESIZE;
744         auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
745         auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
746
747         auto pool = gcx.findPool(p);
748         auto pagenum = (p - pool.baseAddr) / PAGESIZE;
749
750         size_t sz;
751         for (sz = 0; sz < maxsz; sz++)
752         {
753             auto i = pagenum + psz + sz;
754             if (i == pool.npages)
755                 break;
756             if (pool.pagetable[i] != B_FREE)
757             {
758                 if (sz < minsz)
759                     return 0;
760                 break;
761             }
762         }
763         if (sz < minsz)
764             return 0;
765         debug (MEMSTOMP)
766             libc.memset(p + psize, 0xF0, (psz + sz) * PAGESIZE - psize);
767         libc.memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
768         gcx.p_cache = null;
769         gcx.size_cache = 0;
770         return (psz + sz) * PAGESIZE;
771     }
772
773
774     /**
775      * Reserve at least size bytes for future allocations; returns the number of bytes actually reserved, or 0 on failure.
776      */
777     size_t reserve(size_t size)
778     {
779         if (!size)
780         {
781             return 0;
782         }
783
784         if (!thread_needLock())
785         {
786             return reserveNoSync(size);
787         }
788         else synchronized (gcLock)
789         {
790             return reserveNoSync(size);
791         }
792     }
793
794
795     //
796     //
797     //
798     private size_t reserveNoSync(size_t size)
799     {
800         assert(size != 0);
801         assert(gcx);
802
803         return gcx.reserve(size);
804     }
805
806
807     /**
808      * Return the block pointed to by p to the GC free lists; pointers the GC does not own are ignored.
809      */
810     void free(void *p)
811     {
812         if (!p)
813         {
814             return;
815         }
816
817         if (!thread_needLock())
818         {
819             return freeNoSync(p);
820         }
821         else synchronized (gcLock)
822         {
823             return freeNoSync(p);
824         }
825     }
826
827
828     //
829     //
830     //
831     private void freeNoSync(void *p)
832     {
833         assert (p);
834
835         Pool*  pool;
836         size_t pagenum;
837         Bins   bin;
838         size_t biti;
839
840         // Find which page it is in
841         pool = gcx.findPool(p);
842         if (!pool)                              // if not one of ours
843             return;                             // ignore
844         sentinel_Invariant(p);
845         p = sentinel_sub(p);
846         pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
847         biti = cast(size_t)(p - pool.baseAddr) / 16;
848         gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
849
850         bin = cast(Bins)pool.pagetable[pagenum];
851         if (bin == B_PAGE)              // if large alloc
852         {
853             // Free pages
854             size_t npages = 1;
855             size_t n = pagenum;
856             while (++n < pool.npages && pool.pagetable[n] == B_PAGEPLUS)
857                 npages++;
858             debug (MEMSTOMP) libc.memset(p, 0xF2, npages * PAGESIZE);
859             pool.freePages(pagenum, npages);
860         }
861         else
862         {
863             // Add to free list
864             List *list = cast(List*)p;
865
866             debug (MEMSTOMP) libc.memset(p, 0xF2, binsize[bin]);
867
868             list.next = gcx.bucket[bin];
869             gcx.bucket[bin] = list;
870         }
871         gcx.log_free(sentinel_add(p));
872     }
873
874
875     /**
876      * Determine the base address of the block containing p.  If p is not a gc
877      * allocated pointer, return null.
878      */
879     void* addrOf(void *p)
880     {
881         if (!p)
882         {
883             return null;
884         }
885
886         if (!thread_needLock())
887         {
888             return addrOfNoSync(p);
889         }
890         else synchronized (gcLock)
891         {
892             return addrOfNoSync(p);
893         }
894     }
895
896
897     //
898     //
899     //
900     void* addrOfNoSync(void *p)
901     {
902         if (!p)
903         {
904             return null;
905         }
906
907         return gcx.findBase(p);
908     }
909
910
911     /**
912      * Determine the allocated size of pointer p.  If p is an interior pointer
913      * or not a gc allocated pointer, return 0.
914      */
915     size_t sizeOf(void *p)
916     {
917         if (!p)
918         {
919             return 0;
920         }
921
922         if (!thread_needLock())
923         {
924             return sizeOfNoSync(p);
925         }
926         else synchronized (gcLock)
927         {
928             return sizeOfNoSync(p);
929         }
930     }
931
932
933     //
934     //
935     //
936     private size_t sizeOfNoSync(void *p)
937     {
938         assert (p);
939
940         version (SENTINEL)
941         {
942             p = sentinel_sub(p);
943             size_t size = gcx.findSize(p);
944
945             // Check for interior pointer
946             // This depends on:
947             // 1) size is a power of 2 for less than PAGESIZE values
948             // 2) base of memory pool is aligned on PAGESIZE boundary
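            // Worked example (illustrative): for a 64-byte bin, a pointer
            // 8 bytes into a block gives (p & (64-1)) == 8, so the pointer
            // is treated as interior and 0 is returned.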
949             if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
950                 size = 0;
951             return size ? size - SENTINEL_EXTRA : 0;
952         }
953         else
954         {
955             if (p == gcx.p_cache)
956                 return gcx.size_cache;
957
958             size_t size = gcx.findSize(p);
959
960             // Check for interior pointer
961             // This depends on:
962             // 1) size is a power of 2 for less than PAGESIZE values
963             // 2) base of memory pool is aligned on PAGESIZE boundary
964             if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
965                 size = 0;
966             else
967             {
968                 gcx.p_cache = p;
969                 gcx.size_cache = size;
970             }
971
972             return size;
973         }
974     }
975
976
977     /**
978      * Retrieve the base address, size and attribute bits of the block containing p.
979      * If p is not a gc allocated pointer, an empty BlkInfo is returned.
980      */
981     BlkInfo query(void *p)
982     {
983         if (!p)
984         {
985             BlkInfo i;
986             return  i;
987         }
988
989         if (!thread_needLock())
990         {
991             return queryNoSync(p);
992         }
993         else synchronized (gcLock)
994         {
995             return queryNoSync(p);
996         }
997     }
998
999
1000     //
1001     //
1002     //
1003     BlkInfo queryNoSync(void *p)
1004     {
1005         assert(p);
1006
1007         return gcx.getInfo(p);
1008     }
1009
1010
1011     /**
1012      * Verify that pointer p:
1013      *  1) belongs to this memory pool
1014      *  2) points to the start of an allocated piece of memory
1015      *  3) is not on a free list
1016      */
1017     void check(void *p)
1018     {
1019         if (!p)
1020         {
1021             return;
1022         }
1023
1024         if (!thread_needLock())
1025         {
1026             checkNoSync(p);
1027         }
1028         else synchronized (gcLock)
1029         {
1030             checkNoSync(p);
1031         }
1032     }
1033
1034
1035     //
1036     //
1037     //
1038     private void checkNoSync(void *p)
1039     {
1040         assert(p);
1041
1042         sentinel_Invariant(p);
1043         debug (PTRCHECK)
1044         {
1045             Pool*  pool;
1046             size_t pagenum;
1047             Bins   bin;
1048             size_t size;
1049
1050             p = sentinel_sub(p);
1051             pool = gcx.findPool(p);
1052             assert(pool);
1053             pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1054             bin = cast(Bins)pool.pagetable[pagenum];
1055             assert(bin <= B_PAGE);
1056             size = binsize[bin];
1057             assert((cast(size_t)p & (size - 1)) == 0);
1058
1059             debug (PTRCHECK2)
1060             {
1061                 if (bin < B_PAGE)
1062                 {
1063                     // Check that p is not on a free list
1064                     List *list;
1065
1066                     for (list = gcx.bucket[bin]; list; list = list.next)
1067                     {
1068                         assert(cast(void*)list != p);
1069                     }
1070                 }
1071             }
1072         }
1073     }
1074
1075
1076     //
1077     //
1078     //
1079     private void setStackBottom(void *p)
1080     {
1081         version (STACKGROWSDOWN)
1082         {
1083             //p = (void *)((uint *)p + 4);
1084             if (p > gcx.stackBottom)
1085             {
1086                 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1087                 gcx.stackBottom = p;
1088             }
1089         }
1090         else
1091         {
1092             //p = (void *)((uint *)p - 4);
1093             if (p < gcx.stackBottom)
1094             {
1095                 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1096                 gcx.stackBottom = cast(char*)p;
1097             }
1098         }
1099     }
1100
1101
1102     /**
1103      * add p to list of roots
1104      */
1105     void addRoot(void *p)
1106     {
1107         if (!p)
1108         {
1109             return;
1110         }
1111
1112         if (!thread_needLock())
1113         {
1114             gcx.addRoot(p);
1115         }
1116         else synchronized (gcLock)
1117         {
1118             gcx.addRoot(p);
1119         }
1120     }
1121
1122
1123     /**
1124      * remove p from list of roots
1125      */
1126     void removeRoot(void *p)
1127     {
1128         if (!p)
1129         {
1130             return;
1131         }
1132
1133         if (!thread_needLock())
1134         {
1135             gcx.removeRoot(p);
1136         }
1137         else synchronized (gcLock)
1138         {
1139             gcx.removeRoot(p);
1140         }
1141     }
1142
1143
1144     /**
1145      * Add a memory range to be scanned for pointers during collections.
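     * (Illustrative note: a buffer allocated with C malloc that stores
     * pointers into GC-managed data should be registered this way so the
     * collector scans it.)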
1146      */
1147     void addRange(void *p, size_t sz)
1148     {
1149         if (!p || !sz)
1150         {
1151             return;
1152         }
1153
1154         //debug(PRINTF) printf("+GC.addRange(pbot = x%x, ptop = x%x)\n", pbot, ptop);
1155         if (!thread_needLock())
1156         {
1157             gcx.addRange(p, p + sz);
1158         }
1159         else synchronized (gcLock)
1160         {
1161             gcx.addRange(p, p + sz);
1162         }
1163         //debug(PRINTF) printf("-GC.addRange()\n");
1164     }
1165
1166
1167     /**
1168      * remove range
1169      */
1170     void removeRange(void *p)
1171     {
1172         if (!p)
1173         {
1174             return;
1175         }
1176
1177         if (!thread_needLock())
1178         {
1179             gcx.removeRange(p);
1180         }
1181         else synchronized (gcLock)
1182         {
1183             gcx.removeRange(p);
1184         }
1185     }
1186
1187
1188     /**
1189      * do full garbage collection
1190      */
1191     void fullCollect()
1192     {
1193         debug(PRINTF) printf("GC.fullCollect()\n");
1194
1195         if (!thread_needLock())
1196         {
1197             gcx.fullcollectshell();
1198         }
1199         else synchronized (gcLock)
1200         {
1201             gcx.fullcollectshell();
1202         }
1203
1204         version (none)
1205         {
1206             GCStats stats;
1207
1208             getStats(stats);
1209             debug(PRINTF) printf("poolsize = %x, usedsize = %x, freelistsize = %x\n",
1210                     stats.poolsize, stats.usedsize, stats.freelistsize);
1211         }
1212
1213         gcx.log_collect();
1214     }
1215
1216
1217     /**
1218      * do full garbage collection without scanning the thread stacks
1219      */
1220     void fullCollectNoStack()
1221     {
1222         if (!thread_needLock())
1223         {
1224             gcx.noStack++;
1225             gcx.fullcollectshell();
1226             gcx.noStack--;
1227         }
1228         else synchronized (gcLock)
1229         {
1230             gcx.noStack++;
1231             gcx.fullcollectshell();
1232             gcx.noStack--;
1233         }
1234     }
1235
1236
1237     /**
1238      * Minimize physical memory usage by returning completely free pools to the OS.
1239      */
1240     void minimize()
1241     {
1242         if (!thread_needLock())
1243         {
1244             gcx.minimize();
1245         }
1246         else synchronized (gcLock)
1247         {
1248             gcx.minimize();
1249         }
1250     }
1251
1252
1253     /**
1254      * Retrieve statistics about garbage collection.
1255      * Useful for debugging and tuning.
1256      */
1257     void getStats(out GCStats stats)
1258     {
1259         if (!thread_needLock())
1260         {
1261             getStatsNoSync(stats);
1262         }
1263         else synchronized (gcLock)
1264         {
1265             getStatsNoSync(stats);
1266         }
1267     }
1268
1269
1270     //
1271     //
1272     //
1273     private void getStatsNoSync(out GCStats stats)
1274     {
1275         size_t psize = 0;
1276         size_t usize = 0;
1277         size_t flsize = 0;
1278
1279         size_t n;
1280         size_t bsize = 0;
1281
1282         //debug(PRINTF) printf("getStats()\n");
1283         libc.memset(&stats, 0, GCStats.sizeof);
1284
1285         for (n = 0; n < gcx.npools; n++)
1286         {
1287             Pool *pool = gcx.pooltable[n];
1288             psize += pool.npages * PAGESIZE;
1289             for (size_t j = 0; j < pool.npages; j++)
1290             {
1291                 Bins bin = cast(Bins)pool.pagetable[j];
1292                 if (bin == B_FREE)
1293                     stats.freeblocks++;
1294                 else if (bin == B_PAGE)
1295                     stats.pageblocks++;
1296                 else if (bin < B_PAGE)
1297                     bsize += PAGESIZE;
1298             }
1299         }
1300
1301         for (n = 0; n < B_PAGE; n++)
1302         {
1303             //debug(PRINTF) printf("bin %d\n", n);
1304             for (List *list = gcx.bucket[n]; list; list = list.next)
1305             {
1306                 //debug(PRINTF) printf("\tlist %x\n", list);
1307                 flsize += binsize[n];
1308             }
1309         }
1310
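        // Illustrative summary: bsize counts the pages holding small-object
        // bins and flsize the bytes sitting on free lists, so bsize - flsize
        // estimates the space used by small allocations.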
1311         usize = bsize - flsize;
1312
1313         stats.poolsize = psize;
1314         stats.usedsize = usize;
1315         stats.freelistsize = flsize;
1316     }
1317
1318     /******************* weak-reference support *********************/
1319
1320     // call locked if necessary
1321     private T locked(T)(in T delegate() code)
1322     {
1323         if (thread_needLock())
1324             synchronized (gcLock) return code();
1325         else
1326             return code();
1327     }
1328
1329     private struct WeakPointer
1330     {
1331         Object reference;
1332
1333         void ondestroy(Object r)
1334         {
1335             assert(r is reference);
1336             // lock for memory consistency (parallel readers)
1337             // also ensures that weakpointerDestroy can be called while another
1338             // thread is freeing the reference with "delete"
1339             locked!(void)({ reference = null; });
1340         }
1341     }
1342
1343     /**
1344      * Create a weak pointer to the given object.
1345      * Returns a pointer to an opaque struct allocated in C memory.
1346      */
1347     void* weakpointerCreate( Object r )
1348     {
1349         if (r)
1350         {
1351             // must be allocated in C memory
1352             // 1. to hide the reference from the GC
1353             // 2. the GC doesn't scan delegates added by rt_attachDisposeEvent
1354             //    for references
1355             auto wp = cast(WeakPointer*)(libc.malloc(WeakPointer.sizeof));
1356             if (!wp)
1357                 onOutOfMemoryError();
1358             wp.reference = r;
1359             rt_attachDisposeEvent(r, &wp.ondestroy);
1360             return wp;
1361         }
1362         return null;
1363     }
1364
1365     /**
1366      * Destroy a weak pointer returned by weakpointerCreate().
1367      * If null is passed, nothing happens.
1368      */
1369     void weakpointerDestroy( void* p )
1370     {
1371         if (p)
1372         {
1373             auto wp = cast(WeakPointer*)p;
1374             // must be extra careful about the GC or parallel threads
1375             // finalizing the reference at the same time
1376             locked!(void)({
1377                    if (wp.reference)
1378                        rt_detachDisposeEvent(wp.reference, &wp.ondestroy);
1379                   });
1380             libc.free(wp);
1381         }
1382     }
1383
1384     /**
1385      * Query a weak pointer and return either the object passed to
1386      * weakpointerCreate, or null if it was free'd in the meantime.
1387      * If null is passed, null is returned.
1388      */
1389     Object weakpointerGet( void* p )
1390     {
1391         if (p)
1392         {
1393             // NOTE: could avoid the lock by using Fawzi style GC counters but
1394             // that'd require core.sync.Atomic and lots of care about memory
1395             // consistency it's an optional optimization see
1396             // http://dsource.org/projects/tango/browser/trunk/user/tango/core/Lifetime.d?rev=5100#L158
1397             return locked!(Object)({
1398                   return (cast(WeakPointer*)p).reference;
1399                   });
1400         }
        return null;    // documented behavior: null in, null out
1401     }
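
    /* Illustrative usage sketch for the weak-pointer API above (`obj` and
     * `gc` are hypothetical names):
     *
     *     void* wp = gc.weakpointerCreate(obj);
     *     // ... later ...
     *     if (gc.weakpointerGet(wp) !is null)
     *     {
     *         // obj is still alive
     *     }
     *     gc.weakpointerDestroy(wp);
     */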
1402 }
1403
1404
1405 /* ============================ Gcx =============================== */
1406
1407 enum
1408 {
1409     PAGESIZE =    4096,
1410     POOLSIZE =   (4096*256),
1411 }
1412
1413
1414 enum
1415 {
1416     B_16,
1417     B_32,
1418     B_64,
1419     B_128,
1420     B_256,
1421     B_512,
1422     B_1024,
1423     B_2048,
1424     B_PAGE,             // start of large alloc
1425     B_PAGEPLUS,         // continuation of large alloc
1426     B_FREE,             // free page
1427     B_MAX
1428 }
1429
1430
1431 alias ubyte Bins;
1432
1433
1434 struct List
1435 {
1436     List *next;
1437 }
1438
1439
1440 struct Range
1441 {
1442     void *pbot;
1443     void *ptop;
1444 }
1445
1446
1447 const uint binsize[B_MAX] = [ 16,32,64,128,256,512,1024,2048,4096 ];
1448 const uint notbinsize[B_MAX] = [ ~(16u-1),~(32u-1),~(64u-1),~(128u-1),~(256u-1),
1449                                 ~(512u-1),~(1024u-1),~(2048u-1),~(4096u-1) ];
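
// notbinsize[bin] masks an offset down to the start of its block, e.g.
// (illustrative) for B_64 an offset of 0x1234 & ~(64u-1) yields 0x1200.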
1450
1451 /* ============================ Gcx =============================== */
1452
1453
1454 struct Gcx
1455 {
1456
1457     void *p_cache;
1458     size_t size_cache;
1459
1460     size_t nroots;
1461     size_t rootdim;
1462     void **roots;
1463
1464     size_t nranges;
1465     size_t rangedim;
1466     Range *ranges;
1467
1468     uint noStack;       // !=0 means don't scan stack
1469     uint log;           // turn on logging
1470     uint anychanges;
1471     void *stackBottom;
1472     uint inited;
1473     int disabled;       // turn off collections if >0
1474
1475     byte *minAddr;      // min(baseAddr)
1476     byte *maxAddr;      // max(topAddr)
1477
1478     size_t npools;
1479     Pool **pooltable;
1480
1481     List *bucket[B_MAX];        // free list for each size
1482
1483
1484     void initialize()
1485     {
1486         int dummy;
1487         (cast(byte*)this)[0 .. Gcx.sizeof] = 0;
1488         stackBottom = cast(char*)&dummy;
1489         log_init();
1490         //printf("gcx = %p, self = %x\n", this, self);
1491         inited = 1;
1492     }
1493
1494
1495     void Dtor()
1496     {
1497         inited = 0;
1498
1499         for (size_t i = 0; i < npools; i++)
1500         {
1501             Pool *pool = pooltable[i];
1502             pool.Dtor();
1503             libc.free(pool);
1504         }
1505         if (pooltable)
1506             libc.free(pooltable);
1507
1508         if (roots)
1509             libc.free(roots);
1510
1511         if (ranges)
1512             libc.free(ranges);
1513     }
1514
1515
1516     void Invariant() { }
1517
1518
1519     invariant
1520     {
1521         if (inited)
1522         {
1523         //printf("Gcx.invariant(): this = %p\n", this);
1524             size_t i;
1525
1526             for (i = 0; i < npools; i++)
1527             {
1528                 Pool *pool = pooltable[i];
1529                 pool.Invariant();
1530                 if (i == 0)
1531                 {
1532                     assert(minAddr == pool.baseAddr);
1533                 }
1534                 if (i + 1 < npools)
1535                 {
1536                     assert(pool.opCmp(pooltable[i + 1]) < 0);
1537                 }
1538                 else if (i + 1 == npools)
1539                 {
1540                     assert(maxAddr == pool.topAddr);
1541                 }
1542             }
1543
1544             if (roots)
1545             {
1546                 assert(rootdim != 0);
1547                 assert(nroots <= rootdim);
1548             }
1549
1550             if (ranges)
1551             {
1552                 assert(rangedim != 0);
1553                 assert(nranges <= rangedim);
1554
1555                 for (i = 0; i < nranges; i++)
1556                 {
1557                     assert(ranges[i].pbot);
1558                     assert(ranges[i].ptop);
1559                     assert(ranges[i].pbot <= ranges[i].ptop);
1560                 }
1561             }
1562
1563             for (i = 0; i < B_PAGE; i++)
1564             {
1565                 for (List *list = bucket[i]; list; list = list.next)
1566                 {
1567                 }
1568             }
1569         }
1570     }
1571
1572
1573     /**
1574      * Add p to the list of roots.
1575      */
1576     void addRoot(void *p)
1577     {
1578         if (nroots == rootdim)
1579         {
1580             size_t newdim = rootdim * 2 + 16;
1581             void** newroots;
1582
1583             newroots = cast(void**) libc.malloc(newdim * newroots[0].sizeof);
1584             if (!newroots)
1585                 onOutOfMemoryError();
1586             if (roots)
1587             {
1588                 libc.memcpy(newroots, roots, nroots * newroots[0].sizeof);
1589                 libc.free(roots);
1590             }
1591             roots = newroots;
1592             rootdim = newdim;
1593         }
1594         roots[nroots] = p;
1595         nroots++;
1596     }
1597
1598
1599     /**
1600      * Remove p from the list of roots.
1601      */
1602     void removeRoot(void *p)
1603     {
1604         for (size_t i = nroots; i--;)
1605         {
1606             if (roots[i] == p)
1607             {
1608                 nroots--;
1609                 libc.memmove(roots + i, roots + i + 1,
1610                         (nroots - i) * roots[0].sizeof);
1611                 return;
1612             }
1613         }
1614         assert(0);
1615     }
1616
1617
1618     /**
1619      * Add the address range [pbot, ptop) to the list of ranges to be scanned.
1620      */
1621     void addRange(void *pbot, void *ptop)
1622     {
1623         debug (PRINTF) printf("%x.Gcx::addRange(%x, %x), nranges = %d\n", this,
1624                 pbot, ptop, nranges);
1625         if (nranges == rangedim)
1626         {
1627             size_t newdim = rangedim * 2 + 16;
1628             Range *newranges;
1629
1630             newranges = cast(Range*) libc.malloc(newdim * newranges[0].sizeof);
1631             if (!newranges)
1632                 onOutOfMemoryError();
1633             if (ranges)
1634             {
1635                 libc.memcpy(newranges, ranges, nranges * newranges[0].sizeof);
1636                 libc.free(ranges);
1637             }
1638             ranges = newranges;
1639             rangedim = newdim;
1640         }
1641         ranges[nranges].pbot = pbot;
1642         ranges[nranges].ptop = ptop;
1643         nranges++;
1644     }
1645
1646
1647     /**
1648      * Remove the range starting at pbot from the list of ranges.
1649      */
1650     void removeRange(void *pbot)
1651     {
1652         debug (PRINTF) printf("%x.Gcx.removeRange(%x), nranges = %d\n", this,
1653                 pbot, nranges);
1654         for (size_t i = nranges; i--;)
1655         {
1656             if (ranges[i].pbot == pbot)
1657             {
1658                 nranges--;
1659                 libc.memmove(ranges + i, ranges + i + 1,
1660                         (nranges - i) * ranges[0].sizeof);
1661                 return;
1662             }
1663         }
1664         debug(PRINTF) printf("Wrong thread\n");
1665
1666         // This is a fatal error, but ignore it.
1667         // The problem is that we can get a Close() call on a thread
1668         // other than the one the range was allocated on.
1669         //assert(zero);
1670     }
1671
1672
1673     /**
1674      * Find Pool that pointer is in.
1675      * Return null if not in a Pool.
1676      * Assume pooltable[] is sorted.
1677      */
1678     Pool *findPool(void *p)
1679     {
1680         if (p >= minAddr && p < maxAddr)
1681         {
1682             if (npools == 1)
1683             {
1684                 return pooltable[0];
1685             }
1686
1687             for (size_t i = 0; i < npools; i++)
1688             {
1689                 Pool *pool;
1690
1691                 pool = pooltable[i];
1692                 if (p < pool.topAddr)
1693                 {
1694                     if (pool.baseAddr <= p)
1695                         return pool;
1696                     break;
1697                 }
1698             }
1699         }
1700         return null;
1701     }
1702
1703
1704     /**
1705      * Find base address of block containing pointer p.
1706      * Returns null if not a gc'd pointer
1707      */
1708     void* findBase(void *p)
1709     {
1710         Pool *pool;
1711
1712         pool = findPool(p);
1713         if (pool)
1714         {
1715             size_t offset = cast(size_t)(p - pool.baseAddr);
1716             size_t pn = offset / PAGESIZE;
1717             Bins   bin = cast(Bins)pool.pagetable[pn];
1718
1719             // Adjust bit to be at start of allocated memory block
1720             if (bin <= B_PAGE)
1721             {
1722                 return pool.baseAddr + (offset & notbinsize[bin]);
1723             }
1724             else if (bin == B_PAGEPLUS)
1725             {
1726                 do
1727                 {
1728                     --pn, offset -= PAGESIZE;
1729                 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1730
1731                 return pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1732             }
1733             else
1734             {
1735                 // we are in a B_FREE page
1736                 return null;
1737             }
1738         }
1739         return null;
1740     }
1741
1742
1743     /**
1744      * Find size of pointer p.
1745      * Returns 0 if not a gc'd pointer
1746      */
1747     size_t findSize(void *p)
1748     {
1749         Pool*  pool;
1750         size_t size = 0;
1751
1752         pool = findPool(p);
1753         if (pool)
1754         {
1755             size_t pagenum;
1756             Bins   bin;
1757
1758             pagenum = cast(size_t)(p - pool.baseAddr) / PAGESIZE;
1759             bin = cast(Bins)pool.pagetable[pagenum];
1760             size = binsize[bin];
1761             if (bin == B_PAGE)
1762             {
1763                 ubyte* pt;
1764                 size_t i;
1765
1766                 pt = &pool.pagetable[0];
1767                 for (i = pagenum + 1; i < pool.npages; i++)
1768                 {
1769                     if (pt[i] != B_PAGEPLUS)
1770                         break;
1771                 }
1772                 size = (i - pagenum) * PAGESIZE;
1773             }
1774         }
1775         return size;
1776     }
1777
1778
1779     /**
1780      * Return the base address, size and attribute bits of the block containing p.
1781      */
1782     BlkInfo getInfo(void* p)
1783     {
1784         Pool*   pool;
1785         BlkInfo info;
1786
1787         pool = findPool(p);
1788         if (pool)
1789         {
1790             size_t offset = cast(size_t)(p - pool.baseAddr);
1791             size_t pn = offset / PAGESIZE;
1792             Bins   bin = cast(Bins)pool.pagetable[pn];
1793
1794             ////////////////////////////////////////////////////////////////////
1795             // findAddr
1796             ////////////////////////////////////////////////////////////////////
1797
1798             if (bin <= B_PAGE)
1799             {
1800                 info.base = pool.baseAddr + (offset & notbinsize[bin]);
1801             }
1802             else if (bin == B_PAGEPLUS)
1803             {
1804                 do
1805                 {
1806                     --pn, offset -= PAGESIZE;
1807                 }
1808                 while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1809
1810                 info.base = pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1811
1812                 // fix bin for use by size calc below
1813                 bin = cast(Bins)pool.pagetable[pn];
1814             }
1815
1816             ////////////////////////////////////////////////////////////////////
1817             // findSize
1818             ////////////////////////////////////////////////////////////////////
1819
1820             info.size = binsize[bin];
1821             if (bin == B_PAGE)
1822             {
1823                 ubyte* pt;
1824                 size_t i;
1825
1826                 pt = &pool.pagetable[0];
1827                 for (i = pn + 1; i < pool.npages; i++)
1828                 {
1829                     if (pt[i] != B_PAGEPLUS)
1830                         break;
1831                 }
1832                 info.size = (i - pn) * PAGESIZE;
1833             }
1834
1835             ////////////////////////////////////////////////////////////////////
1836             // getBits
1837             ////////////////////////////////////////////////////////////////////
1838
1839             info.attr = getBits(pool, cast(size_t)(offset / 16));
1840         }
1841         return info;
1842     }
1843
1844
1845     /**
1846      * Compute bin for size.
1847      */
1848     static Bins findBin(size_t size)
1849     {
1850         Bins bin;
1851         if (size <= 256)
1852         {
1853             if (size <= 64)
1854             {
1855                 if (size <= 16)
1856                     bin = B_16;
1857                 else if (size <= 32)
1858                     bin = B_32;
1859                 else
1860                     bin = B_64;
1861             }
1862             else
1863             {
1864                 if (size <= 128)
1865                     bin = B_128;
1866                 else
1867                     bin = B_256;
1868             }
1869         }
1870         else
1871         {
1872             if (size <= 1024)
1873             {
1874                 if (size <= 512)
1875                     bin = B_512;
1876                 else
1877                     bin = B_1024;
1878             }
1879             else
1880             {
1881                 if (size <= 2048)
1882                     bin = B_2048;
1883                 else
1884                     bin = B_PAGE;
1885             }
1886         }
1887         return bin;
1888     }
1889
1890
1891     /**
1892      * Allocate a new pool of at least size bytes.
1893      * Sort it into pooltable[].
1894      * Mark all memory in the pool as B_FREE.
1895      * Return the actual number of bytes reserved or 0 on error.
1896      */
1897     size_t reserve(size_t size)
1898     {
1899         size_t npages = (size + PAGESIZE - 1) / PAGESIZE;
1900         Pool*  pool = newPool(npages);
1901
1902         if (!pool)
1903             return 0;
1904         return pool.npages * PAGESIZE;
1905     }
1906
1907
1908     /**
1909      * Minimizes physical memory usage by returning free pools to the OS.
1910      */
1911     void minimize()
1912     {
1913         size_t n;
1914         size_t pn;
1915         Pool*  pool;
1916
1917         for (n = 0; n < npools; n++)
1918         {
1919             pool = pooltable[n];
1920             for (pn = 0; pn < pool.npages; pn++)
1921             {
1922                 if (cast(Bins)pool.pagetable[pn] != B_FREE)
1923                     break;
1924             }
1925             if (pn < pool.npages)
1926             {
1928                 continue;
1929             }
1930             pool.Dtor();
1931             libc.free(pool);
1932             libc.memmove(pooltable + n,
1933                             pooltable + n + 1,
1934                             (--npools - n) * (Pool*).sizeof);
1935             minAddr = pooltable[0].baseAddr;
1936             maxAddr = pooltable[npools - 1].topAddr;
1937         }
1938     }
1939
1940
1941     /**
1942      * Allocate a chunk of memory that is larger than a page.
1943      * Return null if out of memory.
1944      */
1945     void *bigAlloc(size_t size)
1946     {
1947         Pool*  pool;
1948         size_t npages;
1949         size_t n;
1950         size_t pn;
1951         size_t freedpages;
1952         void*  p;
1953         int    state;
1954
1955         npages = (size + PAGESIZE - 1) / PAGESIZE;
1956
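        // Illustrative summary of the retry loop: state 0 tries the existing
        // pools and then a collection (skipped when the GC is disabled);
        // state 1 falls back to allocating a fresh pool; state 2 gives up
        // and reports out of memory.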
1957         for (state = 0; ; )
1958         {
1959             // This code could use some refinement when repeatedly
1960             // allocating very large arrays.
1961
1962             for (n = 0; n < npools; n++)
1963             {
1964                 pool = pooltable[n];
1965                 pn = pool.allocPages(npages);
1966                 if (pn != OPFAIL)
1967                     goto L1;
1968             }
1969
1970             // Failed
1971             switch (state)
1972             {
1973             case 0:
1974                 if (disabled)
1975                 {
1976                     state = 1;
1977                     continue;
1978                 }
1979                 // Try collecting
1980                 freedpages = fullcollectshell();
1981                 if (freedpages >= npools * ((POOLSIZE / PAGESIZE) / 4))
1982                 {
1983                     state = 1;
1984                     continue;
1985                 }
1986                 // Release empty pools to prevent bloat
1987                 minimize();
1988                 // Allocate new pool
1989                 pool = newPool(npages);
1990                 if (!pool)
1991                 {
1992                     state = 2;
1993                     continue;
1994                 }
1995                 pn = pool.allocPages(npages);
1996                 assert(pn != OPFAIL);
1997                 goto L1;
1998             case 1:
1999                 // Release empty pools to prevent bloat
2000                 minimize();
2001                 // Allocate new pool
2002                 pool = newPool(npages);
2003                 if (!pool)
2004                     goto Lnomemory;
2005                 pn = pool.allocPages(npages);
2006                 assert(pn != OPFAIL);
2007                 goto L1;
2008             case 2:
2009                 goto Lnomemory;
2010             default:
2011                 assert(false);
2012             }
2013         }
2014
2015       L1:
2016         pool.pagetable[pn] = B_PAGE;
2017         if (npages > 1)
2018             libc.memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
2019         p = pool.baseAddr + pn * PAGESIZE;
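             // Zero the slack between the requested size and the end of the
             // last page so leftover data there cannot be mistaken for live
             // pointers when the block is scanned.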
2020         libc.memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
2021         debug (MEMSTOMP) libc.memset(p, 0xF1, size);
2022         //debug(PRINTF) printf("\tp = %x\n", p);
2023         return p;
2024
2025       Lnomemory:
2026         return null; // let mallocNoSync handle the error
2027     }
2028
2029
2030     /**
2031      * Allocate a new pool with at least npages in it.
2032      * Sort it into pooltable[].
2033      * Return null if failed.
2034      */
2035     Pool *newPool(size_t npages)
2036     {
2037         Pool*  pool;
2038         Pool** newpooltable;
2039         size_t newnpools;
2040         size_t i;
2041
2042         //debug(PRINTF) printf("************Gcx::newPool(npages = %d)****************\n", npages);
2043
2044         // Minimum of POOLSIZE
2045         if (npages < POOLSIZE/PAGESIZE)
2046             npages = POOLSIZE/PAGESIZE;
2047         else if (npages > POOLSIZE/PAGESIZE)
2048         {
2049             // Give us 150% of requested size, so there's room to extend
2050             auto n = npages + (npages >> 1);
2051             if (n < size_t.max/PAGESIZE)
2052                 npages = n;
2053         }
2054
2055         // Allocate successively larger pools up to 8 megs
2056         if (npools)
2057         {
2058             size_t n = npools;
2059             if (n > 8)
2060                 n = 8;                  // cap pool size at 8 megs
2061             n *= (POOLSIZE / PAGESIZE);
2062             if (npages < n)
2063                 npages = n;
2064         }
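             // e.g. once three pools exist, even a one-page request is
             // rounded up to 3 * (POOLSIZE / PAGESIZE) pages; the growth
             // step is capped at 8 * (POOLSIZE / PAGESIZE) pages ("8 megs",
             // per the comment above).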
2065
2066         pool = cast(Pool *) libc.calloc(1, Pool.sizeof);
2067         if (pool)
2068         {
2069             pool.initialize(npages);
2070             if (!pool.baseAddr)
2071                 goto Lerr;
2072
2073             newnpools = npools + 1;
2074             newpooltable = cast(Pool **) libc.realloc(pooltable,
2075                     newnpools * (Pool *).sizeof);
2076             if (!newpooltable)
2077                 goto Lerr;
2078
2079             // Sort pool into newpooltable[]
2080             for (i = 0; i < npools; i++)
2081             {
2082                 if (pool.opCmp(newpooltable[i]) < 0)
2083                      break;
2084             }
2085             libc.memmove(newpooltable + i + 1, newpooltable + i,
2086                     (npools - i) * (Pool *).sizeof);
2087             newpooltable[i] = pool;
2088
2089             pooltable = newpooltable;
2090             npools = newnpools;
2091
2092             minAddr = pooltable[0].baseAddr;
2093             maxAddr = pooltable[npools - 1].topAddr;
2094         }
2095         return pool;
2096
2097       Lerr:
2098         pool.Dtor();
2099         libc.free(pool);
2100         return null;
2101     }
2102
2103
2104     /**
2105      * Allocate a page of entries for the given bin and link them onto its free list.
2106      * Returns:
2107      *  0 on failure, 1 on success
2108      */
2109     int allocPage(Bins bin)
2110     {
2111         Pool*  pool;
2112         size_t n;
2113         size_t pn;
2114         byte*  p;
2115         byte*  ptop;
2116
2117         //debug(PRINTF) printf("Gcx::allocPage(bin = %d)\n", bin);
2118         for (n = 0; n < npools; n++)
2119         {
2120             pool = pooltable[n];
2121             pn = pool.allocPages(1);
2122             if (pn != OPFAIL)
2123                 goto L1;
2124         }
2125         return 0;               // failed
2126
2127       L1:
2128         pool.pagetable[pn] = cast(ubyte)bin;
2129
2130         // Convert page to free list
2131         size_t size = binsize[bin];
2132         List **b = &bucket[bin];
2133
2134         p = pool.baseAddr + pn * PAGESIZE;
2135         ptop = p + PAGESIZE;
2136         for (; p < ptop; p += size)
2137         {
2138             (cast(List *)p).next = *b;
2139             *b = cast(List *)p;
2140         }
2141         return 1;
2142     }
2143
2144
2145     /**
2146      * Search a range of memory values and mark any pointers into the GC pool.
2147      */
2148     void mark(void *pbot, void *ptop)
2149     {
2150         void **p1 = cast(void **)pbot;
2151         void **p2 = cast(void **)ptop;
2152         size_t pcache = 0;      // page of the last large block seen; repeated pointers into it are skipped
2153         uint changes = 0;
2154
2155         //printf("marking range: %p -> %p\n", pbot, ptop);
2156         for (; p1 < p2; p1++)
2157         {
2158             Pool *pool;
2159             byte *p = cast(byte *)(*p1);
2160
2161             //if (log) debug(PRINTF) printf("\tmark %x\n", p);
2162             if (p >= minAddr && p < maxAddr)
2163             {
2164                 if ((cast(size_t)p & ~(PAGESIZE-1)) == pcache)
2165                     continue;
2166
2167                 pool = findPool(p);
2168                 if (pool)
2169                 {
2170                     size_t offset = cast(size_t)(p - pool.baseAddr);
2171                     size_t biti;
2172                     size_t pn = offset / PAGESIZE;
2173                     Bins   bin = cast(Bins)pool.pagetable[pn];
2174
2175                     //debug(PRINTF) printf("\t\tfound pool %x, base=%x, pn = %d, bin = %d, biti = x%x\n", pool, pool.baseAddr, pn, bin, biti);
2176
2177                     // Adjust bit to be at start of allocated memory block
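                         // Every mark/scan/free bit covers 16 bytes of the
                         // pool, so a bit index is the block-aligned offset
                         // divided by 16.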
2178                     if (bin <= B_PAGE)
2179                     {
2180                         biti = (offset & notbinsize[bin]) >> 4;
2181                         //debug(PRINTF) printf("\t\tbiti = x%x\n", biti);
2182                     }
2183                     else if (bin == B_PAGEPLUS)
2184                     {
2185                         do
2186                         {
2187                             --pn;
2188                         }
2189                         while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
2190                         biti = pn * (PAGESIZE / 16);
2191                     }
2192                     else
2193                     {
2194                         // Don't mark bits in B_FREE pages
2195                         continue;
2196                     }
2197
2198                     if (bin >= B_PAGE) // Cache B_PAGE and B_PAGEPLUS lookups
2199                         pcache = cast(size_t)p & ~(PAGESIZE-1);
2200
2201                     //debug(PRINTF) printf("\t\tmark(x%x) = %d\n", biti, pool.mark.test(biti));
2202                     if (!pool.mark.test(biti))
2203                     {
2204                         //if (log) debug(PRINTF) printf("\t\tmarking %x\n", p);
2205                         pool.mark.set(biti);
2206                         if (!pool.noscan.test(biti))
2207                         {
2208                             pool.scan.set(biti);
2209                             changes = 1;
2210                         }
2211                         log_parent(sentinel_add(pool.baseAddr + biti * 16), sentinel_add(pbot));
2212                     }
2213                 }
2214             }
2215         }
2216         anychanges |= changes;
2217     }
2218
2219
2220     /**
2221      * Return number of full pages free'd.
2222      */
2223     size_t fullcollectshell()
2224     {
2225         // The purpose of the 'shell' is to ensure all the registers
2226         // get put on the stack so they'll be scanned
2227         void *sp;
2228         size_t result;
2229         version (GNU)
2230         {
2231             gcc.builtins.__builtin_unwind_init();
2232             sp = & sp;
2233         }
2234         else version(LDC)
2235         {
2236             version(X86)
2237             {
2238                 uint eax,ecx,edx,ebx,ebp,esi,edi;
2239                 asm
2240                 {
2241                     mov eax[EBP], EAX      ;
2242                     mov ecx[EBP], ECX      ;
2243                     mov edx[EBP], EDX      ;
2244                     mov ebx[EBP], EBX      ;
2245                     mov ebp[EBP], EBP      ;
2246                     mov esi[EBP], ESI      ;
2247                     mov edi[EBP], EDI      ;
2248                     mov  sp[EBP], ESP      ;
2249                 }
2250             }
2251             else version (X86_64)
2252             {
2253                 ulong rax,rbx,rcx,rdx,rbp,rsi,rdi,r8,r9,r10,r11,r12,r13,r14,r15;
2254                 asm
2255                 {
2256                     movq rax[RBP], RAX      ;
2257                     movq rbx[RBP], RBX      ;
2258                     movq rcx[RBP], RCX      ;
2259                     movq rdx[RBP], RDX      ;
2260                     movq rbp[RBP], RBP      ;
2261                     movq rsi[RBP], RSI      ;
2262                     movq rdi[RBP], RDI      ;
2263                     movq r8 [RBP], R8       ;
2264                     movq r9 [RBP], R9       ;
2265                     movq r10[RBP], R10      ;
2266                     movq r11[RBP], R11      ;
2267                     movq r12[RBP], R12      ;
2268                     movq r13[RBP], R13      ;
2269                     movq r14[RBP], R14      ;
2270                     movq r15[RBP], R15      ;
2271                     movq  sp[RBP], RSP      ;
2272                 }
2273             }
2274             else
2275             {
2276                 static assert( false, "Architecture not supported." );
2277             }
2278         }
2279         else
2280         {
2281         asm
2282         {
2283             pushad              ;
2284             mov sp[EBP],ESP     ;
2285         }
2286         }
2287         result = fullcollect(sp);
2288         version (GNU)
2289         {
2290             // nothing to do
2291         }
2292         else version(LDC)
2293         {
2294             // nothing to do
2295         }
2296         else
2297         {
2298         asm
2299         {
2300             popad               ;
2301         }
2302         }
2303         return result;
2304     }
2305
2306
2307     /**
2308      * Do a full mark-and-sweep collection; returns the number of pages freed.
2309      */
2310     size_t fullcollect(void *stackTop)
2311     {
2312         size_t n;
2313         Pool*  pool;
2314
2315         debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
2316
2317         thread_suspendAll();
2318
2319         p_cache = null;
2320         size_cache = 0;
2321
2322         anychanges = 0;
2323         for (n = 0; n < npools; n++)
2324         {
2325             pool = pooltable[n];
2326             pool.mark.zero();
2327             pool.scan.zero();
2328             pool.freebits.zero();
2329         }
2330
2331         // Mark each free entry, so it doesn't get scanned
2332         for (n = 0; n < B_PAGE; n++)
2333         {
2334             for (List *list = bucket[n]; list; list = list.next)
2335             {
2336                 pool = findPool(list);
2337                 assert(pool);
2338                 pool.freebits.set(cast(size_t)(cast(byte*)list - pool.baseAddr) / 16);
2339             }
2340         }
2341
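             // Seed the mark bits with the free bits so entries sitting on
             // the free lists count as already marked and are never scanned.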
2342         for (n = 0; n < npools; n++)
2343         {
2344             pool = pooltable[n];
2345             pool.mark.copy(&pool.freebits);
2346         }
2347
2348         rt_scanStaticData( &mark );
2349
2350         if (!noStack)
2351         {
2352             // Scan stacks and registers for each paused thread
2353             thread_scanAll( &mark, stackTop );
2354         }
2355
2356         // Scan roots[]
2357         debug(COLLECT_PRINTF) printf("scan roots[]\n");
2358         mark(roots, roots + nroots);
2359
2360         // Scan ranges[]
2361         debug(COLLECT_PRINTF) printf("scan ranges[]\n");
2362         //log++;
2363         for (n = 0; n < nranges; n++)
2364         {
2365             debug(COLLECT_PRINTF) printf("\t%x .. %x\n", ranges[n].pbot, ranges[n].ptop);
2366             mark(ranges[n].pbot, ranges[n].ptop);
2367         }
2368         //log--;
2369
2370         debug(COLLECT_PRINTF) printf("\tscan heap\n");
2371         while (anychanges)
2372         {
2373             anychanges = 0;
2374             for (n = 0; n < npools; n++)
2375             {
2376                 uint *bbase;
2377                 uint *b;
2378                 uint *btop;
2379
2380                 pool = pooltable[n];
2381
2382                 bbase = pool.scan.base();
2383                 btop = bbase + pool.scan.nwords;
2384                 for (b = bbase; b < btop;)
2385                 {
2386                     Bins   bin;
2387                     size_t pn;
2388                     size_t u;
2389                     size_t bitm;
2390                     byte*  o;
2391
2392                     bitm = *b;
2393                     if (!bitm)
2394                     {
2395                         b++;
2396                         continue;
2397                     }
2398                     *b = 0;
2399
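                         // Each 32-bit word of the scan bitmap covers
                         // 32 * 16 = 512 bytes of the pool; o is the address
                         // of the first 16-byte chunk described by this word.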
2400                     o = pool.baseAddr + (b - bbase) * 32 * 16;
2401                     if (!(bitm & 0xFFFF))
2402                     {
2403                         bitm >>= 16;
2404                         o += 16 * 16;
2405                     }
2406                     for (; bitm; o += 16, bitm >>= 1)
2407                     {
2408                         if (!(bitm & 1))
2409                             continue;
2410
2411                         pn = cast(size_t)(o - pool.baseAddr) / PAGESIZE;
2412                         bin = cast(Bins)pool.pagetable[pn];
2413                         if (bin < B_PAGE)
2414                         {
2415                             mark(o, o + binsize[bin]);
2416                         }
2417                         else if (bin == B_PAGE || bin == B_PAGEPLUS)
2418                         {
2419                             if (bin == B_PAGEPLUS)
2420                             {
2421                                 while (pool.pagetable[pn - 1] != B_PAGE)
2422                                     pn--;
2423                             }
2424                             u = 1;
2425                             while (pn + u < pool.npages && pool.pagetable[pn + u] == B_PAGEPLUS)
2426                                 u++;
2427                             mark(o, o + u * PAGESIZE);
2428                         }
2429                     }
2430                 }
2431             }
2432         }
2433
2434         thread_resumeAll();
2435
2436         // Free up everything not marked
2437         debug(COLLECT_PRINTF) printf("\tfree'ing\n");
2438         size_t freedpages = 0;
2439         size_t freed = 0;
2440         for (n = 0; n < npools; n++)
2441         {
2442             pool = pooltable[n];
2443             uint*  bbase = pool.mark.base();
2444             size_t pn;
2445             for (pn = 0; pn < pool.npages; pn++, bbase += PAGESIZE / (32 * 16))
2446             {
2447                 Bins bin = cast(Bins)pool.pagetable[pn];
2448
2449                 if (bin < B_PAGE)
2450                 {
2451                     auto size = binsize[bin];
2452                     byte* p = pool.baseAddr + pn * PAGESIZE;
2453                     byte* ptop = p + PAGESIZE;
2454                     size_t biti = pn * (PAGESIZE/16);
2455                     size_t bitstride = size / 16;
2456
2457     version(none) // BUG: doesn't work because freebits() must also be cleared
2458     {
2459                     // If free'd entire page
2460                     if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 && bbase[3] == 0 &&
2461                         bbase[4] == 0 && bbase[5] == 0 && bbase[6] == 0 && bbase[7] == 0)
2462                     {
2463                         for (; p < ptop; p += size, biti += bitstride)
2464                         {
2465                             if (pool.finals.nbits && pool.finals.testClear(biti))
2466                                 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2467                             gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
2468
2469                             List *list = cast(List *)p;
2470                             //debug(PRINTF) printf("\tcollecting %x\n", list);
2471                             log_free(sentinel_add(list));
2472
2473                             debug (MEMSTOMP) libc.memset(p, 0xF3, size);
2474                         }
2475                         pool.pagetable[pn] = B_FREE;
2476                         freed += PAGESIZE;
2477                         //debug(PRINTF) printf("freeing entire page %d\n", pn);
2478                         continue;
2479                     }
2480     }
2481                     for (; p < ptop; p += size, biti += bitstride)
2482                     {
2483                         if (!pool.mark.test(biti))
2484                         {
2485                             sentinel_Invariant(sentinel_add(p));
2486
2487                             pool.freebits.set(biti);
2488                             if (pool.finals.nbits && pool.finals.testClear(biti))
2489                                 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2490                             clrBits(pool, biti, BlkAttr.ALL_BITS);
2491
2492                             List *list = cast(List *)p;
2493                             debug(PRINTF) printf("\tcollecting %x\n", list);
2494                             log_free(sentinel_add(list));
2495
2496                             debug (MEMSTOMP) libc.memset(p, 0xF3, size);
2497
2498                             freed += size;
2499                         }
2500                     }
2501                 }
2502                 else if (bin == B_PAGE)
2503                 {
2504                     size_t biti = pn * (PAGESIZE / 16);
2505                     if (!pool.mark.test(biti))
2506                     {
2507                         byte *p = pool.baseAddr + pn * PAGESIZE;
2508                         sentinel_Invariant(sentinel_add(p));
2509                         if (pool.finals.nbits && pool.finals.testClear(biti))
2510                             rt_finalize(sentinel_add(p), false/*noStack > 0*/);
2511                         clrBits(pool, biti, BlkAttr.ALL_BITS);
2512
2513                         debug(COLLECT_PRINTF) printf("\tcollecting big %x\n", p);
2514                         log_free(sentinel_add(p));
2515                         pool.pagetable[pn] = B_FREE;
2516                         freedpages++;
2517                         debug (MEMSTOMP) libc.memset(p, 0xF3, PAGESIZE);
2518                         while (pn + 1 < pool.npages && pool.pagetable[pn + 1] == B_PAGEPLUS)
2519                         {
2520                             pn++;
2521                             pool.pagetable[pn] = B_FREE;
2522                             freedpages++;
2523
2524                             debug (MEMSTOMP)
2525                             {
2526                                 p += PAGESIZE;
2527                                 libc.memset(p, 0xF3, PAGESIZE);
2528                             }
2529                         }
2530                     }
2531                 }
2532             }
2533         }
2534
2535         // Zero buckets
2536         bucket[] = null;
2537
2538         // Free complete pages, rebuild free list
2539         debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
2540         size_t recoveredpages = 0;
2541         for (n = 0; n < npools; n++)
2542         {
2543             pool = pooltable[n];
2544             for (size_t pn = 0; pn < pool.npages; pn++)
2545             {
2546                 Bins   bin = cast(Bins)pool.pagetable[pn];
2547                 size_t biti;
2548                 size_t u;
2549
2550                 if (bin < B_PAGE)
2551                 {
2552                     size_t size = binsize[bin];
2553                     size_t bitstride = size / 16;
2554                     size_t bitbase = pn * (PAGESIZE / 16);
2555                     size_t bittop = bitbase + (PAGESIZE / 16);
2556                     byte*  p;
2557
2559                     for (biti = bitbase; biti < bittop; biti += bitstride)
2560                     {
2561                         if (!pool.freebits.test(biti))
2562                             goto Lnotfree;
2563                     }
2564                     pool.pagetable[pn] = B_FREE;
2565                     recoveredpages++;
2566                     continue;
2567
2568                  Lnotfree:
2569                     p = pool.baseAddr + pn * PAGESIZE;
2570                     for (u = 0; u < PAGESIZE; u += size)
2571                     {
2572                         biti = bitbase + u / 16;
2573                         if (pool.freebits.test(biti))
2574                         {
2575                             List *list = cast(List *)(p + u);
2576                             if (list.next != bucket[bin])       // avoid unnecessary writes
2577                                 list.next = bucket[bin];
2578                             bucket[bin] = list;
2579                         }
2580                     }
2581                 }
2582             }
2583         }
2584
2585         debug(COLLECT_PRINTF) printf("recovered pages = %d\n", recoveredpages);
2586         debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, npools);
2587
2588         return freedpages + recoveredpages;
2589     }
2590
2591
2592     /**
2593      * Return the BlkAttr bits set for the block at bit index biti in pool.
2594      */
2595     uint getBits(Pool* pool, size_t biti)
2596     in
2597     {
2598         assert( pool );
2599     }
2600     body
2601     {
2602         uint bits;
2603
2604         if (pool.finals.nbits &&
2605             pool.finals.test(biti))
2606             bits |= BlkAttr.FINALIZE;
2607         if (pool.noscan.test(biti))
2608             bits |= BlkAttr.NO_SCAN;
2609 //        if (pool.nomove.nbits &&
2610 //            pool.nomove.test(biti))
2611 //            bits |= BlkAttr.NO_MOVE;
2612         return bits;
2613     }
2614
2615
2616     /**
2617      * Set the BlkAttr bits given by mask for the block at bit index biti in pool.
2618      */
2619     void setBits(Pool* pool, size_t biti, uint mask)
2620     in
2621     {
2622         assert( pool );
2623     }
2624     body
2625     {
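             // The finals bitmap is allocated lazily, the first time an
             // object in this pool is registered for finalization.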
2626         if (mask & BlkAttr.FINALIZE)
2627         {
2628             if (!pool.finals.nbits)
2629                 pool.finals.alloc(pool.mark.nbits);
2630             pool.finals.set(biti);
2631         }
2632         if (mask & BlkAttr.NO_SCAN)
2633         {
2634             pool.noscan.set(biti);
2635         }
2636 //        if (mask & BlkAttr.NO_MOVE)
2637 //        {
2638 //            if (!pool.nomove.nbits)
2639 //                pool.nomove.alloc(pool.mark.nbits);
2640 //            pool.nomove.set(biti);
2641 //        }
2642     }
2643
2644
2645     /**
2646      * Clear the BlkAttr bits given by mask for the block at bit index biti in pool.
2647      */
2648     void clrBits(Pool* pool, size_t biti, uint mask)
2649     in
2650     {
2651         assert( pool );
2652     }
2653     body
2654     {
2655         if (mask & BlkAttr.FINALIZE && pool.finals.nbits)
2656             pool.finals.clear(biti);
2657         if (mask & BlkAttr.NO_SCAN)
2658             pool.noscan.clear(biti);
2659 //        if (mask & BlkAttr.NO_MOVE && pool.nomove.nbits)
2660 //            pool.nomove.clear(biti);
2661     }
2662
2663
2664     /***** Leak Detector ******/
2665
2666
2667     debug (LOGGING)
2668     {
2669         LogArray current;
2670         LogArray prev;
2671
2672
2673         void log_init()
2674         {
2675             //debug(PRINTF) printf("+log_init()\n");
2676             current.reserve(1000);
2677             prev.reserve(1000);
2678             //debug(PRINTF) printf("-log_init()\n");
2679         }
2680
2681
2682         void log_malloc(void *p, size_t size)
2683         {
2684             //debug(PRINTF) printf("+log_malloc(p = %x, size = %d)\n", p, size);
2685             Log log;
2686
2687             log.p = p;
2688             log.size = size;
2689             log.line = GC.line;
2690             log.file = GC.file;
2691             log.parent = null;
2692
2693             GC.line = 0;
2694             GC.file = null;
2695
2696             current.push(log);
2697             //debug(PRINTF) printf("-log_malloc()\n");
2698         }
2699
2700
2701         void log_free(void *p)
2702         {
2703             //debug(PRINTF) printf("+log_free(%x)\n", p);
2704             size_t i;
2705
2706             i = current.find(p);
2707             if (i == OPFAIL)
2708             {
2709                 debug(PRINTF) printf("free'ing unallocated memory %x\n", p);
2710             }
2711             else
2712                 current.remove(i);
2713             //debug(PRINTF) printf("-log_free()\n");
2714         }
2715
2716
2717         void log_collect()
2718         {
2719             //debug(PRINTF) printf("+log_collect()\n");
2720             // Print everything in current that is not in prev
2721
2722             debug(PRINTF) printf("New pointers this cycle: --------------------------------\n");
2723             size_t used = 0;
2724             for (size_t i = 0; i < current.dim; i++)
2725             {
2726                 size_t j;
2727
2728                 j = prev.find(current.data[i].p);
2729                 if (j == OPFAIL)
2730                     current.data[i].print();
2731                 else
2732                     used++;
2733             }
2734
2735             debug(PRINTF) printf("All roots this cycle: --------------------------------\n");
2736             for (size_t i = 0; i < current.dim; i++)
2737             {
2738                 void *p;
2739                 size_t j;
2740
2741                 p = current.data[i].p;
2742                 if (!findPool(current.data[i].parent))
2743                 {
2744                     j = prev.find(current.data[i].p);
2745                     if (j == OPFAIL)
2746                         debug(PRINTF) printf("N");
2747                     else
2748                         debug(PRINTF) printf(" ");
2749                     current.data[i].print();
2750                 }
2751             }
2752
2753             debug(PRINTF) printf("Used = %d-------------------------------------------------\n", used);
2754             prev.copy(&current);
2755
2756             debug(PRINTF) printf("-log_collect()\n");
2757         }
2758
2759
2760         void log_parent(void *p, void *parent)
2761         {
2762             //debug(PRINTF) printf("+log_parent()\n");
2763             size_t i;
2764
2765             i = current.find(p);
2766             if (i == OPFAIL)
2767             {
2768                 debug(PRINTF) printf("parent'ing unallocated memory %x, parent = %x\n", p, parent);
2769                 Pool *pool;
2770                 pool = findPool(p);
2771                 assert(pool);
2772                 size_t offset = cast(size_t)(p - pool.baseAddr);
2773                 size_t biti;
2774                 size_t pn = offset / PAGESIZE;
2775                 Bins bin = cast(Bins)pool.pagetable[pn];
2776                 biti = (offset & notbinsize[bin]);
2777                 debug(PRINTF) printf("\tbin = %d, offset = x%x, biti = x%x\n", bin, offset, biti);
2778             }
2779             else
2780             {
2781                 current.data[i].parent = parent;
2782             }
2783             //debug(PRINTF) printf("-log_parent()\n");
2784         }
2785
2786     }
2787     else
2788     {
2789         void log_init() { }
2790         void log_malloc(void *p, size_t size) { }
2791         void log_free(void *p) { }
2792         void log_collect() { }
2793         void log_parent(void *p, void *parent) { }
2794     }
2795 }
2796
2797
2798 /* ============================ Pool  =============================== */
2799
2800
2801 struct Pool
2802 {
2803     byte* baseAddr;
2804     byte* topAddr;
2805     GCBits mark;     // entries already scanned, or should not be scanned
2806     GCBits scan;     // entries that need to be scanned
2807     GCBits freebits; // entries that are on the free list
2808     GCBits finals;   // entries that need finalizer run on them
2809     GCBits noscan;   // entries that should not be scanned
2810
2811     size_t npages;
2812     ubyte* pagetable;
2813
2814
2815     void initialize(size_t npages)
2816     {
2817         size_t poolsize;
2818
2819         //debug(PRINTF) printf("Pool::Pool(%u)\n", npages);
2820         poolsize = npages * PAGESIZE;
2821         assert(poolsize >= POOLSIZE);
2822         baseAddr = cast(byte *) alloc.os_mem_map(poolsize);
2823
2824         // Some of the code depends on page alignment of memory pools
2825         assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);
2826
2827         if (!baseAddr)
2828         {
2829             //debug(PRINTF) printf("GC fail: poolsize = x%x, errno = %d\n", poolsize, errno);
2830             //debug(PRINTF) printf("message = '%s'\n", sys_errlist[errno]);
2831
2832             npages = 0;
2833             poolsize = 0;
2834         }
2835         //assert(baseAddr);
2836         topAddr = baseAddr + poolsize;
2837
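             // One bit per 16 bytes of pool memory for each of the bit sets.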
2838         mark.alloc(cast(size_t)poolsize / 16);
2839         scan.alloc(cast(size_t)poolsize / 16);
2840         freebits.alloc(cast(size_t)poolsize / 16);
2841         noscan.alloc(cast(size_t)poolsize / 16);
2842
2843         pagetable = cast(ubyte*) libc.malloc(npages);
2844         if (!pagetable)
2845             onOutOfMemoryError();
2846         libc.memset(pagetable, B_FREE, npages);
2847
2848         this.npages = npages;
2849     }
2850
2851
2852     void Dtor()
2853     {
2854         if (baseAddr)
2855         {
2856             int result;
2857
2858             if (npages)
2859             {
2860                 result = alloc.os_mem_unmap(baseAddr, npages * PAGESIZE);
2861                 assert(result);
2862                 npages = 0;
2863             }
2864
2865             baseAddr = null;
2866             topAddr = null;
2867         }
2868         if (pagetable)
2869             libc.free(pagetable);
2870
2871         mark.Dtor();
2872         scan.Dtor();
2873         freebits.Dtor();
2874         finals.Dtor();
2875         noscan.Dtor();
2876     }
2877
2878
2879     void Invariant() { }
2880
2881
2882     invariant
2883     {
2884         //mark.Invariant();
2885         //scan.Invariant();
2886         //freebits.Invariant();
2887         //finals.Invariant();
2888         //noscan.Invariant();
2889
2890         if (baseAddr)
2891         {
2892             //if (baseAddr + npages * PAGESIZE != topAddr)
2893                 //printf("baseAddr = %p, npages = %d, topAddr = %p\n", baseAddr, npages, topAddr);
2894             assert(baseAddr + npages * PAGESIZE == topAddr);
2895         }
2896
2897         for (size_t i = 0; i < npages; i++)
2898         {
2899             Bins bin = cast(Bins)pagetable[i];
2900             assert(bin < B_MAX);
2901         }
2902     }
2903
2904
2905     /**
2906      * Allocate n pages from Pool.
2907      * Returns OPFAIL on failure.
2908      */
2909     size_t allocPages(size_t n)
2910     {
2911         size_t i;
2912         size_t n2;
2913
2914         //debug(PRINTF) printf("Pool::allocPages(n = %d)\n", n);
2915         n2 = n;
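             // Linear first-fit search: n2 counts how many more consecutive
             // free pages are still needed; any non-free page resets the count.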
2916         for (i = 0; i < npages; i++)
2917         {
2918             if (pagetable[i] == B_FREE)
2919             {
2920                 if (--n2 == 0)
2921                 {
2922                     //debug(PRINTF) printf("\texisting pn = %d\n", i - n + 1);
2923                     return i - n + 1;
2924                 }
2925             }
2926             else
2927                 n2 = n;
2928         }
2929         return OPFAIL;
2930     }
2931
2932
2933     /**
2934      * Free npages pages starting with pagenum.
2935      */
2936     void freePages(size_t pagenum, size_t npages)
2937     {
2938         libc.memset(&pagetable[pagenum], B_FREE, npages);
2939     }
2940
2941
2942     /**
2943      * Used for sorting pooltable[]
2944      */
2945     int opCmp(Pool *p2)
2946     {
2947         if (baseAddr < p2.baseAddr)
2948             return -1;
2949         else
2950             return cast(int)(baseAddr > p2.baseAddr);
2951     }
2952 }
2953
2954
2955 /* ============================ SENTINEL =============================== */
2956
2957
2958 version (SENTINEL)
2959 {
2960     const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
2961     const ubyte SENTINEL_POST = 0xF5;           // 8 bits
2962     const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;
2963
2964
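     // With SENTINEL enabled each allocation is laid out as
     //     [ size_t size ][ size_t SENTINEL_PRE ][ user data ][ ubyte SENTINEL_POST ]
     // with the user pointer at the start of the user data, so sentinel_add
     // and sentinel_sub convert between the raw and the user pointer.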
2965     size_t* sentinel_size(void *p)  { return &(cast(size_t *)p)[-2]; }
2966     size_t* sentinel_pre(void *p)   { return &(cast(size_t *)p)[-1]; }
2967     ubyte* sentinel_post(void *p) { return &(cast(ubyte *)p)[*sentinel_size(p)]; }
2968
2969
2970     void sentinel_init(void *p, size_t size)
2971     {
2972         *sentinel_size(p) = size;
2973         *sentinel_pre(p) = SENTINEL_PRE;
2974         *sentinel_post(p) = SENTINEL_POST;
2975     }
2976
2977
2978     void sentinel_Invariant(void *p)
2979     {
2980         assert(*sentinel_pre(p) == SENTINEL_PRE);
2981         assert(*sentinel_post(p) == SENTINEL_POST);
2982     }
2983
2984
2985     void *sentinel_add(void *p)
2986     {
2987         return p + 2 * size_t.sizeof;
2988     }
2989
2990
2991     void *sentinel_sub(void *p)
2992     {
2993         return p - 2 * size_t.sizeof;
2994     }
2995 }
2996 else
2997 {
2998     const uint SENTINEL_EXTRA = 0;
2999
3000
3001     void sentinel_init(void *p, size_t size)
3002     {
3003     }
3004
3005
3006     void sentinel_Invariant(void *p)
3007     {
3008     }
3009
3010
3011     void *sentinel_add(void *p)
3012     {
3013         return p;
3014     }
3015
3016
3017     void *sentinel_sub(void *p)
3018     {
3019         return p;
3020     }
3021 }
3022