Change the min_free default to 5%
diff --git a/rt/gc/cdgc/gc.d b/rt/gc/cdgc/gc.d
index 11944d8dde11ced157cb9404a0cdfd27f0e4aa4d..2ec72c4b9ac273a046ca96eef200e5dce60fa5a7 100644
--- a/rt/gc/cdgc/gc.d
+++ b/rt/gc/cdgc/gc.d
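This change makes the collector maintain two running totals, gc.total_mem and
gc.free_mem, updated at every point where pages change state: pool creation,
allocation, in-place reallocation, sweeping, explicit free, and pool release.
The min_free option (whose default the commit subject changes to 5%) then acts
as the sizing policy: after a collection the heap is grown until at least
min_free% of it is free, and empty pools are returned to the OS only while
that threshold would still hold.
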
@@ -216,6 +216,11 @@ struct GC
     /// max(pool.topAddr)
     byte *max_addr;
 
+    /// Total heap memory
+    size_t total_mem;
+    /// Free heap memory
+    size_t free_mem;
+
     /// Free list for each size
     List*[B_MAX] free_list;
 
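The rest of the patch keeps these two counters consistent in pairs: newPool()
credits both, the pool release in minimize() debits both, the allocation paths
debit free_mem, and free() and the sweep phase credit it back. Below is a
minimal standalone model of that discipline, not cdgc code; MemStats and its
method names are illustrative:

    // Minimal model of the total_mem/free_mem accounting added by this
    // patch. MemStats is a stand-in, not a cdgc type.
    struct MemStats
    {
        size_t total_mem; // bytes owned by all pools
        size_t free_mem;  // bytes currently free (free pages + free lists)

        invariant () { assert (free_mem <= total_mem); }

        void addPool(size_t bytes)     { total_mem += bytes; free_mem += bytes; }
        void releasePool(size_t bytes) { total_mem -= bytes; free_mem -= bytes; }
        void allocate(size_t bytes)    { assert (bytes <= free_mem); free_mem -= bytes; }
        void release(size_t bytes)     { free_mem += bytes; }
    }

    void main()
    {
        MemStats s;
        s.addPool(16 * 4096);   // a new pool starts out entirely free
        s.allocate(4096);       // malloc() debits the block's capacity
        s.release(4096);        // free()/sweep() credit it back
        s.releasePool(16 * 4096);
        assert (s.total_mem == 0 && s.free_mem == 0);
    }
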
@@ -237,10 +242,19 @@ private T locked(T, alias Code)()
 
 private GC* gc;
 
+
+bool collect_in_progress()
+{
+    return gc.mark_proc_pid != 0;
+}
+
+
 bool Invariant()
 {
     assert (gc !is null);
     if (gc.inited) {
+        size_t total_mem = 0;
+        size_t free_mem = 0;
         for (size_t i = 0; i < gc.pools.length; i++) {
             Pool* pool = gc.pools[i];
             pool.Invariant();
@@ -250,6 +264,10 @@ bool Invariant()
                 assert(*pool < *gc.pools[i + 1]);
             else if (i + 1 == gc.pools.length)
                 assert(gc.max_addr == pool.topAddr);
+            total_mem += pool.npages * PAGESIZE;
+            for (size_t pn = 0; pn < pool.npages; ++pn)
+                if (pool.pagetable[pn] == B_FREE)
+                    free_mem += PAGESIZE;
         }
 
         gc.roots.Invariant();
@@ -269,8 +287,11 @@ bool Invariant()
                 assert (p >= pool.baseAddr);
                 assert (p < pool.topAddr);
                 assert (pool.freebits.test((p - pool.baseAddr) / 16));
+                free_mem += binsize[i];
             }
         }
+        assert (gc.total_mem == total_mem);
+        assert (gc.free_mem == free_mem);
     }
     return true;
 }
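With the counters in place, Invariant() cross-checks them from first
principles: every B_FREE page contributes PAGESIZE, every free-list node
contributes its bin size, and the sums must equal the cached fields. A
simplified, self-contained version of that walk is sketched below; Pool is
reduced to a bare pagetable and freeListBytes stands in for traversing
gc.free_list:

    // Simplified version of the free-memory recomputation in Invariant().
    // Pool, PAGESIZE and B_FREE are stand-ins for the cdgc definitions.
    enum PAGESIZE = 4096;
    enum ubyte B_FREE = 1;

    struct Pool { ubyte[] pagetable; } // one bin tag per page

    size_t recomputeFree(Pool[] pools, size_t[] freeListBytes)
    {
        size_t free_mem = 0;
        foreach (pool; pools)
            foreach (tag; pool.pagetable)
                if (tag == B_FREE)
                    free_mem += PAGESIZE;
        foreach (bytes; freeListBytes) // one entry per free-list node
            free_mem += bytes;
        return free_mem;
    }

    void main()
    {
        ubyte[] pt = [B_FREE, 0, B_FREE, 0]; // two of four pages are free
        assert (recomputeFree([Pool(pt)], [16, 16, 32]) == 2 * PAGESIZE + 64);
    }
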
@@ -408,21 +429,23 @@ size_t reserve(size_t size)
 
 /**
  * Minimizes physical memory usage by returning free pools to the OS.
+ *
+ * If full is false, a completely free pool is still kept alive when freeing
+ * it would leave less than min_free% of the heap free.
  */
-void minimize()
+void minimize(bool full = true)
 {
-    // Disabled if a parallel collection is in progress because the shared mark
-    // bits of the freed pool might be used by the mark process
-    if (gc.mark_proc_pid != 0)
+    // The shared mark bits of the freed pool might be used by the mark process
+    if (collect_in_progress())
         return;
 
-    size_t n;
-    size_t pn;
-    Pool* pool;
+    if (gc.pools.length == 0)
+        return;
 
-    for (n = 0; n < gc.pools.length; n++)
+    for (size_t n = 0; n < gc.pools.length; n++)
     {
-        pool = gc.pools[n];
+        Pool* pool = gc.pools[n];
+        size_t pn;
         for (pn = 0; pn < pool.npages; pn++)
         {
             if (cast(Bins)pool.pagetable[pn] != B_FREE)
@@ -430,6 +453,16 @@ void minimize()
         }
         if (pn < pool.npages)
             continue;
+        // Free pool
+        size_t pool_size = pool.npages * PAGESIZE;
+        if (!full) {
+            double percent_free = (gc.free_mem - pool_size) * 100.0 /
+                    (gc.total_mem - pool_size);
+            if (percent_free < opts.options.min_free)
+                continue; // not enough free memory, keep this pool
+        }
+        gc.total_mem -= pool_size;
+        gc.free_mem -= pool_size;
         pool.Dtor();
         cstdlib.free(pool);
         gc.pools.remove_at(n);
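This is the heart of the new minimize(false) mode: a fully free pool is
returned to the OS only if, with its size subtracted from both counters, the
heap would still be at least min_free percent free. A standalone sketch of
that decision, mirroring the percent computation above (the function name is
illustrative):

    // Sketch of the pool-retention rule used by minimize(false).
    bool shouldRelease(size_t total_mem, size_t free_mem,
            size_t pool_size, double min_free)
    {
        double percent_free = (free_mem - pool_size) * 100.0 /
                (total_mem - pool_size);
        return percent_free >= min_free;
    }

    void main()
    {
        enum MiB = 1024 * 1024;
        // 64 MiB heap with 8 MiB free: dropping a free 4 MiB pool leaves
        // 4/60 = ~6.7% free, enough for min_free = 5 but not for 10.
        assert (shouldRelease(64 * MiB, 8 * MiB, 4 * MiB, 5.0));
        assert (!shouldRelease(64 * MiB, 8 * MiB, 4 * MiB, 10.0));
    }
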
@@ -444,8 +477,9 @@ void minimize()
  * Allocate a chunk of memory that is larger than a page.
  * Return null if out of memory.
  */
-void* bigAlloc(size_t npages, out Pool* pool, size_t* pn)
+void* bigAlloc(size_t npages, out Pool* pool, size_t* pn, bool* collected)
 {
+    *collected = false;
     // This code could use some refinement when repeatedly
     // allocating very large arrays.
 
@@ -463,8 +497,6 @@ void* bigAlloc(size_t npages, out Pool* pool, size_t* pn)
 
     void* alloc_more()
     {
-        // Release empty pools to prevent bloat
-        minimize();
         // Allocate new pool
         pool = newPool(npages);
         if (!pool)
@@ -482,7 +514,8 @@ void* bigAlloc(size_t npages, out Pool* pool, size_t* pn)
 
     // Try collecting
     size_t freedpages = fullcollectshell();
-    if (freedpages >= gc.pools.length * ((POOLSIZE / PAGESIZE) / 4)) {
+    *collected = true;
+    if (freedpages >= npages) {
         if (void* p = find_block())
             return p;
     }
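The retry condition in bigAlloc() changes meaning here: the old code retried
the search whenever a collection freed about a quarter of the heap's pool
pages, while the new code retries only when at least npages were actually
freed, that is, when the request could now plausibly fit. A control-flow
sketch of the resulting allocate/collect/retry pattern, with find_block(),
fullcollectshell() and alloc_more() reduced to delegates so it stays
self-contained:

    // Control-flow sketch of bigAlloc() after this patch; the delegates
    // stand in for find_block(), fullcollectshell() and alloc_more().
    void* bigAllocFlow(size_t npages, void* delegate() find_block,
            size_t delegate() collect, void* delegate() alloc_more,
            out bool collected)
    {
        collected = false;
        if (void* p = find_block())
            return p;
        size_t freedpages = collect(); // try collecting
        collected = true;
        if (freedpages >= npages)      // enough pages might have been freed
            if (void* p = find_block())
                return p;
        return alloc_more();           // otherwise grow the heap
    }

    void main()
    {
        bool collected;
        int attempts;
        void* block = new int;
        void* find() { return attempts++ ? block : null; } // fails once
        size_t collect() { return 8; }                     // frees 8 pages
        void* grow() { return null; }
        assert (bigAllocFlow(1, &find, &collect, &grow, collected) is block);
        assert (collected);
    }
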
@@ -538,6 +571,9 @@ Pool *newPool(size_t npages)
     assert (inserted_pool is pool);
     gc.min_addr = gc.pools[0].baseAddr;
     gc.max_addr = gc.pools[gc.pools.length - 1].topAddr;
+    size_t pool_size = pool.topAddr - pool.baseAddr;
+    gc.total_mem += pool_size;
+    gc.free_mem += pool_size;
     return pool;
 }
 
@@ -769,7 +805,7 @@ size_t fullcollect(void *stackTop)
     // memory is freed (if that's not the case, the caller will allocate more
     // memory and the next time it's exhausted it will run a new collection).
     if (opts.options.eager_alloc) {
-        if (gc.mark_proc_pid != 0) { // there is a mark process in progress
+        if (collect_in_progress()) {
             os.WRes r = os.wait_pid(gc.mark_proc_pid, false); // don't block
             assert (r != os.WRes.ERROR);
             switch (r) {
@@ -982,6 +1018,7 @@ size_t sweep()
     debug(COLLECT_PRINTF) printf("\tsweep\n");
     gc.p_cache = null;
     gc.size_cache = 0;
+    gc.free_mem = 0; // will be recalculated
     size_t freedpages = 0;
     size_t freed = 0;
     for (size_t n = 0; n < gc.pools.length; n++)
@@ -1071,6 +1108,7 @@ version(none) // BUG: doesn't work because freebits() must also be cleared
                     pool.pagetable[pn] = B_FREE;
                     pool.freebits.set_group(bit_i, PAGESIZE / 16);
                     freedpages++;
+                    gc.free_mem += PAGESIZE;
                     if (opts.options.mem_stomp)
                         memset(p, 0xF3, PAGESIZE);
                     while (pn + 1 < pool.npages && pool.pagetable[pn + 1] == B_PAGEPLUS)
@@ -1080,6 +1118,7 @@ version(none) // BUG: doesn't work because freebits() must also be cleared
                         bit_i += bit_stride;
                         pool.freebits.set_group(bit_i, PAGESIZE / 16);
                         freedpages++;
+                        gc.free_mem += PAGESIZE;
 
                         if (opts.options.mem_stomp)
                         {
@@ -1089,6 +1128,9 @@ version(none) // BUG: doesn't work because freebits() must also be cleared
                     }
                 }
             }
+            else if (bin == B_FREE) {
+                gc.free_mem += PAGESIZE;
+            }
         }
     }
 
@@ -1124,6 +1166,7 @@ version(none) // BUG: doesn't work because freebits() must also be cleared
                 pool.pagetable[pn] = B_FREE;
                 pool.freebits.set_group(bit_base, PAGESIZE / 16);
                 recoveredpages++;
+                gc.free_mem += PAGESIZE;
                 continue;
 
              Lnotfree:
@@ -1142,6 +1185,7 @@ version(none) // BUG: doesn't work because freebits() must also be cleared
                         if (list.pool != pool)
                             list.pool = pool;
                         gc.free_list[bin] = list;
+                        gc.free_mem += binsize[bin];
                     }
                 }
             }
@@ -1290,6 +1334,7 @@ private void *malloc(size_t size, uint attrs, size_t* pm_bitmask)
     Pool* pool = void;
     size_t bit_i = void;
     size_t capacity = void; // to figure out where to store the bitmask
+    bool collected = false;
     if (bin < B_PAGE)
     {
         p = gc.free_list[bin];
@@ -1312,6 +1357,7 @@ private void *malloc(size_t size, uint attrs, size_t* pm_bitmask)
                 {
                     //newPool(1);
                 }
+                collected = true;
             }
             if (!gc.free_list[bin] && !allocPage(bin))
             {
@@ -1343,7 +1389,7 @@ private void *malloc(size_t size, uint attrs, size_t* pm_bitmask)
     {
         size_t pn;
         size_t npages = round_up(size, PAGESIZE);
-        p = bigAlloc(npages, pool, &pn);
+        p = bigAlloc(npages, pool, &pn, &collected);
         if (!p)
             onOutOfMemoryError();
         assert (pool !is null);
@@ -1380,6 +1426,21 @@ private void *malloc(size_t size, uint attrs, size_t* pm_bitmask)
         assert (bin >= B_PAGE || !pool.freebits.test(bit_i));
     }
 
+    gc.free_mem -= capacity;
+    if (collected) {
+        // If there is not enough free memory, allocate a new pool big enough
+        // to leave at least min_free% of the total heap free. If there is
+        // too much free memory, try to release some empty pools.
+        double percent_free = gc.free_mem * 100.0 / gc.total_mem;
+        if (percent_free < opts.options.min_free) {
+            auto pool_size = gc.total_mem * 1.0 / opts.options.min_free
+                    - gc.free_mem;
+            newPool(round_up(cast(size_t)pool_size, PAGESIZE));
+        }
+        else
+            minimize(false);
+    }
+
     return p;
 }
 
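This block only runs after an allocation that triggered a collection: if less
than min_free% of the heap is free, a new pool of roughly total_mem / min_free
- free_mem bytes is allocated; otherwise minimize(false) trims the heap. For
example, with min_free = 5, a 100 MiB heap with 2 MiB free grows by 100/5 - 2
= 18 MiB, which brings the free share to about 17%, comfortably above the
threshold. A standalone sketch of the sizing rule, assuming round_up(n, to)
yields a count of to-sized units as its other uses in this file suggest:

    // Sketch of the post-collection resize policy added to malloc();
    // PAGESIZE and the min_free handling mirror the patch.
    enum PAGESIZE = 4096;

    size_t round_up(size_t n, size_t to) { return (n + to - 1) / to; }

    // Number of pages for the new pool, or 0 when minimize(false)
    // should run instead.
    size_t growByPages(size_t total_mem, size_t free_mem, double min_free)
    {
        double percent_free = free_mem * 100.0 / total_mem;
        if (percent_free >= min_free)
            return 0;
        auto pool_size = total_mem * 1.0 / min_free - free_mem;
        return round_up(cast(size_t) pool_size, PAGESIZE);
    }

    void main()
    {
        enum MiB = 1024 * 1024;
        // 100 MiB heap, 2 MiB free, min_free = 5: grow by 18 MiB of pages.
        assert (growByPages(100 * MiB, 2 * MiB, 5.0) == 18 * MiB / PAGESIZE);
        // 100 MiB heap, 10 MiB free: already above 5%, shrink instead.
        assert (growByPages(100 * MiB, 10 * MiB, 5.0) == 0);
    }
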
@@ -1403,139 +1464,125 @@ private void *calloc(size_t size, uint attrs, size_t* pm_bitmask)
 private void *realloc(void *p, size_t size, uint attrs,
         size_t* pm_bitmask)
 {
-    if (!size)
-    {
+    if (!size) {
         if (p)
-        {
             free(p);
-            p = null;
-        }
+        return null;
     }
-    else if (!p)
-    {
-        p = malloc(size, attrs, pm_bitmask);
+
+    if (p is null)
+        return malloc(size, attrs, pm_bitmask);
+
+    Pool* pool = findPool(p);
+    if (pool is null)
+        return null;
+
+    // Set or retrieve attributes as appropriate
+    auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
+    if (attrs) {
+        clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
+        setAttr(pool, bit_i, attrs);
     }
     else
-    {
-        Pool* pool = findPool(p);
-        if (pool is null)
-            return null;
+        attrs = getAttr(pool, bit_i);
 
-        // Set or retrieve attributes as appropriate
-        auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
-        if (attrs) {
-            clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
-            setAttr(pool, bit_i, attrs);
+    void* blk_base_addr = pool.findBase(p);
+    size_t blk_size = pool.findSize(p);
+    bool has_pm = has_pointermap(attrs);
+    size_t pm_bitmask_size = 0;
+    if (has_pm) {
+        pm_bitmask_size = size_t.sizeof;
+        // Retrieve pointer map bit mask if appropriate
+        if (pm_bitmask is null) {
+            auto end_of_blk = cast(size_t**)(
+                    blk_base_addr + blk_size - size_t.sizeof);
+            pm_bitmask = *end_of_blk;
         }
-        else
-            attrs = getAttr(pool, bit_i);
-
-        void* blk_base_addr = pool.findBase(p);
-        size_t blk_size = pool.findSize(p);
-        bool has_pm = has_pointermap(attrs);
-        size_t pm_bitmask_size = 0;
-        if (has_pm) {
-            pm_bitmask_size = size_t.sizeof;
-            // Retrieve pointer map bit mask if appropriate
-            if (pm_bitmask is null) {
-                auto end_of_blk = cast(size_t**)(blk_base_addr +
-                        blk_size - size_t.sizeof);
-                pm_bitmask = *end_of_blk;
-            }
+    }
+
+    if (opts.options.sentinel) {
+        sentinel_Invariant(p);
+        size_t sentinel_stored_size = *sentinel_size(p);
+        if (sentinel_stored_size != size) {
+            void* p2 = malloc(size, attrs, pm_bitmask);
+            if (sentinel_stored_size < size)
+                size = sentinel_stored_size;
+            cstring.memcpy(p2, p, size);
+            p = p2;
         }
+        return p;
+    }
 
-        if (opts.options.sentinel)
-        {
-            sentinel_Invariant(p);
-            size_t sentinel_stored_size = *sentinel_size(p);
-            if (sentinel_stored_size != size)
-            {
-                void* p2 = malloc(size, attrs, pm_bitmask);
-                if (sentinel_stored_size < size)
-                    size = sentinel_stored_size;
-                cstring.memcpy(p2, p, size);
-                p = p2;
+    size += pm_bitmask_size;
+    if (blk_size >= PAGESIZE && size >= PAGESIZE) {
+        auto psz = blk_size / PAGESIZE;
+        auto newsz = round_up(size, PAGESIZE);
+        if (newsz == psz)
+            return p;
+
+        auto pagenum = (p - pool.baseAddr) / PAGESIZE;
+
+        if (newsz < psz) {
+            // Shrink in place
+            if (opts.options.mem_stomp)
+                memset(p + size - pm_bitmask_size, 0xF2,
+                        blk_size - size - pm_bitmask_size);
+            pool.freePages(pagenum + newsz, psz - newsz);
+            auto new_blk_size = (PAGESIZE * newsz);
+            gc.free_mem += blk_size - new_blk_size;
+            // update the size cache, assuming it is very likely that the
+            // size of this block will be queried in the near future
+            pool.update_cache(p, new_blk_size);
+            if (has_pm) {
+                auto end_of_blk = cast(size_t**)(blk_base_addr +
+                        new_blk_size - pm_bitmask_size);
+                *end_of_blk = pm_bitmask;
             }
+            return p;
         }
-        else
-        {
-            size += pm_bitmask_size;
-            if (blk_size >= PAGESIZE && size >= PAGESIZE)
-            {
-                auto psz = blk_size / PAGESIZE;
-                auto newsz = round_up(size, PAGESIZE);
-                if (newsz == psz)
-                    return p;
 
-                auto pagenum = (p - pool.baseAddr) / PAGESIZE;
-
-                if (newsz < psz)
-                {
-                    // Shrink in place
+        if (pagenum + newsz <= pool.npages) {
+            // Attempt to expand in place
+            for (size_t i = pagenum + psz; 1;) {
+                if (i == pagenum + newsz) {
                     if (opts.options.mem_stomp)
-                        memset(p + size - pm_bitmask_size, 0xF2,
-                                blk_size - size - pm_bitmask_size);
-                    pool.freePages(pagenum + newsz, psz - newsz);
+                        memset(p + blk_size - pm_bitmask_size, 0xF0,
+                                size - blk_size - pm_bitmask_size);
+                    memset(pool.pagetable + pagenum + psz, B_PAGEPLUS,
+                            newsz - psz);
                     auto new_blk_size = (PAGESIZE * newsz);
-                    // update the size cache, assuming that is very likely the
-                    // size of this block will be queried in the near future
+                    gc.free_mem -= new_blk_size - blk_size;
+                    // update the size cache, assuming it is very likely
+                    // that the size of this block will be queried in the
+                    // near future
                     pool.update_cache(p, new_blk_size);
                     if (has_pm) {
-                        auto end_of_blk = cast(size_t**)(blk_base_addr +
-                                new_blk_size - pm_bitmask_size);
+                        auto end_of_blk = cast(size_t**)(
+                                blk_base_addr + new_blk_size - pm_bitmask_size);
                         *end_of_blk = pm_bitmask;
                     }
                     return p;
                 }
-                else if (pagenum + newsz <= pool.npages)
-                {
-                    // Attempt to expand in place
-                    for (size_t i = pagenum + psz; 1;)
-                    {
-                        if (i == pagenum + newsz)
-                        {
-                            if (opts.options.mem_stomp)
-                                memset(p + blk_size - pm_bitmask_size,
-                                        0xF0, size - blk_size
-                                        - pm_bitmask_size);
-                            memset(pool.pagetable + pagenum +
-                                    psz, B_PAGEPLUS, newsz - psz);
-                            auto new_blk_size = (PAGESIZE * newsz);
-                            // update the size cache, assuming that is very
-                            // likely the size of this block will be queried in
-                            // the near future
-                            pool.update_cache(p, new_blk_size);
-                            if (has_pm) {
-                                auto end_of_blk = cast(size_t**)(
-                                        blk_base_addr + new_blk_size -
-                                        pm_bitmask_size);
-                                *end_of_blk = pm_bitmask;
-                            }
-                            return p;
-                        }
-                        if (i == pool.npages)
-                        {
-                            break;
-                        }
-                        if (pool.pagetable[i] != B_FREE)
-                            break;
-                        i++;
-                    }
-                }
-            }
-            // if new size is bigger or less than half
-            if (blk_size < size || blk_size > size * 2)
-            {
-                size -= pm_bitmask_size;
-                blk_size -= pm_bitmask_size;
-                void* p2 = malloc(size, attrs, pm_bitmask);
-                if (blk_size < size)
-                    size = blk_size;
-                cstring.memcpy(p2, p, size);
-                p = p2;
+                if (i == pool.npages)
+                    break;
+                if (pool.pagetable[i] != B_FREE)
+                    break;
+                i++;
             }
         }
     }
+
+    // if the new size is bigger, or less than half the old size
+    if (blk_size < size || blk_size > size * 2) {
+        size -= pm_bitmask_size;
+        blk_size -= pm_bitmask_size;
+        void* p2 = malloc(size, attrs, pm_bitmask);
+        if (blk_size < size)
+            size = blk_size;
+        cstring.memcpy(p2, p, size);
+        p = p2;
+    }
+
     return p;
 }
 
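Both in-place paths of the restructured realloc() adjust free_mem by the
page-rounded delta: shrinking hands psz - newsz pages back to the pool
(free_mem += blk_size - new_blk_size), while expanding in place absorbs free
pages (free_mem -= new_blk_size - blk_size). A small sketch of the two
adjustments, with illustrative function names:

    // Sketch of the free_mem deltas on realloc's in-place paths;
    // blk_size and new_blk_size are whole-page block sizes as in the diff.
    void onShrinkInPlace(ref size_t free_mem, size_t blk_size,
            size_t new_blk_size)
    {
        assert (new_blk_size < blk_size);
        free_mem += blk_size - new_blk_size; // pages handed back to the pool
    }

    void onExpandInPlace(ref size_t free_mem, size_t blk_size,
            size_t new_blk_size)
    {
        assert (new_blk_size > blk_size);
        assert (new_blk_size - blk_size <= free_mem);
        free_mem -= new_blk_size - blk_size; // free pages absorbed by the block
    }

    void main()
    {
        enum PAGESIZE = 4096;
        size_t free_mem = 10 * PAGESIZE;
        onExpandInPlace(free_mem, 2 * PAGESIZE, 5 * PAGESIZE);
        assert (free_mem == 7 * PAGESIZE);
        onShrinkInPlace(free_mem, 5 * PAGESIZE, 1 * PAGESIZE);
        assert (free_mem == 11 * PAGESIZE);
    }
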
@@ -1616,6 +1663,7 @@ body
     memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
     gc.p_cache = null;
     gc.size_cache = 0;
+    gc.free_mem -= new_size - blk_size;
     // update the size cache, assuming it is very likely that the size of
     // this block will be queried in the near future
     pool.update_cache(p, new_size);
@@ -1662,9 +1710,11 @@ private void free(void *p)
         pool.freebits.set_group(bit_i, PAGESIZE / 16);
         while (++n < pool.npages && pool.pagetable[n] == B_PAGEPLUS)
             npages++;
+        size_t size = npages * PAGESIZE;
         if (opts.options.mem_stomp)
-            memset(p, 0xF2, npages * PAGESIZE);
+            memset(p, 0xF2, size);
         pool.freePages(pagenum, npages);
+        gc.free_mem += size;
         // just in case we were caching this pointer
         pool.clear_cache(p);
     }
@@ -1680,7 +1730,11 @@ private void free(void *p)
         list.pool = pool;
         gc.free_list[bin] = list;
         pool.freebits.set(bit_i);
+        gc.free_mem += binsize[bin];
     }
+    double percent_free = gc.free_mem * 100.0 / gc.total_mem;
+    if (percent_free > opts.options.min_free)
+        minimize(false);
 }
 
 
@@ -1993,7 +2047,7 @@ struct Pool
         freebits.set_all();
 
         // avoid accidental sweeping of new pools while using eager allocation
-        if (gc.mark_proc_pid)
+        if (collect_in_progress())
             mark.set_all();
 
         pagetable = cast(ubyte*) cstdlib.malloc(npages);