Change the min_free default to 5%
diff --git a/rt/gc/cdgc/gc.d b/rt/gc/cdgc/gc.d
index 5805ea5045e02d2fb3cd14979be3c42ba82947b4..2ec72c4b9ac273a046ca96eef200e5dce60fa5a7 100644
@@ -216,6 +216,11 @@ struct GC
     /// max(pool.topAddr)
     byte *max_addr;
 
+    /// Total heap memory
+    size_t total_mem;
+    /// Free heap memory
+    size_t free_mem;
+
     /// Free list for each size
     List*[B_MAX] free_list;
 
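
Note: the two counters added above cache what Invariant() recomputes from
scratch in the hunks below: total_mem is the sum of all pool sizes, and
free_mem counts whole B_FREE pages plus every chunk parked in the per-size
free lists. A minimal sketch of that recomputation, assuming this module's
Pool, List and binsize definitions:

    // Rebuild the cached counters from the heap itself (mirrors the
    // new checks in Invariant()).
    void recompute(out size_t total_mem, out size_t free_mem)
    {
        for (size_t i = 0; i < gc.pools.length; i++) {
            Pool* pool = gc.pools[i];
            total_mem += pool.npages * PAGESIZE;
            for (size_t pn = 0; pn < pool.npages; ++pn)
                if (pool.pagetable[pn] == B_FREE)
                    free_mem += PAGESIZE;  // whole free pages
        }
        for (size_t i = 0; i < B_PAGE; i++)
            for (List* list = gc.free_list[i]; list !is null; list = list.next)
                free_mem += binsize[i];    // chunks in the free lists
    }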
@@ -237,10 +242,19 @@ private T locked(T, alias Code)()
 
 private GC* gc;
 
+
+bool collect_in_progress()
+{
+    return gc.mark_proc_pid != 0;
+}
+
+
 bool Invariant()
 {
     assert (gc !is null);
     if (gc.inited) {
+        size_t total_mem = 0;
+        size_t free_mem = 0;
         for (size_t i = 0; i < gc.pools.length; i++) {
             Pool* pool = gc.pools[i];
             pool.Invariant();
@@ -250,6 +264,10 @@ bool Invariant()
                 assert(*pool < *gc.pools[i + 1]);
             else if (i + 1 == gc.pools.length)
                 assert(gc.max_addr == pool.topAddr);
+            total_mem += pool.npages * PAGESIZE;
+            for (size_t pn = 0; pn < pool.npages; ++pn)
+                if (pool.pagetable[pn] == B_FREE)
+                    free_mem += PAGESIZE;
         }
 
         gc.roots.Invariant();
@@ -269,8 +287,11 @@ bool Invariant()
                 assert (p >= pool.baseAddr);
                 assert (p < pool.topAddr);
                 assert (pool.freebits.test((p - pool.baseAddr) / 16));
+                free_mem += binsize[i];
             }
         }
+        assert (gc.total_mem == total_mem);
+        assert (gc.free_mem == free_mem);
     }
     return true;
 }
@@ -408,21 +429,23 @@ size_t reserve(size_t size)
 
 /**
  * Minimizes physical memory usage by returning free pools to the OS.
+ *
+ * If full is false, empty pools are kept alive if freeing them would leave
+ * less than min_free% of the heap free.
  */
-void minimize()
+void minimize(bool full = true)
 {
-    // Disabled if a parallel collection is in progress because the shared mark
-    // bits of the freed pool might be used by the mark process
-    if (gc.mark_proc_pid != 0)
+    // The shared mark bits of the freed pool might be used by the mark process
+    if (collect_in_progress())
         return;
 
-    size_t n;
-    size_t pn;
-    Pool* pool;
+    if (gc.pools.length == 0)
+        return;
 
-    for (n = 0; n < gc.pools.length; n++)
+    for (size_t n = 0; n < gc.pools.length; n++)
     {
-        pool = gc.pools[n];
+        Pool* pool = gc.pools[n];
+        size_t pn;
         for (pn = 0; pn < pool.npages; pn++)
         {
             if (cast(Bins)pool.pagetable[pn] != B_FREE)
@@ -430,6 +453,16 @@ void minimize()
         }
         if (pn < pool.npages)
             continue;
+        // Free pool
+        size_t pool_size = pool.npages * PAGESIZE;
+        if (!full) {
+            double percent_free = (gc.free_mem - pool_size) * 100.0 /
+                    (gc.total_mem - pool_size);
+            if (percent_free < opts.options.min_free)
+                continue; // not enough free, don't remove this pool
+        }
+        gc.total_mem -= pool_size;
+        gc.free_mem -= pool_size;
         pool.Dtor();
         cstdlib.free(pool);
         gc.pools.remove_at(n);
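
This guard turns minimize(false) into a throttled shrink. For example, with
min_free = 5 (the new default from this commit), total_mem = 100 MiB and
free_mem = 6 MiB, releasing an empty 2 MiB pool would leave
(6 - 2) * 100 / (100 - 2) = ~4.1% free, so the pool is kept for reuse; with
10 MiB free the result is ~8.2% and the pool goes back to the OS. The
decision in isolation, as a sketch:

    // May minimize(false) release this fully-free pool?
    bool may_release(size_t total_mem, size_t free_mem,
            size_t pool_size, double min_free /* percent */)
    {
        double percent_free = (free_mem - pool_size) * 100.0
                / (total_mem - pool_size);
        return percent_free >= min_free;
    }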
@@ -444,89 +477,50 @@ void minimize()
  * Allocate a chunk of memory that is larger than a page.
  * Return null if out of memory.
  */
-void* bigAlloc(size_t size, out Pool* pool)
+void* bigAlloc(size_t npages, out Pool* pool, size_t* pn, bool* collected)
 {
-    size_t npages;
-    size_t n;
-    size_t pn;
-    size_t freedpages;
-    void*  p;
-    int    state;
+    *collected = false;
+    // This code could use some refinement when repeatedly
+    // allocating very large arrays.
 
-    npages = round_up(size, PAGESIZE);
-
-    for (state = 0; ; )
+    void* find_block()
     {
-        // This code could use some refinement when repeatedly
-        // allocating very large arrays.
-
-        for (n = 0; n < gc.pools.length; n++)
+        for (size_t n = 0; n < gc.pools.length; n++)
         {
             pool = gc.pools[n];
-            pn = pool.allocPages(npages);
-            if (pn != OPFAIL)
-                goto L1;
+            *pn = pool.allocPages(npages);
+            if (*pn != OPFAIL)
+                return pool.baseAddr + *pn * PAGESIZE;
         }
+        return null;
+    }
 
-        // Failed
-        switch (state)
-        {
-        case 0:
-            if (gc.disabled)
-            {
-                state = 1;
-                continue;
-            }
-            // Try collecting
-            freedpages = fullcollectshell();
-            if (freedpages >= gc.pools.length * ((POOLSIZE / PAGESIZE) / 4))
-            {
-                state = 1;
-                continue;
-            }
-            // Release empty pools to prevent bloat
-            minimize();
-            // Allocate new pool
-            pool = newPool(npages);
-            if (!pool)
-            {
-                state = 2;
-                continue;
-            }
-            pn = pool.allocPages(npages);
-            assert(pn != OPFAIL);
-            goto L1;
-        case 1:
-            // Release empty pools to prevent bloat
-            minimize();
-            // Allocate new pool
-            pool = newPool(npages);
-            if (!pool)
-                goto Lnomemory;
-            pn = pool.allocPages(npages);
-            assert(pn != OPFAIL);
-            goto L1;
-        case 2:
-            goto Lnomemory;
-        default:
-            assert(false);
-        }
+    void* alloc_more()
+    {
+        // Allocate new pool
+        pool = newPool(npages);
+        if (!pool)
+            return null; // let malloc handle the error
+        *pn = pool.allocPages(npages);
+        assert(*pn != OPFAIL);
+        return pool.baseAddr + *pn * PAGESIZE;
     }
 
-  L1:
-    size_t bit_i = pn * (PAGESIZE / 16);
-    pool.freebits.clear(bit_i);
-    pool.pagetable[pn] = B_PAGE;
-    if (npages > 1)
-        memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
-    p = pool.baseAddr + pn * PAGESIZE;
-    memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
-    if (opts.options.mem_stomp)
-        memset(p, 0xF1, size);
-    return p;
+    if (void* p = find_block())
+        return p;
+
+    if (gc.disabled)
+        return alloc_more();
 
-  Lnomemory:
-    return null; // let mallocNoSync handle the error
+    // Try collecting
+    size_t freedpages = fullcollectshell();
+    *collected = true;
+    if (freedpages >= npages) {
+        if (void* p = find_block())
+            return p;
+    }
+
+    return alloc_more();
 }
 
 
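
The goto-based state machine is gone: bigAlloc() now takes a page count
directly, tries the existing pools, collects at most once (retrying the
search only if the collection freed at least npages), and finally grows the
heap. Marking the pages and clearing the tail moved to the caller; a sketch
of the new calling convention, matching malloc()'s big-object branch later
in this diff:

    Pool* pool;
    size_t pn;
    bool collected;
    size_t npages = round_up(size, PAGESIZE); // bytes -> pages
    void* p = bigAlloc(npages, pool, &pn, &collected);
    if (p is null)
        onOutOfMemoryError();
    // caller then sets B_PAGE/B_PAGEPLUS in pool.pagetable[pn .. pn+npages]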
@@ -577,6 +571,9 @@ Pool *newPool(size_t npages)
     assert (inserted_pool is pool);
     gc.min_addr = gc.pools[0].baseAddr;
     gc.max_addr = gc.pools[gc.pools.length - 1].topAddr;
+    size_t pool_size = pool.topAddr - pool.baseAddr;
+    gc.total_mem += pool_size;
+    gc.free_mem += pool_size;
     return pool;
 }
 
@@ -808,7 +805,7 @@ size_t fullcollect(void *stackTop)
     // memory is freed (if that is not the case, the caller will allocate more
     // memory and the next time it's exhausted it will run a new collection).
     if (opts.options.eager_alloc) {
-        if (gc.mark_proc_pid != 0) { // there is a mark process in progress
+        if (collect_in_progress()) {
             os.WRes r = os.wait_pid(gc.mark_proc_pid, false); // don't block
             assert (r != os.WRes.ERROR);
             switch (r) {
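
For context: with eager allocation, marking runs in a fork()ed child while
the program keeps allocating, so fullcollect() first reaps a finished mark
child without blocking. A hypothetical sketch of what a non-blocking wait
like os.wait_pid(pid, false) boils down to on POSIX (the Tango import path
is an assumption, not this repo's actual os wrapper):

    import tango.stdc.posix.sys.wait; // assumed POSIX binding

    // True once the mark child has exited; never blocks.
    bool mark_child_done(pid_t pid)
    {
        int status;
        pid_t r = waitpid(pid, &status, WNOHANG);
        if (r == 0)
            return false; // still marking
        return true;      // exited (error handling elided)
    }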
@@ -1021,6 +1018,7 @@ size_t sweep()
     debug(COLLECT_PRINTF) printf("\tsweep\n");
     gc.p_cache = null;
     gc.size_cache = 0;
+    gc.free_mem = 0; // will be recalculated
     size_t freedpages = 0;
     size_t freed = 0;
     for (size_t n = 0; n < gc.pools.length; n++)
@@ -1110,6 +1108,7 @@ version(none) // BUG: doesn't work because freebits() must also be cleared
                     pool.pagetable[pn] = B_FREE;
                     pool.freebits.set_group(bit_i, PAGESIZE / 16);
                     freedpages++;
+                    gc.free_mem += PAGESIZE;
                     if (opts.options.mem_stomp)
                         memset(p, 0xF3, PAGESIZE);
                     while (pn + 1 < pool.npages && pool.pagetable[pn + 1] == B_PAGEPLUS)
@@ -1119,6 +1118,7 @@ version(none) // BUG: doesn't work because freebits() must also be cleared
                         bit_i += bit_stride;
                         pool.freebits.set_group(bit_i, PAGESIZE / 16);
                         freedpages++;
+                        gc.free_mem += PAGESIZE;
 
                         if (opts.options.mem_stomp)
                         {
@@ -1128,6 +1128,9 @@ version(none) // BUG: doesn't work because freebits() must also be cleared
                     }
                 }
             }
+            else if (bin == B_FREE) {
+                gc.free_mem += PAGESIZE;
+            }
         }
     }
 
@@ -1163,6 +1166,7 @@ version(none) // BUG: doesn't work because freebits() must also be cleared
                 pool.pagetable[pn] = B_FREE;
                 pool.freebits.set_group(bit_base, PAGESIZE / 16);
                 recoveredpages++;
+                gc.free_mem += PAGESIZE;
                 continue;
 
              Lnotfree:
@@ -1181,6 +1185,7 @@ version(none) // BUG: doesn't work because freebits() must also be cleared
                         if (list.pool != pool)
                             list.pool = pool;
                         gc.free_list[bin] = list;
+                        gc.free_mem += binsize[bin];
                     }
                 }
             }
@@ -1329,6 +1334,7 @@ private void *malloc(size_t size, uint attrs, size_t* pm_bitmask)
     Pool* pool = void;
     size_t bit_i = void;
     size_t capacity = void; // to figure out where to store the bitmask
+    bool collected = false;
     if (bin < B_PAGE)
     {
         p = gc.free_list[bin];
@@ -1351,6 +1357,7 @@ private void *malloc(size_t size, uint attrs, size_t* pm_bitmask)
                 {
                     //newPool(1);
                 }
+                collected = true;
             }
             if (!gc.free_list[bin] && !allocPage(bin))
             {
@@ -1380,13 +1387,24 @@ private void *malloc(size_t size, uint attrs, size_t* pm_bitmask)
     }
     else
     {
-        p = bigAlloc(size, pool);
+        size_t pn;
+        size_t npages = round_up(size, PAGESIZE);
+        p = bigAlloc(npages, pool, &pn, &collected);
         if (!p)
             onOutOfMemoryError();
         assert (pool !is null);
-        size_t npages = round_up(size, PAGESIZE);
+
         capacity = npages * PAGESIZE;
-        bit_i = (p - pool.baseAddr) / 16;
+        bit_i = pn * (PAGESIZE / 16);
+        pool.freebits.clear(bit_i);
+        pool.pagetable[pn] = B_PAGE;
+        if (npages > 1)
+            memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
+        p = pool.baseAddr + pn * PAGESIZE;
+        memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
+        if (opts.options.mem_stomp)
+            memset(p, 0xF1, size);
+
     }
 
     // Store the bit mask AFTER SENTINEL_POST
@@ -1408,6 +1426,21 @@ private void *malloc(size_t size, uint attrs, size_t* pm_bitmask)
         assert (bin >= B_PAGE || !pool.freebits.test(bit_i));
     }
 
+    gc.free_mem -= capacity;
+    if (collected) {
+        // If there is not enough free memory, allocate a new pool big enough
+        // to have at least min_free% of the total heap free. If there is
+        // more than enough free memory, try to release some empty pools.
+        double percent_free = gc.free_mem * 100.0 / gc.total_mem;
+        if (percent_free < opts.options.min_free) {
+            auto pool_size = gc.total_mem * 1.0 / opts.options.min_free
+                    - gc.free_mem;
+            newPool(round_up(cast(size_t)pool_size, PAGESIZE));
+        }
+        else
+            minimize(false);
+    }
+
     return p;
 }
 
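
The numbers make the heuristic concrete: with min_free = 5, total_mem =
100 MiB and free_mem = 3 MiB after the collection, percent_free = 3.0 is
below the threshold, so a pool of about 100 / 5 - 3 = 17 MiB is requested
up front instead of growing one page at a time. The growth branch in
isolation, as a sketch (round_up() here yields a page count, as in the
diff):

    // How many pages the growth branch asks newPool() for.
    size_t grow_pages(size_t total_mem, size_t free_mem, double min_free)
    {
        double pool_size = total_mem * 1.0 / min_free - free_mem;
        return round_up(cast(size_t) pool_size, PAGESIZE);
    }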
@@ -1431,139 +1464,125 @@ private void *calloc(size_t size, uint attrs, size_t* pm_bitmask)
 private void *realloc(void *p, size_t size, uint attrs,
         size_t* pm_bitmask)
 {
-    if (!size)
-    {
+    if (!size) {
         if (p)
-        {
             free(p);
-            p = null;
-        }
+        return null;
     }
-    else if (!p)
-    {
-        p = malloc(size, attrs, pm_bitmask);
+
+    if (p is null)
+        return malloc(size, attrs, pm_bitmask);
+
+    Pool* pool = findPool(p);
+    if (pool is null)
+        return null;
+
+    // Set or retrieve attributes as appropriate
+    auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
+    if (attrs) {
+        clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
+        setAttr(pool, bit_i, attrs);
     }
     else
-    {
-        Pool* pool = findPool(p);
-        if (pool is null)
-            return null;
+        attrs = getAttr(pool, bit_i);
 
-        // Set or retrieve attributes as appropriate
-        auto bit_i = cast(size_t)(p - pool.baseAddr) / 16;
-        if (attrs) {
-            clrAttr(pool, bit_i, BlkAttr.ALL_BITS);
-            setAttr(pool, bit_i, attrs);
-        }
-        else
-            attrs = getAttr(pool, bit_i);
-
-        void* blk_base_addr = pool.findBase(p);
-        size_t blk_size = pool.findSize(p);
-        bool has_pm = has_pointermap(attrs);
-        size_t pm_bitmask_size = 0;
-        if (has_pm) {
-            pm_bitmask_size = size_t.sizeof;
-            // Retrieve pointer map bit mask if appropriate
-            if (pm_bitmask is null) {
-                auto end_of_blk = cast(size_t**)(blk_base_addr +
-                        blk_size - size_t.sizeof);
-                pm_bitmask = *end_of_blk;
-            }
+    void* blk_base_addr = pool.findBase(p);
+    size_t blk_size = pool.findSize(p);
+    bool has_pm = has_pointermap(attrs);
+    size_t pm_bitmask_size = 0;
+    if (has_pm) {
+        pm_bitmask_size = size_t.sizeof;
+        // Retrieve pointer map bit mask if appropriate
+        if (pm_bitmask is null) {
+            auto end_of_blk = cast(size_t**)(
+                    blk_base_addr + blk_size - size_t.sizeof);
+            pm_bitmask = *end_of_blk;
         }
+    }
 
-        if (opts.options.sentinel)
-        {
-            sentinel_Invariant(p);
-            size_t sentinel_stored_size = *sentinel_size(p);
-            if (sentinel_stored_size != size)
-            {
-                void* p2 = malloc(size, attrs, pm_bitmask);
-                if (sentinel_stored_size < size)
-                    size = sentinel_stored_size;
-                cstring.memcpy(p2, p, size);
-                p = p2;
+    if (opts.options.sentinel) {
+        sentinel_Invariant(p);
+        size_t sentinel_stored_size = *sentinel_size(p);
+        if (sentinel_stored_size != size) {
+            void* p2 = malloc(size, attrs, pm_bitmask);
+            if (sentinel_stored_size < size)
+                size = sentinel_stored_size;
+            cstring.memcpy(p2, p, size);
+            p = p2;
+        }
+        return p;
+    }
+
+    size += pm_bitmask_size;
+    if (blk_size >= PAGESIZE && size >= PAGESIZE) {
+        auto psz = blk_size / PAGESIZE;
+        auto newsz = round_up(size, PAGESIZE);
+        if (newsz == psz)
+            return p;
+
+        auto pagenum = (p - pool.baseAddr) / PAGESIZE;
+
+        if (newsz < psz) {
+            // Shrink in place
+            if (opts.options.mem_stomp)
+                memset(p + size - pm_bitmask_size, 0xF2,
+                        blk_size - size - pm_bitmask_size);
+            pool.freePages(pagenum + newsz, psz - newsz);
+            auto new_blk_size = (PAGESIZE * newsz);
+            gc.free_mem += blk_size - new_blk_size;
+            // update the size cache, assuming it is very likely that the
+            // size of this block will be queried in the near future
+            pool.update_cache(p, new_blk_size);
+            if (has_pm) {
+                auto end_of_blk = cast(size_t**)(blk_base_addr +
+                        new_blk_size - pm_bitmask_size);
+                *end_of_blk = pm_bitmask;
             }
+            return p;
         }
-        else
-        {
-            size += pm_bitmask_size;
-            if (blk_size >= PAGESIZE && size >= PAGESIZE)
-            {
-                auto psz = blk_size / PAGESIZE;
-                auto newsz = round_up(size, PAGESIZE);
-                if (newsz == psz)
-                    return p;
-
-                auto pagenum = (p - pool.baseAddr) / PAGESIZE;
 
-                if (newsz < psz)
-                {
-                    // Shrink in place
+        if (pagenum + newsz <= pool.npages) {
+            // Attempt to expand in place
+            for (size_t i = pagenum + psz; 1;) {
+                if (i == pagenum + newsz) {
                     if (opts.options.mem_stomp)
-                        memset(p + size - pm_bitmask_size, 0xF2,
-                                blk_size - size - pm_bitmask_size);
-                    pool.freePages(pagenum + newsz, psz - newsz);
+                        memset(p + blk_size - pm_bitmask_size, 0xF0,
+                                size - blk_size - pm_bitmask_size);
+                    memset(pool.pagetable + pagenum + psz, B_PAGEPLUS,
+                            newsz - psz);
                     auto new_blk_size = (PAGESIZE * newsz);
-                    // update the size cache, assuming that is very likely the
-                    // size of this block will be queried in the near future
+                    gc.free_mem -= new_blk_size - blk_size;
+                    // update the size cache, assuming it is very likely
+                    // that the size of this block will be queried in
+                    // the near future
                     pool.update_cache(p, new_blk_size);
                     if (has_pm) {
-                        auto end_of_blk = cast(size_t**)(blk_base_addr +
-                                new_blk_size - pm_bitmask_size);
+                        auto end_of_blk = cast(size_t**)(
+                                blk_base_addr + new_blk_size - pm_bitmask_size);
                         *end_of_blk = pm_bitmask;
                     }
                     return p;
                 }
-                else if (pagenum + newsz <= pool.npages)
-                {
-                    // Attempt to expand in place
-                    for (size_t i = pagenum + psz; 1;)
-                    {
-                        if (i == pagenum + newsz)
-                        {
-                            if (opts.options.mem_stomp)
-                                memset(p + blk_size - pm_bitmask_size,
-                                        0xF0, size - blk_size
-                                        - pm_bitmask_size);
-                            memset(pool.pagetable + pagenum +
-                                    psz, B_PAGEPLUS, newsz - psz);
-                            auto new_blk_size = (PAGESIZE * newsz);
-                            // update the size cache, assuming that is very
-                            // likely the size of this block will be queried in
-                            // the near future
-                            pool.update_cache(p, new_blk_size);
-                            if (has_pm) {
-                                auto end_of_blk = cast(size_t**)(
-                                        blk_base_addr + new_blk_size -
-                                        pm_bitmask_size);
-                                *end_of_blk = pm_bitmask;
-                            }
-                            return p;
-                        }
-                        if (i == pool.npages)
-                        {
-                            break;
-                        }
-                        if (pool.pagetable[i] != B_FREE)
-                            break;
-                        i++;
-                    }
-                }
-            }
-            // if new size is bigger or less than half
-            if (blk_size < size || blk_size > size * 2)
-            {
-                size -= pm_bitmask_size;
-                blk_size -= pm_bitmask_size;
-                void* p2 = malloc(size, attrs, pm_bitmask);
-                if (blk_size < size)
-                    size = blk_size;
-                cstring.memcpy(p2, p, size);
-                p = p2;
+                if (i == pool.npages)
+                    break;
+                if (pool.pagetable[i] != B_FREE)
+                    break;
+                i++;
             }
         }
     }
+
+    // if new size is bigger or less than half
+    if (blk_size < size || blk_size > size * 2) {
+        size -= pm_bitmask_size;
+        blk_size -= pm_bitmask_size;
+        void* p2 = malloc(size, attrs, pm_bitmask);
+        if (blk_size < size)
+            size = blk_size;
+        cstring.memcpy(p2, p, size);
+        p = p2;
+    }
+
     return p;
 }
 
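
The realloc() rewrite is a flattening, not a behavioral change: the if/else
pyramid becomes a chain of early returns (zero size, null pointer, sentinel
copy, unchanged page count, shrink in place, expand in place, and finally
move-and-copy), and the only functional additions are the two free_mem
adjustments. The expand-in-place condition, distilled into a sketch:

    // Expansion in place is only legal while every page after the
    // block, up to the new size, is still B_FREE.
    bool can_expand(Pool* pool, size_t pagenum, size_t psz, size_t newsz)
    {
        if (pagenum + newsz > pool.npages)
            return false;
        for (size_t i = pagenum + psz; i < pagenum + newsz; i++)
            if (pool.pagetable[i] != B_FREE)
                return false;
        return true;
    }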
@@ -1644,6 +1663,7 @@ body
     memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
     gc.p_cache = null;
     gc.size_cache = 0;
+    gc.free_mem -= new_size - blk_size;
     // update the size cache, assuming it is very likely that the size of
     // this block will be queried in the near future
     pool.update_cache(p, new_size);
@@ -1690,9 +1710,11 @@ private void free(void *p)
         pool.freebits.set_group(bit_i, PAGESIZE / 16);
         while (++n < pool.npages && pool.pagetable[n] == B_PAGEPLUS)
             npages++;
+        size_t size = npages * PAGESIZE;
         if (opts.options.mem_stomp)
-            memset(p, 0xF2, npages * PAGESIZE);
+            memset(p, 0xF2, size);
         pool.freePages(pagenum, npages);
+        gc.free_mem += size;
         // just in case we were caching this pointer
         pool.clear_cache(p);
     }
@@ -1708,7 +1730,11 @@ private void free(void *p)
         list.pool = pool;
         gc.free_list[bin] = list;
         pool.freebits.set(bit_i);
+        gc.free_mem += binsize[bin];
     }
+    double percent_free = gc.free_mem * 100.0 / gc.total_mem;
+    if (percent_free > opts.options.min_free)
+        minimize(false);
 }
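
free() is now the second shrink trigger besides the post-collection check
in malloc(): every explicit deallocation credits free_mem, and once the
free percentage exceeds min_free, minimize(false) may return empty pools to
the OS. So a large short-lived block can give its memory back without
waiting for a collection; illustratively (gc_malloc/gc_free stand in for
the runtime's entry points):

    void* buf = gc_malloc(64 * 1024 * 1024); // may grow the heap
    gc_free(buf);  // free_mem jumps above the threshold, so
                   // minimize(false) can unmap the now-empty pool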
 
 
@@ -2021,7 +2047,7 @@ struct Pool
         freebits.set_all();
 
         // avoid accidental sweeping of new pools while using eager allocation
-        if (gc.mark_proc_pid)
+        if (collect_in_progress())
             mark.set_all();
 
         pagetable = cast(ubyte*) cstdlib.malloc(npages);
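
The collect_in_progress() predicate introduced at the top of this diff now
guards every operation that must not race with a forked mark child:
minimize() (the freed pool's shared mark bits may still be read), the
eager-allocation check in fullcollect(), and here the Pool constructor,
which sets all mark bits so the in-flight collection's sweep treats the
brand-new pool as fully reachable. For context, the overall shape of the
fork()-based mark this protects (simplified; the import path and
mark_heap() are assumptions):

    import tango.stdc.posix.unistd; // assumed POSIX binding

    void start_concurrent_mark()
    {
        pid_t child = fork();     // copy-on-write snapshot of the heap
        if (child == 0) {
            mark_heap();          // hypothetical: fill the shared mark bits
            _exit(0);
        }
        gc.mark_proc_pid = child; // the program keeps running meanwhile
    }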