[pypy-commit] stmgc gc-small-uniform: Tests pass so far

arigo noreply at buildbot.pypy.org
Sat Apr 5 22:18:13 CEST 2014


Author: Armin Rigo <arigo at tunes.org>
Branch: gc-small-uniform
Changeset: r1132:2943d03c84e8
Date: 2014-04-05 22:18 +0200
http://bitbucket.org/pypy/stmgc/changeset/2943d03c84e8/

Log:	Tests pass so far

diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c
--- a/c7/stm/gcpage.c
+++ b/c7/stm/gcpage.c
@@ -64,12 +64,7 @@
 object_t *_stm_allocate_old(ssize_t size_rounded_up)
 {
     /* only for tests xxx but stm_setup_prebuilt() uses this now too */
-    char *p;
-    if (size_rounded_up > GC_LAST_SMALL_SIZE)
-        p = allocate_outside_nursery_large(size_rounded_up);
-    else
-        p = allocate_outside_nursery_small(size_rounded_up);
-
+    char *p = allocate_outside_nursery_large(size_rounded_up);
     memset(p, 0, size_rounded_up);
 
     object_t *o = (object_t *)(p - stm_object_pages);
diff --git a/c7/stm/smallmalloc.c b/c7/stm/smallmalloc.c
--- a/c7/stm/smallmalloc.c
+++ b/c7/stm/smallmalloc.c
@@ -19,18 +19,27 @@
    technically full yet, it will be very soon in this case).
 */
 
-static fpsz_t *get_fp_sz(char *smallpage)
+static fpsz_t *get_fpsz(char *smallpage)
 {
     uintptr_t pagenum = (((char *)smallpage) - stm_object_pages) / 4096;
+    assert(PAGE_SMSIZE_START <= pagenum && pagenum < PAGE_SMSIZE_END);
     return &full_pages_object_size[pagenum - PAGE_SMSIZE_START];
 }
 
 
+#ifdef STM_TESTS
+bool (*_stm_smallmalloc_keep)(char *data);   /* a hook for tests */
+#endif
+
 static void teardown_smallmalloc(void)
 {
     memset(small_page_lists, 0, sizeof(small_page_lists));
     assert(free_uniform_pages == NULL);   /* done by the previous line */
     first_small_uniform_loc = (uintptr_t) -1;
+#ifdef STM_TESTS
+    _stm_smallmalloc_keep = NULL;
+#endif
+    memset(full_pages_object_size, 0, sizeof(full_pages_object_size));
 }
 
 static void grab_more_free_pages_for_small_allocations(void)
@@ -59,8 +68,8 @@
         char *p = uninitialized_page_stop;
         long i;
         for (i = 0; i < GCPAGE_NUM_PAGES; i++) {
-            ((struct small_page_list_s *)p)->nextpage = free_uniform_pages;
-            free_uniform_pages = (struct small_page_list_s *)p;
+            ((struct small_free_loc_s *)p)->nextpage = free_uniform_pages;
+            free_uniform_pages = (struct small_free_loc_s *)p;
             p += 4096;
         }
     }
@@ -75,7 +84,7 @@
 static char *_allocate_small_slowpath(uint64_t size)
 {
     long n = size / 8;
-    struct small_page_list_s *smallpage;
+    struct small_free_loc_s *smallpage;
     struct small_free_loc_s *TLPREFIX *fl =
         &STM_PSEGMENT->small_malloc_data.loc_free[n];
     assert(*fl == NULL);
@@ -91,8 +100,8 @@
             goto retry;
 
         /* Succeeded: we have a page in 'smallpage' */
-        *fl = smallpage->header.next;
-        get_fp_sz((char *)smallpage)->sz = n;
+        *fl = smallpage->next;
+        get_fpsz((char *)smallpage)->sz = n;
         return (char *)smallpage;
     }
 
@@ -110,22 +119,24 @@
            initialized so far, apart from the 'nextpage' field read
            above.  Initialize it.
         */
+        struct small_free_loc_s *p, **previous;
         assert(!(((uintptr_t)smallpage) & 4095));
-        struct small_free_loc_s *p, *following = NULL;
+        previous = (struct small_free_loc_s **)
+            REAL_ADDRESS(STM_SEGMENT->segment_base, fl);
 
         /* Initialize all slots from the second one to the last one to
            contain a chained list */
         uintptr_t i = size;
         while (i <= 4096 - size) {
             p = (struct small_free_loc_s *)(((char *)smallpage) + i);
-            p->next = following;
-            following = p;
+            *previous = p;
+            previous = &p->next;
             i += size;
         }
+        *previous = NULL;
 
         /* The first slot is immediately returned */
-        *fl = following;
-        get_fp_sz((char *)smallpage)->sz = n;
+        get_fpsz((char *)smallpage)->sz = n;
         return (char *)smallpage;
     }
 
@@ -153,22 +164,97 @@
     return (char *)result;
 }
 
+object_t *_stm_allocate_old_small(ssize_t size_rounded_up)
+{
+    char *p = allocate_outside_nursery_small(size_rounded_up);
+    return (object_t *)(p - stm_object_pages);
+}
+
+/************************************************************/
+
+static inline bool _smallmalloc_sweep_keep(char *p)
+{
+#ifdef STM_TESTS
+    if (_stm_smallmalloc_keep != NULL)
+        return _stm_smallmalloc_keep(p);
+#endif
+    abort();
+    //return smallmalloc_keep_object_at(p);
+}
+
+void check_order_inside_small_page(struct small_free_loc_s *page)
+{
+#ifndef NDEBUG
+    /* the free locations are supposed to be in increasing order */
+    while (page->next != NULL) {
+        assert(page->next > page);
+        page = page->next;
+    }
+#endif
+}
+
 void sweep_small_page_full(char *page, long szword)
 {
     abort();
 }
 
-void sweep_small_page_partial(struct small_free_loc_s *free_loc, long szword)
+void sweep_small_page_partial(struct small_free_loc_s *page, long szword)
 {
-    abort();
+    check_order_inside_small_page(page);
+
+    /* for every non-free location, ask the keep hook whether it survives */
+    char *baseptr = (char *)(((uintptr_t)page) & ~4095);
+    uintptr_t i, size = szword * 8;
+    bool any_object_remaining = false;
+    struct small_free_loc_s *fl = page;
+    struct small_free_loc_s *flprev = NULL;
+
+    /* XXX could optimize for the case where all objects die: we don't
+       need to painfully rebuild the free list in the whole page, just
+       to have it ignored in the end because we put the page into
+       'free_uniform_pages' */
+
+    for (i = 0; i <= 4096 - size; i += size) {
+        char *p = baseptr + i;
+        if (p == (char *)fl) {
+            /* location is already free */
+            flprev = fl;
+            fl = fl->next;
+        }
+        else if (!_smallmalloc_sweep_keep(p)) {
+            /* the location contains an object that is dead now:
+               link it into the free list, keeping address order */
+            struct small_free_loc_s *q = (struct small_free_loc_s *)p;
+            q->next = fl;
+            if (flprev == NULL)
+                page = q;
+            else {
+                assert(flprev->next == fl);
+                flprev->next = q;
+            }
+            flprev = q;
+        }
+        else {
+            any_object_remaining = true;
+        }
+    }
+    if (any_object_remaining) {
+        check_order_inside_small_page(page);
+        page->nextpage = small_page_lists[szword];
+        small_page_lists[szword] = page;
+    }
+    else {
+        ((struct small_free_loc_s *)baseptr)->nextpage = free_uniform_pages;
+        free_uniform_pages = (struct small_free_loc_s *)baseptr;
+    }
 }
 
 void _stm_smallmalloc_sweep(void)
 {
     long i, szword;
     for (szword = 2; szword < GC_N_SMALL_REQUESTS; szword++) {
-        struct small_page_list_s *page = small_page_lists[szword];
-        struct small_page_list_s *nextpage;
+        struct small_free_loc_s *page = small_page_lists[szword];
+        struct small_free_loc_s *nextpage;
         small_page_lists[szword] = NULL;
 
         /* process the pages that the various segments are busy filling */
@@ -179,7 +265,7 @@
             if (*fl != NULL) {
                 /* the entry in full_pages_object_size[] should already be
                    szword.  We reset it to 0. */
-                fpsz_t *fpsz = get_fp_sz((char *)*fl);
+                fpsz_t *fpsz = get_fpsz((char *)*fl);
                 assert(fpsz->sz == szword);
                 fpsz->sz = 0;
                 sweep_small_page_partial(*fl, szword);
@@ -191,15 +277,17 @@
         while (page != NULL) {
             /* for every page in small_page_lists: assert that the
                corresponding full_pages_object_size[] entry is 0 */
-            assert(get_fp_sz((char *)page)->sz == 0);
+            assert(get_fpsz((char *)page)->sz == 0);
             nextpage = page->nextpage;
-            sweep_small_page_partial(&page->header, szword);
+            sweep_small_page_partial(page, szword);
             page = nextpage;
         }
     }
 
+    /* process the really full pages, which are the ones which still
+       have a non-zero full_pages_object_size[] entry */
     char *pageptr = uninitialized_page_stop;
-    fpsz_t *fpsz_start = get_fp_sz(pageptr);
+    fpsz_t *fpsz_start = get_fpsz(pageptr);
     fpsz_t *fpsz_end = &full_pages_object_size[PAGE_SMSIZE_END -
                                                PAGE_SMSIZE_START];
     fpsz_t *fpsz;
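
The two list manipulations reworked above (the front-to-back free list
construction in _allocate_small_slowpath() and the address-ordered
splice in sweep_small_page_partial()) can be exercised in isolation.
Below is a minimal standalone sketch, not part of this changeset: all
names are local to it, a plain malloc'ed 4096-byte block stands in for
a real uniform page, and a keep-nothing predicate replaces
_smallmalloc_sweep_keep().  Chaining through a pointer-to-pointer
'previous' cursor naturally yields increasing addresses, which is the
invariant that check_order_inside_small_page() asserts and the sweep
relies on.

    /* Standalone sketch, not part of this changeset. */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096

    struct free_loc { struct free_loc *next; };

    /* Chain every slot except the first, front to back: '*previous'
       always points at the place where the next link must be written,
       so addresses increase along the list. */
    static struct free_loc *build_free_list(char *page, size_t size)
    {
        struct free_loc *head = NULL, **previous = &head;
        size_t i;
        for (i = size; i <= PAGE_SIZE - size; i += size) {
            struct free_loc *p = (struct free_loc *)(page + i);
            *previous = p;
            previous = &p->next;
        }
        *previous = NULL;
        return head;
    }

    /* Walk the page slot by slot: already-free slots are skipped by
       following the sorted free list, dead slots are spliced in at the
       cursor, so the result stays sorted. */
    static struct free_loc *sweep_page(char *page, size_t size,
                                       struct free_loc *fl,
                                       bool (*keep)(char *))
    {
        struct free_loc *head = fl, *flprev = NULL;
        size_t i;
        for (i = 0; i <= PAGE_SIZE - size; i += size) {
            char *p = page + i;
            if (p == (char *)fl) {
                /* location is already free */
                flprev = fl;
                fl = fl->next;
            }
            else if (!keep(p)) {
                /* the object here is dead: free the location */
                struct free_loc *q = (struct free_loc *)p;
                q->next = fl;
                if (flprev == NULL)
                    head = q;
                else
                    flprev->next = q;
                flprev = q;
            }
        }
        return head;
    }

    static bool keep_none(char *p) { (void)p; return false; }

    int main(void)
    {
        size_t size = 16, n = 0;
        char *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
        struct free_loc *q, *fl = build_free_list(page, size);

        assert((char *)fl == page + size);  /* first slot not in the list */
        for (q = fl; q->next != NULL; q = q->next)
            assert(q->next > q);            /* increasing-order invariant */

        fl = sweep_page(page, size, fl, keep_none);
        for (q = fl; q != NULL; q = q->next)
            n++;
        assert(n == PAGE_SIZE / size);      /* every slot is free again */
        printf("all %zu slots free after sweeping\n", n);
        free(page);
        return 0;
    }
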
diff --git a/c7/stm/smallmalloc.h b/c7/stm/smallmalloc.h
--- a/c7/stm/smallmalloc.h
+++ b/c7/stm/smallmalloc.h
@@ -12,18 +12,16 @@
 
 
 struct small_free_loc_s {
-    struct small_free_loc_s *next;
-};
-
-struct small_page_list_s {
     /* A chained list of locations within the same page which are
        free. */
-    struct small_free_loc_s header;
+    struct small_free_loc_s *next;
 
     /* A chained list of all small pages containing objects of a given
        small size, and that have at least one free object.  It points
-       *inside* the next page, to another struct small_page_list_s. */
-    struct small_page_list_s *nextpage;
+       *inside* the next page, to another struct small_free_loc_s.  This
+       field is only meaningful on the first small_free_loc_s of a given
+       page! */
+    struct small_free_loc_s *nextpage;
 
     /* This structure is only two words, so it always fits inside one
        free slot inside the page. */
@@ -36,7 +34,7 @@
    is a chained list of fully-free pages (which can be reused for a
    different size than the one they originally contained).
 */
-static struct small_page_list_s *small_page_lists[GC_N_SMALL_REQUESTS];
+static struct small_free_loc_s *small_page_lists[GC_N_SMALL_REQUESTS];
 
 #define free_uniform_pages   (small_page_lists[0])
 
diff --git a/c7/stmgc.h b/c7/stmgc.h
--- a/c7/stmgc.h
+++ b/c7/stmgc.h
@@ -125,6 +125,7 @@
 void _stm_large_dump(void);
 bool (*_stm_largemalloc_keep)(char *data);
 void _stm_largemalloc_sweep(void);
+object_t *_stm_allocate_old_small(ssize_t size_rounded_up);
 bool (*_stm_smallmalloc_keep)(char *data);
 void _stm_smallmalloc_sweep(void);
 void _stm_start_safe_point(void);
diff --git a/c7/test/support.py b/c7/test/support.py
--- a/c7/test/support.py
+++ b/c7/test/support.py
@@ -81,6 +81,7 @@
 void *memset(void *s, int c, size_t n);
 bool (*_stm_largemalloc_keep)(char *data);
 void _stm_largemalloc_sweep(void);
+object_t *_stm_allocate_old_small(ssize_t size_rounded_up);
 bool (*_stm_smallmalloc_keep)(char *data);
 void _stm_smallmalloc_sweep(void);
 
@@ -317,6 +318,12 @@
     lib._set_type_id(o, tid)
     return o
 
+def stm_allocate_old_small(size):
+    o = lib._stm_allocate_old_small(size)
+    tid = 42 + size
+    lib._set_type_id(o, tid)
+    return o
+
 def stm_allocate(size):
     o = lib.stm_allocate(size)
     tid = 42 + size
diff --git a/c7/test/test_smallmalloc.py b/c7/test/test_smallmalloc.py
--- a/c7/test/test_smallmalloc.py
+++ b/c7/test/test_smallmalloc.py
@@ -5,13 +5,22 @@
     return int(ffi.cast("uintptr_t", p)) >> 12
 
 
-class TestLargeMalloc(BaseTest):
+class TestSmallMalloc(BaseTest):
+
+    def setup_method(self, method):
+        BaseTest.setup_method(self, method)
+        @ffi.callback("bool(char *)")
+        def keep(data):
+            return data in self.keep_me
+        lib._stm_smallmalloc_keep = keep
+        self._keepalive_keep_function = keep
+        self.keep_me = set()
 
     def test_simple_uniform(self):
-        page0 = [stm_allocate_old(16) for i in range(0, 4096, 16)]
+        page0 = [stm_allocate_old_small(16) for i in range(0, 4096, 16)]
         assert len(set(map(pageof, page0))) == 1
         #
-        page1 = [stm_allocate_old(16) for i in range(0, 4096, 16)]
+        page1 = [stm_allocate_old_small(16) for i in range(0, 4096, 16)]
         assert len(set(map(pageof, page1))) == 1
         #
         assert len(set(map(pageof, page0 + page1))) == 2
@@ -19,14 +28,14 @@
     def test_different_sizes_different_pages(self):
         seen = []
         for i in range(2, GC_N_SMALL_REQUESTS):
-            p = pageof(stm_allocate_old(8 * i))
+            p = pageof(stm_allocate_old_small(8 * i))
             assert p not in seen
             seen.append(p)
         for i in range(2, GC_N_SMALL_REQUESTS):
-            p = pageof(stm_allocate_old(8 * i))
+            p = pageof(stm_allocate_old_small(8 * i))
             assert p == seen[0]
             seen.pop(0)
 
     def test_sweep_freeing(self):
-        p1 = stm_allocate_old(16)
+        p1 = stm_allocate_old_small(16)
         lib._stm_smallmalloc_sweep()

