[pypy-commit] stmgc weakref: merge default

Raemi noreply at buildbot.pypy.org
Fri Jul 19 14:35:37 CEST 2013


Author: Remi Meier <remi.meier at gmail.com>
Branch: weakref
Changeset: r424:e9a1be2c8ade
Date: 2013-07-19 14:34 +0200
http://bitbucket.org/pypy/stmgc/changeset/e9a1be2c8ade/

Log:	merge default

diff --git a/c4/demo_random.c b/c4/demo_random.c
--- a/c4/demo_random.c
+++ b/c4/demo_random.c
@@ -72,11 +72,6 @@
 time_t default_seed;
 gcptr shared_roots[SHARED_ROOTS];
 
-#define CACHE_MASK 65535
-#define CACHE_ENTRIES ((CACHE_MASK + 1) / sizeof(char *))
-#define CACHE_AT(cache, obj) (*(gcptr *)((char *)(cache)               \
-                                         + ((revision_t)(obj) & CACHE_MASK)))
-
 struct thread_data {
     unsigned int thread_seed;
     gcptr roots[MAXROOTS];
@@ -86,7 +81,6 @@
     int steps_left;
     int interruptible;
     int atomic;
-    revision_t writeable[CACHE_ENTRIES];
 };
 __thread struct thread_data td;
 
@@ -94,7 +88,7 @@
 // helper functions
 int classify(gcptr p);
 void check(gcptr p);
-
+int in_nursery(gcptr obj);
 static int is_private(gcptr P)
 {
   return (P->h_revision == stm_private_rev_num) ||
@@ -171,7 +165,7 @@
     return x;
 }
 
-void push_roots(int with_cache)
+void push_roots()
 {
     int i;
     for (i = 0; i < td.num_roots; i++) {
@@ -179,30 +173,11 @@
         if (td.roots[i])
             stm_push_root(td.roots[i]);
     }
-
-    if (with_cache) {
-        stm_push_root(NULL);
-        for (i = 0; i < CACHE_ENTRIES; i++) {
-            if (td.writeable[i])
-                stm_push_root((gcptr)td.writeable[i]);
-        }
-    }
 }
 
-void pop_roots(int with_cache)
+void pop_roots()
 {
     int i;
-    /* some objects may have changed positions */
-    memset(td.writeable, 0, sizeof(td.writeable));
-
-    if (with_cache) {
-        gcptr obj = stm_pop_root();
-        while (obj) {
-            CACHE_AT(td.writeable, obj) = obj;
-            obj = stm_pop_root();
-        }
-    }
-
     for (i = td.num_roots - 1; i >= 0; i--) {
         if (td.roots[i])
             td.roots[i] = stm_pop_root();
@@ -220,9 +195,9 @@
 nodeptr allocate_node()
 {
     nodeptr r;
-    push_roots(1);
+    push_roots();
     r = (nodeptr)stm_allocate(sizeof(struct node), GCTID_STRUCT_NODE);
-    pop_roots(1);
+    pop_roots();
     return r;
 }
 
@@ -281,8 +256,7 @@
         if (p->h_original && !(p->h_tid & GCFLAG_PREBUILT_ORIGINAL)) {
             // must point to valid old object
             gcptr id = (gcptr)p->h_original;
-            assert(id->h_tid & GCFLAG_OLD);
-            check_not_free(id);
+            assert(!in_nursery(id));
 #ifdef _GC_DEBUG
             if (!is_shared_prebuilt(id) && !(id->h_tid & GCFLAG_PREBUILT))
                 assert(!is_free_old(id));
@@ -308,7 +282,6 @@
     if (p != NULL) {
         check(p);
         w = stm_write_barrier(p);
-        CACHE_AT(td.writeable, w) = w;
         check(w);
         assert(is_private(w));
     }
@@ -425,22 +398,22 @@
 {
     int k = get_rand(100);
     if (k < 10) {
-        push_roots(1);
+        push_roots();
         stm_push_root(p);
         stm_become_inevitable("fun");
         p = stm_pop_root();
-        pop_roots(1);
+        pop_roots();
     } 
     else if (k < 40) {
-        push_roots(1);
+        push_roots();
         stmgc_minor_collect();
-        pop_roots(1);
+        pop_roots();
         p = NULL;
     } else if (k < 41 && DO_MAJOR_COLLECTS) {
         fprintf(stdout, "major collect\n");
-        push_roots(1);
+        push_roots();
         stmgcpage_possibly_major_collect(1);
-        pop_roots(1);
+        pop_roots();
         p = NULL;
     }
     return p;
@@ -479,10 +452,7 @@
         break;
     case 7: // set 'p' as *next in one of the roots
         check(_r);
-        if (CACHE_AT(td.writeable, _r) == _r)
-            w_r = (nodeptr)_r;
-        else
-            w_r = (nodeptr)write_barrier(_r);
+        w_r = (nodeptr)write_barrier(_r);
         check((gcptr)w_r);
         check(p);
         w_r->next = (struct node*)p;
@@ -582,10 +552,7 @@
             assert(w_t->id == stm_id((gcptr)_t));
         }
         else {
-            if (CACHE_AT(td.writeable, _t) == _t)
-                w_t = (nodeptr)_t;
-            else
-                w_t = (nodeptr)write_barrier(_t);
+            w_t = (nodeptr)write_barrier(_t);
             w_t->id = stm_id((gcptr)w_t);
             assert(w_t->id == stm_id((gcptr)_t));
         }
@@ -601,10 +568,7 @@
             assert(w_t->hash == stm_hash((gcptr)_t));
         }
         else {
-            if (CACHE_AT(td.writeable, _t) == _t)
-                w_t = (nodeptr)_t;
-            else
-                w_t = (nodeptr)write_barrier(_t);
+            w_t = (nodeptr)write_barrier(_t);
             w_t->hash = stm_hash((gcptr)w_t);
             assert(w_t->hash == stm_hash((gcptr)_t));
         }
@@ -656,7 +620,7 @@
 
 void transaction_break()
 {
-    push_roots(0);
+    push_roots();
     td.interruptible = 1;
     
     copy_roots(td.roots, td.roots_outside_perform, td.num_roots);
@@ -668,9 +632,7 @@
     copy_roots(td.roots_outside_perform, td.roots, td.num_roots);
     
     td.interruptible = 0;
-    pop_roots(0);
-
-    /* done by pop_roots() memset(&td.writeable, 0, sizeof(td.writeable)); */
+    pop_roots();
 }
 
 
@@ -685,8 +647,8 @@
     assert(end_marker == END_MARKER_ON || end_marker == END_MARKER_OFF);
     arg1 = stm_pop_root();
     assert(arg1 == NULL);
-    pop_roots(0);
-    push_roots(0);
+    pop_roots();
+    push_roots();
     stm_push_root(arg1);
     stm_push_root(end_marker);
 
@@ -702,9 +664,6 @@
 {
     gcptr p = NULL;
 
-    // clear cache of writeables:
-    memset(&td.writeable, 0, sizeof(td.writeable));
-
     while (td.steps_left-->0 || td.atomic) {
         if (td.steps_left % 8 == 0)
             fprintf(stdout, "#");
diff --git a/c4/et.c b/c4/et.c
--- a/c4/et.c
+++ b/c4/et.c
@@ -1115,7 +1115,7 @@
 #endif
       L->h_revision = new_revision;
 
-      gcptr stub = stm_stub_malloc(d->public_descriptor);
+      gcptr stub = stm_stub_malloc(d->public_descriptor, 0);
       stub->h_tid = (L->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC
                                                    | GCFLAG_STUB
                                                    | GCFLAG_OLD;
diff --git a/c4/extra.c b/c4/extra.c
--- a/c4/extra.c
+++ b/c4/extra.c
@@ -107,10 +107,12 @@
     else {
         /* must create shadow original object XXX: or use
            backup, if exists */
-        
-        /* XXX use stmgcpage_malloc() directly, we don't need to copy
-         * the contents yet */
-        gcptr O = stmgc_duplicate_old(p);
+        gcptr O = (gcptr)stmgcpage_malloc(stmgc_size(p));
+        memcpy(O, p, stmgc_size(p)); /* major collections still depend
+                                      on some contents of the id copy;
+                                      remove after fixing that XXX */
+        O->h_tid |= GCFLAG_OLD;
+
         p->h_original = (revision_t)O;
         p->h_tid |= GCFLAG_HAS_ID;
         
diff --git a/c4/nursery.c b/c4/nursery.c
--- a/c4/nursery.c
+++ b/c4/nursery.c
@@ -133,9 +133,6 @@
 }
 
 /************************************************************/
-/* list for private/protected, old roots that need to be
-   kept in old_objects_to_trace */
-static __thread struct GcPtrList private_or_protected_roots = {0, 0, NULL};
 
 static inline gcptr create_old_object_copy(gcptr obj)
 {
@@ -215,22 +212,6 @@
                                    (revision_t)END_MARKER_ON)) {
             /* 'item' is a regular, non-null pointer */
             visit_if_young(end);
-            item = *end;
-            /* if private or protected, this object needs to be
-               traced again in the next minor_collect if it is
-               currently in old_objects_to_trace. Because then
-               it may be seen as write-ready in the view of
-               someone:
-               pw = write_barrier(); push_root(pw);
-               minor_collect(); pw = pop_root(); // pw still write-ready
-            */
-            if (item
-                && !(item->h_tid & GCFLAG_WRITE_BARRIER) /* not set in
-                                                          obj_to_trace*/
-                && (item->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED
-                    || item->h_revision == stm_private_rev_num)) {
-                gcptrlist_insert(&private_or_protected_roots, item);
-            }
         }
         else if (item != NULL) {
             if (item == END_MARKER_OFF)
@@ -385,19 +366,6 @@
 
         stmgc_trace(obj, &visit_if_young);
     }
-
-    while (gcptrlist_size(&private_or_protected_roots) > 0) {
-        gcptr obj = gcptrlist_pop(&private_or_protected_roots);
-        /* if it has the write_barrier flag, clear it so that
-           it doesn't get inserted twice by a later write-barrier */
-        if (obj->h_tid & GCFLAG_WRITE_BARRIER) {
-            /* only insert those that were in old_obj_to_trace
-               and that we didn't insert already */
-            obj->h_tid &= ~GCFLAG_WRITE_BARRIER;
-            gcptrlist_insert(&d->old_objects_to_trace, obj);
-            dprintf(("re-add %p to old_objects_to_trace\n", obj));
-        }
-    }
 }
 
 static void fix_list_of_read_objects(struct tx_descriptor *d)
@@ -446,7 +414,7 @@
 
 static void teardown_minor_collect(struct tx_descriptor *d)
 {
-    //assert(gcptrlist_size(&d->old_objects_to_trace) == 0);
+    assert(gcptrlist_size(&d->old_objects_to_trace) == 0);
     assert(gcptrlist_size(&d->public_with_young_copy) == 0);
     assert(gcptrlist_size(&d->young_weakrefs) == 0);
     assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0);
diff --git a/c4/steal.c b/c4/steal.c
--- a/c4/steal.c
+++ b/c4/steal.c
@@ -1,11 +1,13 @@
 #include "stmimpl.h"
 
 
-gcptr stm_stub_malloc(struct tx_public_descriptor *pd)
+gcptr stm_stub_malloc(struct tx_public_descriptor *pd, size_t minsize)
 {
     assert(pd->collection_lock != 0);
+    if (minsize < sizeof(struct stm_stub_s))
+        minsize = sizeof(struct stm_stub_s);
 
-    gcptr p = stmgcpage_malloc(sizeof(struct stm_stub_s));
+    gcptr p = stmgcpage_malloc(minsize);
     STUB_THREAD(p) = pd;
     return p;
 }
@@ -85,8 +87,20 @@
     assert(stub->h_revision == (((revision_t)obj) | 2));
     goto done;
 
- not_found:
-    stub = stm_stub_malloc(sd->foreign_pd);
+ not_found:;
+    size_t size = 0;
+    if (!obj->h_original && !(obj->h_tid & GCFLAG_OLD)) {
+        /* There shouldn't be a public, young object without
+           an h_original, but there can be private/protected ones.
+           Here we have a young protected copy without an h_original.
+           The stub we allocate will become its h_original, so it
+           must be big enough for a major collection to copy the
+           object over it later. */
+        assert(!(obj->h_tid & GCFLAG_PUBLIC));
+        
+        size = stmgc_size(obj);
+    }
+    stub = stm_stub_malloc(sd->foreign_pd, size);
     stub->h_tid = (obj->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC
                                                    | GCFLAG_STUB
                                                    | GCFLAG_OLD;
@@ -98,10 +112,9 @@
         stub->h_original = (revision_t)obj;
     }
     else {
-        /* There shouldn't be a public, young object without
-           a h_original. But there can be protected ones. */
-        assert(!(obj->h_tid & GCFLAG_PUBLIC));
-        obj->h_original = (revision_t)stub;        
+        /* this is the big-stub case described above */
+        obj->h_original = (revision_t)stub;
+        stub->h_original = 0;   /* stm_stub_malloc() does not zero it */
         if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) {
             ((gcptr)obj->h_revision)->h_original = (revision_t)stub;
         }
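
The net effect of this "big stub" path can be stated as a post-condition
(an informal sketch; the checker function is hypothetical and only uses
flags and fields that appear in this changeset):

    /* hypothetical invariant checker, assuming the stmgc internal headers */
    static void check_big_stub_invariant(gcptr obj, gcptr stub)
    {
        /* the stub doubles as the object's official original copy */
        assert(obj->h_original == (revision_t)stub);
        assert(stub->h_original == 0);
        assert(stub->h_tid & GCFLAG_PUBLIC);
        assert(stub->h_tid & GCFLAG_STUB);
        assert(stub->h_tid & GCFLAG_OLD);
        /* the stub was allocated with at least
           max(sizeof(struct stm_stub_s), stmgc_size(obj)) bytes, so a
           later major collection can copy the young protected object
           over it in place */
    }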
diff --git a/c4/steal.h b/c4/steal.h
--- a/c4/steal.h
+++ b/c4/steal.h
@@ -9,7 +9,7 @@
 
 #define STUB_THREAD(h)    (((struct stm_stub_s *)(h))->s_thread)
 
-gcptr stm_stub_malloc(struct tx_public_descriptor *);
+gcptr stm_stub_malloc(struct tx_public_descriptor *, size_t minsize);
 void stm_steal_stub(gcptr);
 gcptr stm_get_stolen_obj(long index);   /* debugging */
 void stm_normalize_stolen_objects(struct tx_descriptor *);
diff --git a/c4/stmgc.h b/c4/stmgc.h
--- a/c4/stmgc.h
+++ b/c4/stmgc.h
@@ -42,8 +42,10 @@
 _Bool stm_pointer_equal(gcptr, gcptr);
 
 /* to push/pop objects into the local shadowstack */
-static inline void stm_push_root(gcptr);
-static inline gcptr stm_pop_root(void);
+#if 0     // (optimized version below)
+void stm_push_root(gcptr);
+gcptr stm_pop_root(void);
+#endif
 
 /* initialize/deinitialize the stm framework in the current thread */
 void stm_initialize(void);
@@ -55,15 +57,25 @@
 int stm_enter_callback_call(void);
 void stm_leave_callback_call(int);
 
-/* read/write barriers (the most general versions only for now) */
+/* read/write barriers (the most general versions only for now).
+
+   - the read barrier must be applied before reading from an object.
+     The result stays valid as long as we remain in the same
+     transaction and stm_write_barrier() is not called on the object.
+
+   - the write barrier must be applied before writing to an object.
+     The result is valid for a shorter time: we must call
+     stm_write_barrier() again after ending the transaction or after
+     any operation that may collect (e.g. stm_allocate()).
+*/
 static inline gcptr stm_read_barrier(gcptr);
 static inline gcptr stm_write_barrier(gcptr);
 
 /* start a new transaction, calls callback(), and when it returns
    finish that transaction.  callback() is called with the 'arg'
    provided, and with a retry_counter number.  Must save roots around
-   this call.  If callback() returns a value > 0, it is called
-   again. */
+   this call.  The callback() is called repeatedly as long as it
+   returns a value > 0. */
 void stm_perform_transaction(gcptr arg, int (*callback)(gcptr, int));
 
 /* finish the current transaction, start a new one, or turn the current
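
As a usage illustration of the barrier rules and the callback contract
documented above, here is a minimal sketch.  It borrows 'struct node'
and GCTID_STRUCT_NODE from demo_random.c and is not part of this
changeset:

    #include "stmgc.h"

    static int example_callback(gcptr arg, int retry_counter)
    {
        /* read barrier before reading */
        struct node *r = (struct node *)stm_read_barrier(arg);
        struct node *old_next = r->next;
        (void)old_next;

        /* write barrier before writing */
        struct node *w = (struct node *)stm_write_barrier(arg);

        /* stm_allocate() may collect: save the root, then apply the
           write barrier again afterwards */
        stm_push_root((gcptr)w);
        gcptr fresh = stm_allocate(sizeof(struct node),
                                   GCTID_STRUCT_NODE);
        w = (struct node *)stm_pop_root();
        w = (struct node *)stm_write_barrier((gcptr)w);
        w->next = (struct node *)fresh;

        return 0;   /* a value > 0 would make stm_perform_transaction()
                       call the callback again */
    }

    /* usage, with roots saved around the call:
       stm_perform_transaction(p, example_callback);  */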
diff --git a/c4/test/test_et.py b/c4/test/test_et.py
--- a/c4/test/test_et.py
+++ b/c4/test/test_et.py
@@ -205,6 +205,8 @@
     assert list_of_read_objects() == [p2]
 
 def test_write_barrier_after_minor_collect():
+    # should fail
+    py.test.skip("should fail now")
     p = oalloc_refs(1)
     pw = lib.stm_write_barrier(p)
 
@@ -220,8 +222,10 @@
     assert pw.h_tid & GCFLAG_OLD
     rawsetptr(pw, 0, r)
 
-    # pw needs to be readded to old_objects_to_trace
-    # before the next minor gc in order for this test to pass
+    # pw is not in old_objects_to_trace;
+    # a repeated write_barrier before
+    # rawsetptr() would fix that.
+    
     lib.stm_push_root(r)
     minor_collect()
     minor_collect()
@@ -232,6 +236,10 @@
     
     pr = lib.stm_read_barrier(p)
     assert r != r2
+    # these will fail because pw/pr were
+    # not traced in the last minor_collect:
+    # they were never registered in
+    # old_objects_to_trace.
     assert getptr(pr, 0) != r
     assert getptr(pr, 0) == r2
 
@@ -251,6 +259,7 @@
     assert getptr(pr, 0) != q2
 
 def test_write_barrier_after_minor_collect_young_to_old():
+    py.test.skip("should fail now")
     p = nalloc_refs(1)
     pw = lib.stm_write_barrier(p)
 
diff --git a/c4/test/test_nursery.py b/c4/test/test_nursery.py
--- a/c4/test/test_nursery.py
+++ b/c4/test/test_nursery.py
@@ -200,6 +200,84 @@
     check_not_free(p2)
     assert classify(p2) == "private"
 
+def test_old_private_from_protected_to_young_private_2():
+    py.test.skip("not valid")
+    p0 = nalloc_refs(1)
+    lib.stm_commit_transaction()
+    lib.stm_begin_inevitable_transaction()
+    lib.setptr(p0, 0, ffi.NULL)
+    assert classify(p0) == "private_from_protected"
+    assert lib.in_nursery(p0)      # a young private_from_protected
+    #
+    lib.stm_push_root(p0)
+    minor_collect()
+    p0 = lib.stm_pop_root()
+    assert classify(p0) == "private_from_protected"
+    assert not lib.in_nursery(p0)  # becomes an old private_from_protected
+    #
+    # Because it's a private_from_protected, its h_revision is a pointer
+    # to the backup copy, and not stm_private_rev_num.  It means that the
+    # write barrier will always enter its slow path, even though the
+    # GCFLAG_WRITE_BARRIER is not set.
+    assert p0.h_revision != lib.get_private_rev_num()
+    assert not (p0.h_tid & GCFLAG_WRITE_BARRIER)
+    #
+    p1 = nalloc(HDR)
+    lib.setptr(p0, 0, p1)          # should trigger the write barrier again
+    assert classify(p0) == "private_from_protected"
+    lib.stm_push_root(p0)
+    minor_collect()
+    p0b = lib.stm_pop_root()
+    assert p0b == p0
+    check_nursery_free(p1)
+    assert classify(p0) == "private_from_protected"
+    p2 = lib.getptr(p0, 0)
+    assert not lib.in_nursery(p2)
+    check_not_free(p2)
+    assert classify(p2) == "private"
+
+def test_old_private_from_protected_to_young_private_3():
+    p0 = palloc_refs(1)
+    pw = lib.stm_write_barrier(p0)
+    lib.stm_commit_transaction()
+    lib.stm_begin_inevitable_transaction()
+    pr = lib.stm_read_barrier(p0)
+    assert classify(pr) == "protected"
+    assert lib.in_nursery(pr) # a young protected
+    #
+    minor_collect()
+    # each minor collect adds WRITE_BARRIER to protected/private
+    # objects it moves out of the nursery
+    pr = lib.stm_read_barrier(p0)
+    assert pr.h_tid & GCFLAG_WRITE_BARRIER
+    pw = lib.stm_write_barrier(pr)
+    # added to old_obj_to_trace
+    assert not (pw.h_tid & GCFLAG_WRITE_BARRIER)
+
+    lib.setptr(pw, 0, ffi.NULL)
+    assert classify(pw) == "private_from_protected"
+    assert not lib.in_nursery(pw)
+
+    assert pw.h_revision != lib.get_private_rev_num()
+    assert not (pw.h_tid & GCFLAG_WRITE_BARRIER)
+    #
+    
+    lib.stm_push_root(pw)
+    minor_collect()
+    p1 = nalloc(HDR)
+    pw = lib.stm_pop_root()
+    assert pw.h_tid & GCFLAG_WRITE_BARRIER
+    lib.setptr(pw, 0, p1)          # should trigger the write barrier again
+    assert classify(pr) == "private_from_protected"
+    minor_collect()
+    check_nursery_free(p1)
+    pr = lib.stm_read_barrier(p0)
+    assert classify(pr) == "private_from_protected"
+    p2 = lib.getptr(pr, 0)
+    assert not lib.in_nursery(p2)
+    check_not_free(p2)
+    assert classify(p2) == "private"
+
 def test_new_version():
     p1 = oalloc(HDR)
     assert lib.stm_write_barrier(p1) == p1
diff --git a/duhton/listobject.c b/duhton/listobject.c
--- a/duhton/listobject.c
+++ b/duhton/listobject.c
@@ -75,7 +75,7 @@
 
 void _list_append(DuListObject *ob, DuObject *x)
 {
-    _du_write1(ob);
+    _du_read1(ob);
     DuTupleObject *olditems = ob->ob_tuple;
 
     _du_read1(olditems);
@@ -85,6 +85,8 @@
     DuTupleObject *newitems = DuTuple_New(newcount);
     _du_restore3(ob, x, olditems);
 
+    _du_write1(ob);
+
     for (i=0; i<newcount-1; i++)
         newitems->ob_items[i] = olditems->ob_items[i];
     newitems->ob_items[newcount-1] = x;
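
The duhton change applies the sharpened write-barrier rule from stmgc.h
above: DuTuple_New() may allocate and therefore collect, which
invalidates any stm_write_barrier() result taken earlier, so
_list_append() now only reads before the allocation and takes the write
barrier afterwards.  Condensed, the new ordering is (a sketch that
elides the copy loop; _du_save3() and 'ob_count' are assumed from
duhton's conventions):

    _du_read1(ob);                   /* read barrier: inspect old tuple */
    DuTupleObject *olditems = ob->ob_tuple;
    _du_read1(olditems);
    int newcount = olditems->ob_count + 1;
    _du_save3(ob, x, olditems);      /* keep roots across allocation */
    DuTupleObject *newitems = DuTuple_New(newcount);  /* may collect */
    _du_restore3(ob, x, olditems);
    _du_write1(ob);                  /* write barrier after the last
                                        possible collection point */
    /* ...copy olditems into newitems, append x... */
    ob->ob_tuple = newitems;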

