[pypy-commit] stmgc c7: try to implement more general safe-points. not sure if successful

Remi Meier noreply at buildbot.pypy.org
Wed Jan 29 15:07:26 CET 2014


Author: Remi Meier
Branch: c7
Changeset: r689:4d91804e80ed
Date: 2014-01-29 15:07 +0100
http://bitbucket.org/pypy/stmgc/changeset/4d91804e80ed/

Log:	try to implement more general safe-points. not sure if successful
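
The patch replaces the old parameterless safe-point calls with a pair that
takes a flags argument.  Reading the stmsync.h hunk further down, the
interface roughly becomes the following; the comments are my reading of the
hunks, not text from the patch:

    /* sketch of the new interface, names taken from c7/stmsync.h below */
    enum {
        LOCK_COLLECT   = (1 << 0),  /* also drop (and later retake) the
                                       collection read-lock across the
                                       safe point */
        LOCK_EXCLUSIVE = (1 << 1),  /* use the writer side of rw_shared_lock
                                       instead of the reader side */
    };

    void _stm_start_safe_point(uint8_t flags);  /* release the locks named
                                                   in flags */
    void _stm_stop_safe_point(uint8_t flags);   /* take them back; aborts the
                                                   transaction if need_abort
                                                   was set in the meantime */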

diff --git a/c7/core.c b/c7/core.c
--- a/c7/core.c
+++ b/c7/core.c
@@ -138,10 +138,10 @@
         if (_STM_TL->active == 2) {
             /* we must succeed! */
             _stm_dbg_get_tl(prev_owner - 1)->need_abort = 1;
-            _stm_start_no_collect_safe_point();
-            /* XXX: not good */
+            _stm_start_safe_point(0);
+            /* XXX: not good, maybe should be signalled by other thread */
             usleep(1);
-            _stm_stop_no_collect_safe_point();
+            _stm_stop_safe_point(0);
             goto retry;
         }
         /* XXXXXX */
@@ -353,27 +353,21 @@
 
     uint8_t our_lock = _STM_TL->thread_num + 1;
     do {
-        _stm_start_safe_point();
-
-        stm_start_exclusive_lock();
-        if (_STM_TL->need_abort) {
-            stm_stop_exclusive_lock();
-            stm_start_shared_lock();
-            stm_abort_transaction();
-        }
+        _stm_start_safe_point(LOCK_COLLECT);
+        _stm_stop_safe_point(LOCK_COLLECT|LOCK_EXCLUSIVE);
 
         if (!inevitable_lock)
             break;
 
-        stm_stop_exclusive_lock();
-        _stm_stop_safe_point();
+        _stm_start_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT);
+        _stm_stop_safe_point(LOCK_COLLECT);
     } while (1);
 
     inevitable_lock = our_lock;
     _STM_TL->active = 2;
-    stm_stop_exclusive_lock();
     
-    _stm_stop_safe_point();
+    _stm_start_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT);
+    _stm_stop_safe_point(LOCK_COLLECT);
 }
 
 void stm_start_inevitable_transaction()
@@ -385,8 +379,8 @@
 void stm_start_transaction(jmpbufptr_t *jmpbufptr)
 {
     assert(!_STM_TL->active);
-
-    stm_start_shared_lock();
+    
+    _stm_stop_safe_point(LOCK_COLLECT);
     
     uint8_t old_rv = _STM_TL->transaction_read_version;
     _STM_TL->transaction_read_version = old_rv + 1;
@@ -415,28 +409,23 @@
     /* Some operations require us to have the EXCLUSIVE lock */
     if (_STM_TL->active == 1) {
         while (1) {
-            _stm_start_safe_point();
+            _stm_start_safe_point(LOCK_COLLECT);
             usleep(1);          /* XXX: better algorithm that allows
                                    for waiting on a mutex */
-            stm_start_exclusive_lock();
-            if (_STM_TL->need_abort) {
-                stm_stop_exclusive_lock();
-                stm_start_shared_lock();
-                stm_abort_transaction();
-            }
+            _stm_stop_safe_point(LOCK_COLLECT|LOCK_EXCLUSIVE);
             
             if (!inevitable_lock)
                 break;
-            stm_stop_exclusive_lock();
-            _stm_stop_safe_point();
+
+            _stm_start_safe_point(LOCK_COLLECT|LOCK_EXCLUSIVE);
+            _stm_stop_safe_point(LOCK_COLLECT);
         }
         /* we have the exclusive lock */
     } else {
         /* inevitable! no other transaction could have committed
            or aborted us */
-        stm_stop_shared_lock();
-        stm_start_exclusive_lock();
-        assert(!_STM_TL->need_abort);
+        _stm_start_safe_point(LOCK_COLLECT);
+        _stm_stop_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT);
         inevitable_lock = 0;
     }
 
@@ -451,7 +440,7 @@
 
  
     _STM_TL->active = 0;
-    stm_stop_exclusive_lock();
+    _stm_start_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT);
     fprintf(stderr, "%c", 'C'+_STM_TL->thread_num*32);
 }
 
@@ -504,13 +493,15 @@
     assert(_STM_TL->jmpbufptr != NULL);
     assert(_STM_TL->jmpbufptr != (jmpbufptr_t *)-1);   /* for tests only */
     _STM_TL->active = 0;
-    stm_stop_shared_lock();
+    /* _STM_TL->need_abort = 0; */
+
+    _stm_start_safe_point(LOCK_COLLECT);
+
     fprintf(stderr, "%c", 'A'+_STM_TL->thread_num*32);
 
     /* reset all the modified objects (incl. re-adding GCFLAG_WRITE_BARRIER) */
     reset_modified_from_other_threads();
     stm_list_clear(_STM_TL->modified_objects);
 
-    
     __builtin_longjmp(*_STM_TL->jmpbufptr, 1);
 }
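
In core.c the explicit lock juggling that used to surround these spots
(stm_start/stop_shared_lock, stm_start/stop_exclusive_lock plus a hand-rolled
need_abort check) is folded into back-to-back safe-point calls.  Condensing
the hunks above, the idiom for trading the reader lock for the writer lock
and back is the following; this only restates calls that are already in the
patch:

    /* upgrade: open a safe point, come back holding the writer side of
       rw_shared_lock (as in the hunks above) */
    _stm_start_safe_point(LOCK_COLLECT);
    _stm_stop_safe_point(LOCK_COLLECT | LOCK_EXCLUSIVE);

    /* ... work that must not run concurrently with other transactions ... */

    /* downgrade: give the writer lock back and continue as a normal reader */
    _stm_start_safe_point(LOCK_EXCLUSIVE | LOCK_COLLECT);
    _stm_stop_safe_point(LOCK_COLLECT);

The need_abort check that the removed lines did by hand now happens inside
_stm_stop_safe_point itself (see the stmsync.c hunk below).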
diff --git a/c7/nursery.c b/c7/nursery.c
--- a/c7/nursery.c
+++ b/c7/nursery.c
@@ -176,9 +176,14 @@
 
 localchar_t *collect_and_reserve(size_t size)
 {
-    _stm_start_safe_point();
+    _stm_start_safe_point(0);    /* don't release the COLLECT lock,
+                                   that needs to be done afterwards if
+                                   we want a major collection */
     minor_collect();
-    _stm_stop_safe_point();
+    _stm_stop_safe_point(0);
+
+    /* XXX: if we_want_major_collect: acquire EXCLUSIVE & COLLECT lock
+       and do it */
 
     localchar_t *current = _STM_TL->nursery_current;
     _STM_TL->nursery_current = current + size;
@@ -188,8 +193,10 @@
 
 object_t *stm_allocate(size_t size)
 {
-    _stm_start_safe_point();
-    _stm_stop_safe_point();
+    _stm_start_safe_point(LOCK_COLLECT);
+    /* all collections may happen here */
+    _stm_stop_safe_point(LOCK_COLLECT);
+    
     assert(_STM_TL->active);
     assert(size % 8 == 0);
     assert(16 <= size && size < NB_NURSERY_PAGES * 4096);//XXX
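
The two nursery.c call sites above differ only in whether they pass
LOCK_COLLECT; put side by side (again only condensing the hunks above):

    /* stm_allocate(): a full safe point, any collection may run here */
    _stm_start_safe_point(LOCK_COLLECT);
    _stm_stop_safe_point(LOCK_COLLECT);

    /* collect_and_reserve(): pass 0 so the collection read-lock stays held,
       which keeps a major collection from starting while this thread does
       its own minor_collect() */
    _stm_start_safe_point(0);
    minor_collect();
    _stm_stop_safe_point(0);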
diff --git a/c7/stmsync.c b/c7/stmsync.c
--- a/c7/stmsync.c
+++ b/c7/stmsync.c
@@ -1,8 +1,11 @@
+#include <assert.h>
+#include <string.h>
+#include <unistd.h>
+
+
 #include "stmsync.h"
 #include "core.h"
 #include "reader_writer_lock.h"
-#include <assert.h>
-#include <string.h>
 
 
 /* a multi-reader, single-writer lock: transactions normally take a reader
@@ -10,6 +13,8 @@
    we take a writer lock to "stop the world". */
 
 rwticket rw_shared_lock;        /* the "GIL" */
+rwticket rw_collection_lock;    /* for major collections */
+
 
 void _stm_reset_shared_lock()
 {
@@ -17,16 +22,45 @@
     assert(!rwticket_wrunlock(&rw_shared_lock));
 
     memset(&rw_shared_lock, 0, sizeof(rwticket));
+
+    assert(!rwticket_wrtrylock(&rw_collection_lock));
+    assert(!rwticket_wrunlock(&rw_collection_lock));
+
+    memset(&rw_collection_lock, 0, sizeof(rwticket));
+}
+
+void stm_acquire_collection_lock()
+{
+    /* we must have the exclusive lock here and
+       not the collection lock!! */
+    while (1) {
+        if (!rwticket_wrtrylock(&rw_collection_lock))
+            break;              /* acquired! */
+        
+        stm_stop_exclusive_lock();
+        usleep(1);
+        stm_start_exclusive_lock();
+        if (_STM_TL->need_abort) {
+            stm_stop_exclusive_lock();
+            stm_start_shared_lock();
+            stm_abort_transaction();
+        }
+    }
 }
 
 void stm_start_shared_lock(void)
 {
-    rwticket_rdlock(&rw_shared_lock);
+    rwticket_rdlock(&rw_shared_lock); 
 }
 
-void stm_stop_shared_lock(void)
+void stm_stop_shared_lock()
 {
-    rwticket_rdunlock(&rw_shared_lock);
+    rwticket_rdunlock(&rw_shared_lock); 
+}
+
+void stm_start_exclusive_lock(void)
+{
+    rwticket_wrlock(&rw_shared_lock);
 }
 
 void stm_stop_exclusive_lock(void)
@@ -34,20 +68,52 @@
     rwticket_wrunlock(&rw_shared_lock);
 }
 
-void stm_start_exclusive_lock(void)
+/* _stm_start_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT)
+   -> release the exclusive lock and also the collect-read-lock */
+void _stm_start_safe_point(uint8_t flags)
 {
-    rwticket_wrlock(&rw_shared_lock);
+    if (flags & LOCK_EXCLUSIVE)
+        stm_stop_exclusive_lock();
+    else
+        stm_stop_shared_lock();
+    
+    if (flags & LOCK_COLLECT)
+        rwticket_rdunlock(&rw_collection_lock);
 }
 
-void _stm_start_safe_point(void)
+/*
+  _stm_stop_safe_point(LOCK_COLLECT|LOCK_EXCLUSIVE);
+  -> reacquire the collect-read-lock and the exclusive lock
+ */
+void _stm_stop_safe_point(uint8_t flags)
 {
-    assert(!_STM_TL->need_abort);
-    stm_stop_shared_lock();
+    if (flags & LOCK_EXCLUSIVE)
+        stm_start_exclusive_lock();
+    else
+        stm_start_shared_lock();
+    
+    if (!(flags & LOCK_COLLECT)) { /* if we released the collection lock */
+        /* acquire read-collection. always succeeds because
+           if there was a write-collection holder we would
+           also not have gotten the shared_lock */
+        rwticket_rdlock(&rw_collection_lock);
+    }
+    
+    if (_STM_TL->active && _STM_TL->need_abort) {
+        if (flags & LOCK_EXCLUSIVE) {
+            /* restore to shared-mode with the collection lock */
+            stm_stop_exclusive_lock();
+            stm_start_shared_lock();
+            if (flags & LOCK_COLLECT)
+                rwticket_rdlock(&rw_collection_lock);
+            stm_abort_transaction();
+        } else {
+            if (flags & LOCK_COLLECT)
+                rwticket_rdlock(&rw_collection_lock);
+            stm_abort_transaction();
+        }
+    }
 }
 
-void _stm_stop_safe_point(void)
-{
-    stm_start_shared_lock();
-    if (_STM_TL->need_abort)
-        stm_abort_transaction();
-}
+
+
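
With the need_abort test now inside _stm_stop_safe_point, aborting another
thread comes down to setting its flag and waiting for it to reach any safe
point.  A minimal sketch of the two sides, copying the names used in the
patch:

    /* thread A, which decided that the previous owner must abort
       (see the first core.c hunk of this patch): */
    _stm_dbg_get_tl(prev_owner - 1)->need_abort = 1;

    /* thread B, at whatever safe point it reaches next: */
    _stm_start_safe_point(LOCK_COLLECT);
    /* ... other threads make progress here ... */
    _stm_stop_safe_point(LOCK_COLLECT);  /* sees need_abort, calls
                                            stm_abort_transaction(), which
                                            longjmp()s back to the
                                            transaction's jmpbufptr */

For now thread A still just usleep()s and retries; the patch's own XXX
comment notes that being signalled by the other thread would be better.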
diff --git a/c7/stmsync.h b/c7/stmsync.h
--- a/c7/stmsync.h
+++ b/c7/stmsync.h
@@ -1,14 +1,16 @@
 
+#include <stdint.h>
 
 void stm_start_shared_lock(void);
 void stm_stop_shared_lock(void);
 void stm_stop_exclusive_lock(void);
 void stm_start_exclusive_lock(void);
-void _stm_start_safe_point(void);
-void _stm_stop_safe_point(void);
+void _stm_start_safe_point(uint8_t flags);
+void _stm_stop_safe_point(uint8_t flags);
 void _stm_reset_shared_lock(void);
 
-/* XXX: major collections must not be possible here: */
-#define _stm_start_no_collect_safe_point(void) _stm_start_safe_point()
-#define _stm_stop_no_collect_safe_point(void) _stm_stop_safe_point()
+enum {
+    LOCK_COLLECT = (1 << 0),
+    LOCK_EXCLUSIVE = (1 << 1),
+};
 
diff --git a/c7/test/support.py b/c7/test/support.py
--- a/c7/test/support.py
+++ b/c7/test/support.py
@@ -58,8 +58,8 @@
 bool _stm_is_young(object_t *o);
 object_t *_stm_allocate_old(size_t size);
 
-void _stm_start_safe_point(void);
-void _stm_stop_safe_point(void);
+void _stm_start_safe_point(uint8_t);
+void _stm_stop_safe_point(uint8_t);
 bool _stm_check_stop_safe_point(void);
 
 void _set_type_id(object_t *obj, uint32_t h);
@@ -91,7 +91,12 @@
 enum {
     GCFLAG_WRITE_BARRIER = 1,
     GCFLAG_NOT_COMMITTED = 2,
-    GCFLAG_MOVED = 4
+    GCFLAG_MOVED = 4,
+};
+
+enum {
+    LOCK_COLLECT = 1,
+    LOCK_EXCLUSIVE = 2,
 };
 
 
@@ -185,7 +190,7 @@
     if (__builtin_setjmp(here) == 0) { // returned directly
          assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1);
          _STM_TL->jmpbufptr = &here;
-         _stm_stop_safe_point();
+         _stm_stop_safe_point(LOCK_COLLECT);
          _STM_TL->jmpbufptr = (jmpbufptr_t*)-1;
          return 0;
     }
@@ -360,7 +365,7 @@
 
 
 def stm_start_safe_point():
-    lib._stm_start_safe_point()
+    lib._stm_start_safe_point(lib.LOCK_COLLECT)
 
 def stm_stop_safe_point():
     if lib._stm_check_stop_safe_point():
diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py
--- a/c7/test/test_basic.py
+++ b/c7/test/test_basic.py
@@ -314,7 +314,7 @@
     def test_many_allocs(self):
         # assumes NB_NURSERY_PAGES    1024
         obj_size = 1024
-        num = 5000 # more than what fits in the nursery (4MB)
+        num = 9000 # more than what fits in the nursery (4MB)
         
         stm_start_transaction()
         for i in range(num):

