[pypy-commit] pypy stmgc-c8-gil-like: import stmgc/9c72d7f52305

arigo noreply at buildbot.pypy.org
Sun Jun 14 18:05:21 CEST 2015


Author: Armin Rigo <arigo at tunes.org>
Branch: stmgc-c8-gil-like
Changeset: r78091:3be5cbbb7313
Date: 2015-06-14 18:00 +0200
http://bitbucket.org/pypy/pypy/changeset/3be5cbbb7313/

Log:	import stmgc/9c72d7f52305

diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision
--- a/rpython/translator/stm/src_stm/revision
+++ b/rpython/translator/stm/src_stm/revision
@@ -1,1 +1,1 @@
-8009f12c327b
+9c72d7f52305
diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c
--- a/rpython/translator/stm/src_stm/stm/core.c
+++ b/rpython/translator/stm/src_stm/stm/core.c
@@ -1155,6 +1155,8 @@
     STM_PSEGMENT->shadowstack_at_start_of_transaction = tl->shadowstack;
     STM_PSEGMENT->threadlocal_at_start_of_transaction = tl->thread_local_obj;
     STM_PSEGMENT->total_throw_away_nursery = 0;
+    assert(tl->self_or_0_if_atomic == (intptr_t)tl);   /* not atomic */
+    assert(STM_PSEGMENT->atomic_nesting_levels == 0);
 
     assert(list_is_empty(STM_PSEGMENT->modified_old_objects));
     assert(list_is_empty(STM_PSEGMENT->large_overflow_objects));
@@ -1319,6 +1321,12 @@
         stm_fatalerror("cannot commit between stm_stop_all_other_threads "
                        "and stm_resume_all_other_threads");
     }
+    if (STM_PSEGMENT->atomic_nesting_levels > 0) {
+        stm_fatalerror("cannot commit between stm_enable_atomic "
+                       "and stm_disable_atomic");
+    }
+    assert(STM_SEGMENT->running_thread->self_or_0_if_atomic ==
+           (intptr_t)(STM_SEGMENT->running_thread));
 
     dprintf(("> stm_commit_transaction(external=%d)\n", (int)external));
     minor_collection(/*commit=*/ true, external);
@@ -1513,6 +1521,8 @@
     abort_data_structures_from_segment_num(STM_SEGMENT->segment_num);
 
     stm_thread_local_t *tl = STM_SEGMENT->running_thread;
+    tl->self_or_0_if_atomic = (intptr_t)tl;   /* clear the 'atomic' flag */
+    STM_PSEGMENT->atomic_nesting_levels = 0;
 
     if (tl->mem_clear_on_abort)
         memset(tl->mem_clear_on_abort, 0, tl->mem_bytes_to_clear_on_abort);
diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h
--- a/rpython/translator/stm/src_stm/stm/core.h
+++ b/rpython/translator/stm/src_stm/stm/core.h
@@ -155,6 +155,9 @@
 
     /* For nursery_mark */
     uintptr_t total_throw_away_nursery;
+
+    /* For stm_enable_atomic() */
+    uintptr_t atomic_nesting_levels;
 };
 
 enum /* safe_point */ {
diff --git a/rpython/translator/stm/src_stm/stm/detach.c b/rpython/translator/stm/src_stm/stm/detach.c
--- a/rpython/translator/stm/src_stm/stm/detach.c
+++ b/rpython/translator/stm/src_stm/stm/detach.c
@@ -22,6 +22,14 @@
    originally detached), and at the point where we know the original
    stm_thread_local_t is no longer relevant, we reset
    _stm_detached_inevitable_from_thread to 0.
+
+   The value that stm_leave_transactional_zone() sticks inside
+   _stm_detached_inevitable_from_thread is actually
+   'tl->self_or_0_if_atomic'.  This value is 0 if and only if 'tl' is
+   currently running a transaction *and* this transaction is atomic.  So
+   if we're running an atomic transaction, then
+   _stm_detached_inevitable_from_thread remains 0 across
+   leave/enter_transactional.
 */
 
 volatile intptr_t _stm_detached_inevitable_from_thread;
@@ -36,19 +44,35 @@
 void _stm_leave_noninevitable_transactional_zone(void)
 {
     int saved_errno = errno;
-    dprintf(("leave_noninevitable_transactional_zone\n"));
-    _stm_become_inevitable(MSG_INEV_DONT_SLEEP);
 
-    /* did it work? */
-    if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) {   /* yes */
-        dprintf(("leave_noninevitable_transactional_zone: now inevitable\n"));
-        stm_thread_local_t *tl = STM_SEGMENT->running_thread;
-        _stm_detach_inevitable_transaction(tl);
+    if (STM_PSEGMENT->atomic_nesting_levels == 0) {
+        dprintf(("leave_noninevitable_transactional_zone\n"));
+        _stm_become_inevitable(MSG_INEV_DONT_SLEEP);
+
+        /* did it work? */
+        if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) {   /* yes */
+            dprintf((
+                "leave_noninevitable_transactional_zone: now inevitable\n"));
+            stm_thread_local_t *tl = STM_SEGMENT->running_thread;
+            _stm_detach_inevitable_transaction(tl);
+        }
+        else {   /* no */
+            dprintf(("leave_noninevitable_transactional_zone: commit\n"));
+            _stm_commit_transaction();
+        }
     }
-    else {   /* no */
-        dprintf(("leave_noninevitable_transactional_zone: commit\n"));
-        _stm_commit_transaction();
+    else {
+        /* we're atomic, so we can't commit at all */
+        dprintf(("leave_noninevitable_transactional_zone atomic\n"));
+        _stm_become_inevitable("leave_noninevitable_transactional_zone atomic");
+        assert(STM_PSEGMENT->transaction_state == TS_INEVITABLE);
+        assert(_stm_detached_inevitable_from_thread == 0);
+        assert(STM_SEGMENT->running_thread->self_or_0_if_atomic == 0);
+        /* no point in calling _stm_detach_inevitable_transaction()
+           because it would store 0 into a place that is already 0, as
+           checked by the asserts above */
     }
+
     errno = saved_errno;
 }
 
@@ -173,3 +197,54 @@
         goto restart;
     }
 }
+
+uintptr_t stm_is_atomic(stm_thread_local_t *tl)
+{
+    assert(STM_SEGMENT->running_thread == tl);
+    if (tl->self_or_0_if_atomic != 0) {
+        assert(tl->self_or_0_if_atomic == (intptr_t)tl);
+        assert(STM_PSEGMENT->atomic_nesting_levels == 0);
+    }
+    else {
+        assert(STM_PSEGMENT->atomic_nesting_levels > 0);
+    }
+    return STM_PSEGMENT->atomic_nesting_levels;
+}
+
+#define HUGE_INTPTR_VALUE  0x3000000000000000L
+
+void stm_enable_atomic(stm_thread_local_t *tl)
+{
+    if (!stm_is_atomic(tl)) {
+        tl->self_or_0_if_atomic = 0;
+        /* increment 'nursery_mark' by HUGE_INTPTR_VALUE, so that
+           stm_should_break_transaction() returns always false */
+        intptr_t mark = (intptr_t)STM_SEGMENT->nursery_mark;
+        if (mark < 0)
+            mark = 0;
+        if (mark >= HUGE_INTPTR_VALUE)
+            mark = HUGE_INTPTR_VALUE - 1;
+        mark += HUGE_INTPTR_VALUE;
+        STM_SEGMENT->nursery_mark = (stm_char *)mark;
+    }
+    STM_PSEGMENT->atomic_nesting_levels++;
+}
+
+void stm_disable_atomic(stm_thread_local_t *tl)
+{
+    if (!stm_is_atomic(tl))
+        stm_fatalerror("stm_disable_atomic(): already not atomic");
+
+    STM_PSEGMENT->atomic_nesting_levels--;
+
+    if (STM_PSEGMENT->atomic_nesting_levels == 0) {
+        tl->self_or_0_if_atomic = (intptr_t)tl;
+        /* decrement 'nursery_mark' by HUGE_INTPTR_VALUE, to cancel
+           what was done in stm_enable_atomic() */
+        intptr_t mark = (intptr_t)STM_SEGMENT->nursery_mark;
+        mark -= HUGE_INTPTR_VALUE;
+        if (mark < 0)
+            mark = 0;
+        STM_SEGMENT->nursery_mark = (stm_char *)mark;
+    }
+}
diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c
--- a/rpython/translator/stm/src_stm/stm/nursery.c
+++ b/rpython/translator/stm/src_stm/stm/nursery.c
@@ -471,7 +471,6 @@
     }
     OPT_ASSERT((nursery_used & 7) == 0);
 
-#ifndef NDEBUG
     /* reset the nursery by zeroing it */
     char *realnursery;
     realnursery = REAL_ADDRESS(pseg->pub.segment_base, _stm_nursery_start);
@@ -483,8 +482,9 @@
                        (NURSERY_END - _stm_nursery_start) - nursery_used);
 
 #else
+# ifndef NDEBUG
     memset(realnursery, 0xa0, nursery_used);
-#endif
+# endif
 #endif
 
     pseg->total_throw_away_nursery += nursery_used;
diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c
--- a/rpython/translator/stm/src_stm/stm/setup.c
+++ b/rpython/translator/stm/src_stm/stm/setup.c
@@ -233,8 +233,7 @@
 {
     int num;
     s_mutex_lock();
-    tl->self = tl;    /* for faster access to &stm_thread_local (and easier
-                         from the PyPy JIT, too) */
+    tl->self_or_0_if_atomic = (intptr_t)tl;    /* 'not atomic' */
     if (stm_all_thread_locals == NULL) {
         stm_all_thread_locals = tl->next = tl->prev = tl;
         num = 0;
diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h
--- a/rpython/translator/stm/src_stm/stmgc.h
+++ b/rpython/translator/stm/src_stm/stmgc.h
@@ -71,7 +71,8 @@
     /* the next fields are handled internally by the library */
     int last_associated_segment_num;   /* always a valid seg num */
     int thread_local_counter;
-    struct stm_thread_local_s *self, *prev, *next;
+    struct stm_thread_local_s *prev, *next;
+    intptr_t self_or_0_if_atomic;
     void *creating_pthread[2];
 } stm_thread_local_t;
 
@@ -90,7 +91,7 @@
 #define _stm_detach_inevitable_transaction(tl)  do {                    \
     write_fence();                                                      \
     assert(_stm_detached_inevitable_from_thread == 0);                  \
-    _stm_detached_inevitable_from_thread = (intptr_t)(tl->self);        \
+    _stm_detached_inevitable_from_thread = tl->self_or_0_if_atomic;     \
 } while (0)
 void _stm_reattach_transaction(stm_thread_local_t *tl);
 void _stm_become_inevitable(const char*);
@@ -427,7 +428,7 @@
 #endif
 static inline void stm_enter_transactional_zone(stm_thread_local_t *tl) {
     if (__sync_bool_compare_and_swap(&_stm_detached_inevitable_from_thread,
-                                     (intptr_t)tl, 0)) {
+                                     tl->self_or_0_if_atomic, 0)) {
 #ifdef STM_DEBUGPRINT
         fprintf(stderr, "stm_enter_transactional_zone fast path\n");
 #endif
@@ -505,6 +506,15 @@
    in the last attempt.
 */
 
+/* "atomic" transaction: a transaction where stm_should_break_transaction()
+   always returns false, and where stm_leave_transactional_zone() never
+   detaches nor terminates the transaction.  (stm_force_transaction_break()
+   crashes if called with an atomic transaction.)
+*/
+uintptr_t stm_is_atomic(stm_thread_local_t *tl);
+void stm_enable_atomic(stm_thread_local_t *tl);
+void stm_disable_atomic(stm_thread_local_t *tl);
+
 
 /* Prepare an immortal "prebuilt" object managed by the GC.  Takes a
    pointer to an 'object_t', which should not actually be a GC-managed


More information about the pypy-commit mailing list