[pypy-commit] stmgc c8-gil-like: in-progress

arigo noreply at buildbot.pypy.org
Tue Jun 9 17:13:08 CEST 2015


Author: Armin Rigo <arigo at tunes.org>
Branch: c8-gil-like
Changeset: r1794:2545c3033c9b
Date: 2015-06-09 17:13 +0200
http://bitbucket.org/pypy/stmgc/changeset/2545c3033c9b/

Log:	in-progress

diff --git a/c8/CALL_RELEASE_GIL b/c8/CALL_RELEASE_GIL
--- a/c8/CALL_RELEASE_GIL
+++ b/c8/CALL_RELEASE_GIL
@@ -50,12 +50,12 @@
   same or a different thread.
 
 - we add a global variable, "stm_detached_inevitable_from_thread".  It
-  is equal to the shadowstack pointer of the thread that detached
+  is equal to the stm_thread_local pointer of the thread that detached
   inevitable transaction (like rpy_fastgil == 0), or NULL if there is
   no detached inevitable transaction (like rpy_fastgil == 1).
 
 - the macro stm_detach_inevitable_transaction() simply writes the
-  current thread's shadowstack pointer into the global variable
+  current thread's stm_thread_local pointer into the global variable
   stm_detached_inevitable_from_thread.  It can only be used if the
   current transaction is inevitable (and in particular the inevitable
   transaction was not detached already, because we're running it).
@@ -65,7 +65,7 @@
 
 - the macro stm_reattach_transaction() does an atomic swap on
   stm_detached_inevitable_from_thread to change it to NULL.  If the
-  old value was equal to our own shadowstack pointer, we are done.  If
+  old value was equal to our own stm_thread_local pointer, we are done.  If
   not, we call a helper, _stm_reattach_transaction().
 
 - we also add the macro stm_detach_transaction().  If the current
@@ -76,7 +76,7 @@
   stm_detached_inevitable_from_thread (which was swapped to be NULL just
   now).  If old != NULL, this swap had the effect that we took over
   the inevitable transaction originally detached from a different
-  thread; we need to fix a few things like the shadowstack and %gs but
+  thread; we need to fix a few things like the stm_thread_local and %gs but
   then we can continue running this reattached inevitable transaction.
   If old == NULL, we need to fall back to the current
   stm_start_transaction().  (A priori, there is no need to wait at
diff --git a/c8/stm/core.c b/c8/stm/core.c
--- a/c8/stm/core.c
+++ b/c8/stm/core.c
@@ -509,7 +509,8 @@
 static void readd_wb_executed_flags(void);
 static void check_all_write_barrier_flags(char *segbase, struct list_s *list);
 
-static void _validate_and_attach(struct stm_commit_log_entry_s *new)
+static bool _validate_and_attach(struct stm_commit_log_entry_s *new,
+                                 bool can_sleep)
 {
     struct stm_commit_log_entry_s *old;
 
@@ -571,6 +572,8 @@
             /* XXXXXX for now just sleep.  We should really ask the inev
                transaction to do the commit for us, and then we can
                continue running. */
+            if (!can_sleep)
+                return false;
             dprintf(("_validate_and_attach(%p) failed, "
                      "waiting for inevitable\n", new));
             wait_for_other_inevitable(old);
@@ -598,11 +601,13 @@
         STM_PSEGMENT->last_commit_log_entry = new;
         release_modification_lock_wr(STM_SEGMENT->segment_num);
     }
+    return true;
 }
 
-static void _validate_and_turn_inevitable(void)
+static bool _validate_and_turn_inevitable(bool can_sleep)
 {
-    _validate_and_attach((struct stm_commit_log_entry_s *)INEV_RUNNING);
+    return _validate_and_attach((struct stm_commit_log_entry_s *)INEV_RUNNING,
+                                can_sleep);
 }
 
 static void _validate_and_add_to_commit_log(void)
@@ -631,7 +636,7 @@
         OPT_ASSERT(yes);
     }
     else {
-        _validate_and_attach(new);
+        _validate_and_attach(new, /*can_sleep=*/true);
     }
 }
 
@@ -1123,7 +1128,7 @@
 
 
 
-static void _stm_start_transaction(stm_thread_local_t *tl)
+static void _do_start_transaction(stm_thread_local_t *tl)
 {
     assert(!_stm_in_transaction(tl));
 
@@ -1181,7 +1186,7 @@
     stm_validate();
 }
 
-long stm_start_transaction(stm_thread_local_t *tl)
+long _stm_start_transaction(stm_thread_local_t *tl)
 {
     s_mutex_lock();
 #ifdef STM_NO_AUTOMATIC_SETJMP
@@ -1189,23 +1194,10 @@
 #else
     long repeat_count = stm_rewind_jmp_setjmp(tl);
 #endif
-    _stm_start_transaction(tl);
+    _do_start_transaction(tl);
     return repeat_count;
 }
 
-void stm_start_inevitable_transaction(stm_thread_local_t *tl)
-{
-    /* used to be more efficient, starting directly an inevitable transaction,
-       but there is no real point any more, I believe */
-    rewind_jmp_buf rjbuf;
-    stm_rewind_jmp_enterframe(tl, &rjbuf);
-
-    stm_start_transaction(tl);
-    stm_become_inevitable(tl, "start_inevitable_transaction");
-
-    stm_rewind_jmp_leaveframe(tl, &rjbuf);
-}
-
 #ifdef STM_NO_AUTOMATIC_SETJMP
 void _test_run_abort(stm_thread_local_t *tl) __attribute__((noreturn));
 int stm_is_inevitable(void)
@@ -1280,7 +1272,7 @@
 }
 
 
-void stm_commit_transaction(void)
+void _stm_commit_transaction(void)
 {
     exec_local_finalizers();
 
@@ -1502,20 +1494,23 @@
 
 void _stm_become_inevitable(const char *msg)
 {
-    if (STM_PSEGMENT->transaction_state == TS_REGULAR) {
+    assert(STM_PSEGMENT->transaction_state == TS_REGULAR);
+    _stm_collectable_safe_point();
+
+    if (msg != MSG_INEV_DONT_SLEEP) {
         dprintf(("become_inevitable: %s\n", msg));
-        _stm_collectable_safe_point();
         timing_become_inevitable();
-
-        _validate_and_turn_inevitable();
-        STM_PSEGMENT->transaction_state = TS_INEVITABLE;
-
-        stm_rewind_jmp_forget(STM_SEGMENT->running_thread);
-        invoke_and_clear_user_callbacks(0);   /* for commit */
+        _validate_and_turn_inevitable(/*can_sleep=*/true);
     }
     else {
-        assert(STM_PSEGMENT->transaction_state == TS_INEVITABLE);
+        if (!_validate_and_turn_inevitable(/*can_sleep=*/false))
+            return;
+        timing_become_inevitable();
     }
+    STM_PSEGMENT->transaction_state = TS_INEVITABLE;
+
+    stm_rewind_jmp_forget(STM_SEGMENT->running_thread);
+    invoke_and_clear_user_callbacks(0);   /* for commit */
 }
 
 void stm_become_globally_unique_transaction(stm_thread_local_t *tl,
diff --git a/c8/stm/core.h b/c8/stm/core.h
--- a/c8/stm/core.h
+++ b/c8/stm/core.h
@@ -170,6 +170,8 @@
     TS_INEVITABLE,
 };
 
+#define MSG_INEV_DONT_SLEEP  ((const char *)1)
+
 #define in_transaction(tl)                                              \
     (get_segment((tl)->last_associated_segment_num)->running_thread == (tl))
 
diff --git a/c8/stm/detach.c b/c8/stm/detach.c
new file mode 100644
--- /dev/null
+++ b/c8/stm/detach.c
@@ -0,0 +1,76 @@
+#ifndef _STM_CORE_H_
+# error "must be compiled via stmgc.c"
+#endif
+
+
+#define DETACHED_NO_THREAD  ((stm_thread_local_t *)-1)
+
+
+stm_thread_local_t *volatile _stm_detached_inevitable_from_thread;
+
+
+static void setup_detach(void)
+{
+    _stm_detached_inevitable_from_thread = NULL;
+}
+
+
+void _stm_leave_noninevitable_transactional_zone(void)
+{
+    _stm_become_inevitable(MSG_INEV_DONT_SLEEP);
+
+    /* did it work? */
+    if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) {   /* yes */
+        _stm_detach_inevitable_transaction(STM_SEGMENT->running_thread);
+    }
+    else {   /* no */
+        _stm_commit_transaction();
+    }
+}
+
+void _stm_reattach_transaction(stm_thread_local_t *old, stm_thread_local_t *tl)
+{
+    if (old != NULL) {
+        /* We took over the inevitable transaction originally detached
+           from a different thread.  We have to fix the %gs register if
+           it is incorrect.  Careful, 'old' might be DETACHED_NO_THREAD.
+        */
+        int mysegnum = tl->last_associated_segment_num;
+
+        if (STM_SEGMENT->segment_num != mysegnum) {
+            set_gs_register(get_segment_base(mysegnum));
+            assert(STM_SEGMENT->segment_num == mysegnum);
+        }
+        assert(old == DETACHED_NO_THREAD || STM_SEGMENT->running_thread == old);
+        STM_SEGMENT->running_thread = tl;
+
+        stm_safe_point();
+    }
+    else {
+        /* there was no detached inevitable transaction */
+        _stm_start_transaction(tl);
+    }
+}
+
+static void fully_detach_thread(void)
+{
+    /* If there is a detached inevitable transaction, then make sure
+       that it is "fully" detached.  The point is to make sure that
+       the fast path of stm_enter_transactional_zone() will fail, and
+       we'll call _stm_reattach_transaction(), which will in turn call
+       stm_safe_point().  So a "fully detached" transaction will enter
+       a safe point as soon as it is reattached.
+
+       XXX THINK about concurrent threads here!
+    */
+    assert(_has_mutex());
+
+ restart:
+    stm_thread_local_t *old = _stm_detached_inevitable_from_thread;
+    if (old == NULL || old == DETACHED_NO_THREAD)
+        return;
+
+    if (!__sync_bool_compare_and_swap(&_stm_detached_inevitable_from_thread,
+                                      old, DETACHED_NO_THREAD))
+        goto restart;
+}
diff --git a/c8/stm/detach.h b/c8/stm/detach.h
new file mode 100644
--- /dev/null
+++ b/c8/stm/detach.h
@@ -0,0 +1,3 @@
+
+static void setup_detach(void);
+static void fully_detach_thread(void);
diff --git a/c8/stm/finalizer.c b/c8/stm/finalizer.c
--- a/c8/stm/finalizer.c
+++ b/c8/stm/finalizer.c
@@ -494,11 +494,11 @@
 
     rewind_jmp_buf rjbuf;
     stm_rewind_jmp_enterframe(tl, &rjbuf);
-    stm_start_transaction(tl);
+    _stm_start_transaction(tl);
 
     _execute_finalizers(&g_finalizers);
 
-    stm_commit_transaction();
+    _stm_commit_transaction();
     stm_rewind_jmp_leaveframe(tl, &rjbuf);
 
     __sync_lock_release(&lock);
diff --git a/c8/stm/forksupport.c b/c8/stm/forksupport.c
--- a/c8/stm/forksupport.c
+++ b/c8/stm/forksupport.c
@@ -40,7 +40,7 @@
 
     bool was_in_transaction = _stm_in_transaction(this_tl);
     if (!was_in_transaction)
-        stm_start_transaction(this_tl);
+        _stm_start_transaction(this_tl);
     assert(in_transaction(this_tl));
 
     stm_become_inevitable(this_tl, "fork");
@@ -73,7 +73,7 @@
     s_mutex_unlock();
 
     if (!was_in_transaction) {
-        stm_commit_transaction();
+        _stm_commit_transaction();
     }
 
     dprintf(("forksupport_parent: continuing to run\n"));
@@ -159,7 +159,7 @@
     assert(STM_SEGMENT->segment_num == segnum);
 
     if (!fork_was_in_transaction) {
-        stm_commit_transaction();
+        _stm_commit_transaction();
     }
 
     /* Done */
diff --git a/c8/stm/setup.c b/c8/stm/setup.c
--- a/c8/stm/setup.c
+++ b/c8/stm/setup.c
@@ -134,6 +134,7 @@
     setup_pages();
     setup_forksupport();
     setup_finalizer();
+    setup_detach();
 
     set_gs_register(get_segment_base(0));
 }
diff --git a/c8/stm/sync.c b/c8/stm/sync.c
--- a/c8/stm/sync.c
+++ b/c8/stm/sync.c
@@ -103,6 +103,7 @@
 /************************************************************/
 
 
+#if 0
 void stm_wait_for_current_inevitable_transaction(void)
 {
  restart:
@@ -125,7 +126,7 @@
     }
     s_mutex_unlock();
 }
-
+#endif
 
 
 static bool acquire_thread_segment(stm_thread_local_t *tl)
@@ -263,6 +264,7 @@
     }
     assert(!pause_signalled);
     pause_signalled = true;
+    fully_detach_thread();
 }
 
 static inline long count_other_threads_sp_running(void)
diff --git a/c8/stmgc.c b/c8/stmgc.c
--- a/c8/stmgc.c
+++ b/c8/stmgc.c
@@ -18,6 +18,7 @@
 #include "stm/rewind_setjmp.h"
 #include "stm/finalizer.h"
 #include "stm/locks.h"
+#include "stm/detach.h"
 
 #include "stm/misc.c"
 #include "stm/list.c"
@@ -41,3 +42,4 @@
 #include "stm/rewind_setjmp.c"
 #include "stm/finalizer.c"
 #include "stm/hashtable.c"
+#include "stm/detach.c"
diff --git a/c8/stmgc.h b/c8/stmgc.h
--- a/c8/stmgc.h
+++ b/c8/stmgc.h
@@ -13,6 +13,7 @@
 #include <limits.h>
 #include <unistd.h>
 
+#include "stm/atomic.h"
 #include "stm/rewind_setjmp.h"
 
 #if LONG_MAX == 2147483647
@@ -82,6 +83,16 @@
 void _stm_write_slowpath_card(object_t *, uintptr_t);
 object_t *_stm_allocate_slowpath(ssize_t);
 object_t *_stm_allocate_external(ssize_t);
+
+extern stm_thread_local_t *volatile _stm_detached_inevitable_from_thread;
+long _stm_start_transaction(stm_thread_local_t *tl);
+void _stm_commit_transaction(void);
+void _stm_leave_noninevitable_transactional_zone(void);
+#define _stm_detach_inevitable_transaction(tl)  do {    \
+    write_fence();                                      \
+    _stm_detached_inevitable_from_thread = (tl);        \
+} while (0)
+void _stm_reattach_transaction(stm_thread_local_t *old, stm_thread_local_t *tl);
 void _stm_become_inevitable(const char*);
 void _stm_collectable_safe_point(void);
 
@@ -379,23 +390,6 @@
     rewind_jmp_enum_shadowstack(&(tl)->rjthread, callback)
 
 
-/* Starting and ending transactions.  stm_read(), stm_write() and
-   stm_allocate() should only be called from within a transaction.
-   The stm_start_transaction() call returns the number of times it
-   returned, starting at 0.  If it is > 0, then the transaction was
-   aborted and restarted this number of times. */
-long stm_start_transaction(stm_thread_local_t *tl);
-void stm_start_inevitable_transaction(stm_thread_local_t *tl);
-void stm_commit_transaction(void);
-
-/* Temporary fix?  Call this outside a transaction.  If there is an
-   inevitable transaction running somewhere else, wait until it finishes. */
-void stm_wait_for_current_inevitable_transaction(void);
-
-/* Abort the currently running transaction.  This function never
-   returns: it jumps back to the stm_start_transaction(). */
-void stm_abort_transaction(void) __attribute__((noreturn));
-
 #ifdef STM_NO_AUTOMATIC_SETJMP
 int stm_is_inevitable(void);
 #else
@@ -404,6 +398,54 @@
 }
 #endif
 
+
+/* Entering and leaving a "transactional code zone": a (typically very
+   large) section in the code where we are running a transaction.
+   This is the STM equivalent to "acquire the GIL" and "release the
+   GIL", respectively.  stm_read(), stm_write(), stm_allocate(), and
+   other functions should only be called from within a transaction.
+
+   Note that transactions, in the STM sense, cover _at least_ one
+   transactional code zone.  They may be longer; for example, if one
+   thread does a lot of stm_enter_transactional_zone() +
+   stm_become_inevitable() + stm_leave_transactional_zone(), as is
+   typical in a thread that does a lot of C function calls, then we
+   get only a few bigger inevitable transactions that cover the many
+   short transactional zones.  This is done by having
+   stm_leave_transactional_zone() turn the current transaction
+   inevitable and detach it from the running thread (if there is no
+   other inevitable transaction running so far).  Then
+   stm_enter_transactional_zone() will try to reattach to it.  This is
+   far more efficient than constantly starting and committing
+   transactions.
+*/
+inline void stm_enter_transactional_zone(stm_thread_local_t *tl) {
+    stm_thread_local_t *old = __sync_lock_test_and_set(    /* XCHG */
+        &_stm_detached_inevitable_from_thread, NULL);
+    if (old != (tl))
+        _stm_reattach_transaction(old, tl);
+}
+inline void stm_leave_transactional_zone(stm_thread_local_t *tl) {
+    assert(STM_SEGMENT->running_thread == tl);
+    if (stm_is_inevitable())
+        _stm_detach_inevitable_transaction(tl);
+    else
+        _stm_leave_noninevitable_transactional_zone();
+}
+
+/* stm_break_transaction() is in theory equivalent to
+   stm_leave_transactional_zone() immediately followed by
+   stm_enter_transactional_zone(); however, it is supposed to be
+   called in CPU-heavy threads that had a transaction run for a while,
+   and so it *always* forces a commit and starts the next transaction.
+   The new transaction is never inevitable. */
+void stm_break_transaction(stm_thread_local_t *tl);
+
+/* Abort the currently running transaction.  This function never
+   returns: it jumps back to the start of the transaction (which must
+   not be inevitable). */
+void stm_abort_transaction(void) __attribute__((noreturn));
+
 /* Turn the current transaction inevitable.
    stm_become_inevitable() itself may still abort the transaction instead
    of returning. */
@@ -412,6 +454,8 @@
     assert(STM_SEGMENT->running_thread == tl);
     if (!stm_is_inevitable())
         _stm_become_inevitable(msg);
+    /* now, we're running the inevitable transaction, so: */
+    assert(_stm_detached_inevitable_from_thread == NULL);
 }
 
 /* Forces a safe-point if needed.  Normally not needed: this is
diff --git a/c8/test/support.py b/c8/test/support.py
--- a/c8/test/support.py
+++ b/c8/test/support.py
@@ -275,7 +275,7 @@
 }
 
 bool _check_commit_transaction(void) {
-    CHECKED(stm_commit_transaction());
+    CHECKED(_stm_commit_transaction());
 }
 
 bool _check_stm_collect(long level) {
@@ -285,7 +285,7 @@
 long _check_start_transaction(stm_thread_local_t *tl) {
    void **jmpbuf = tl->rjthread.jmpbuf;                         \
     if (__builtin_setjmp(jmpbuf) == 0) { /* returned directly */\
-        stm_start_transaction(tl);                              \
+        stm_enter_transactional_zone(tl);                       \
         clear_jmpbuf(tl);                                       \
         return 0;                                               \
     }                                                           \


More information about the pypy-commit mailing list