[pypy-commit] pypy stmgc-c8-gil-like: import stmgc/7a87c63be4d2. adapt the JIT

arigo noreply at buildbot.pypy.org
Sun Jun 14 18:28:08 CEST 2015


Author: Armin Rigo <arigo at tunes.org>
Branch: stmgc-c8-gil-like
Changeset: r78094:c6856f2622dd
Date: 2015-06-14 18:21 +0200
http://bitbucket.org/pypy/pypy/changeset/c6856f2622dd/

Log:	import stmgc/7a87c63be4d2. adapt the JIT

diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -146,8 +146,8 @@
         mc.SUB_ri(esp.value, 3 * WORD)     # 3 instead of 2 to align the stack
         mc.MOV_sr(0, eax.value)     # not edx, we're not running 32-bit
         mc.MOVSD_sx(1, xmm0.value)
-        # load the value of tl (== tl->self) into edi as argument
-        mc.MOV(edi, self.heap_stm_thread_local_self())
+        # load the value of 'tl->self_or_0_if_atomic' into edi as argument
+        mc.MOV(edi, self.heap_stm_thread_local_self_or_0_if_atomic())
         mc.CALL(imm(rstm.adr_stm_reattach_transaction))
         # pop
         mc.MOVSD_xs(xmm0.value, 1)
@@ -930,10 +930,10 @@
         """STM: AddressLoc for '&stm_thread_local.rjthread.moved_off_base'."""
         return self.heap_tl(rstm.adr_rjthread_moved_off_base)
 
-    def heap_stm_thread_local_self(self):
-        """STM: AddressLoc for '&stm_thread_local.self', i.e. such that
-        reading it returns the (absolute) address of 'stm_thread_local'."""
-        return self.heap_tl(rstm.adr_stm_thread_local_self)
+    def heap_stm_thread_local_self_or_0_if_atomic(self):
+        """STM: AddressLoc for '&stm_thread_local.self_or_0_if_atomic':
+        the (absolute) address of 'stm_thread_local', or 0 if atomic."""
+        return self.heap_tl(rstm.adr_stm_thread_local_self_or_0_if_atomic)
 
     def heap_stm_detached_inevitable_from_thread(self):
         """STM: AddressLoc for '&stm_detached_inevitable_from_thread'."""
diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py
--- a/rpython/jit/backend/x86/callbuilder.py
+++ b/rpython/jit/backend/x86/callbuilder.py
@@ -709,8 +709,8 @@
         # Fast path: inline _stm_detach_inevitable_transaction()
         # <- Here comes the write_fence(), which is not needed in x86 assembler
         # assert(_stm_detached_inevitable_from_thread == 0): dropped
-        # _stm_detached_inevitable_from_thread = tl (== tl->self):
-        mc.MOV(eax, self.asm.heap_stm_thread_local_self())
+        # _stm_detached_inevitable_from_thread = tl->self_or_0_if_atomic:
+        mc.MOV(eax, self.asm.heap_stm_thread_local_self_or_0_if_atomic())
         mc.MOV(self.asm.heap_stm_detached_inevitable_from_thread(), eax)
         #
         offset = mc.get_relative_pos() - jmp_location
@@ -727,8 +727,8 @@
         mc = self.mc
         mc.MOV(edi, eax)
         #
-        # compare_and_swap(&_stm_detached_inevitable_from_thread, tl, 0)
-        mc.MOV(eax, self.asm.heap_stm_thread_local_self())
+        # compare_and_swap(&_stm_detached_inevitable_from_thread, self_or_0, 0)
+        mc.MOV(eax, self.asm.heap_stm_thread_local_self_or_0_if_atomic())
         mc.XOR(esi, esi)
         adr = self.asm.heap_stm_detached_inevitable_from_thread()
         m_address = mc._addr_as_reg_offset(adr.value_j())
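
The callbuilder change is the same renaming on both halves of the inlined
fast path: before the external call, the detach publishes
'tl->self_or_0_if_atomic' into '_stm_detached_inevitable_from_thread';
after it, the compare-and-swap tries to take that same value back out.  A
minimal C sketch of the pair, mirroring the macro and inline function in
stmgc.h below rather than the exact emitted instructions
('call_detached_sketch' and 'external_fn' are names for this sketch only):

    void call_detached_sketch(stm_thread_local_t *tl,
                              void (*external_fn)(void))
    {
        intptr_t self = tl->self_or_0_if_atomic;
        /* detach: publish our value; 0 means "atomic, not really
           detached" */
        _stm_detached_inevitable_from_thread = self;
        external_fn();              /* runs while detached */
        /* reattach: the fast path swaps our value back to 0; otherwise
           another thread took the detached transaction, slow path */
        if (!__sync_bool_compare_and_swap(
                  &_stm_detached_inevitable_from_thread, self, 0))
            _stm_reattach_transaction(self);
    }
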
diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py
--- a/rpython/rlib/rstm.py
+++ b/rpython/rlib/rstm.py
@@ -46,8 +46,8 @@
     CFlexSymbolic('((long)&pypy__rewind_jmp_copy_stack_slice)'))
 adr_stm_detached_inevitable_from_thread = (
     CFlexSymbolic('((long)&_stm_detached_inevitable_from_thread)'))
-adr_stm_thread_local_self = (
-    CFlexSymbolic('((long)&stm_thread_local.self)'))
+adr_stm_thread_local_self_or_0_if_atomic = (
+    CFlexSymbolic('((long)&stm_thread_local.self_or_0_if_atomic)'))
 adr_stm_leave_noninevitable_transactional_zone = (
     CFlexSymbolic('((long)&_stm_leave_noninevitable_transactional_zone)'))
 adr_stm_reattach_transaction = (
diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision
--- a/rpython/translator/stm/src_stm/revision
+++ b/rpython/translator/stm/src_stm/revision
@@ -1,1 +1,1 @@
-9c72d7f52305
+7a87c63be4d2
diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h
--- a/rpython/translator/stm/src_stm/stm/core.h
+++ b/rpython/translator/stm/src_stm/stm/core.h
@@ -16,7 +16,7 @@
 #endif
 
 
-#define NB_PAGES            (7500*256)    // 7500MB
+#define NB_PAGES            (2500*256)    // 2500MB
 #define NB_SEGMENTS         (STM_NB_SEGMENTS+1) /* +1 for sharing seg 0 */
 #define NB_SEGMENTS_MAX     240    /* don't increase NB_SEGMENTS past this */
 #define NB_NURSERY_PAGES    (STM_GC_NURSERY/4)
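
The page arithmetic behind the comment, assuming the usual 4096-byte
stmgc page (which is what the "MB" comments imply): 256 pages are 1MB,
so 2500*256 pages are 2500MB.  As a compile-time check:

    _Static_assert(2500UL * 256 * 4096 == 2500UL * 1024 * 1024,
                   "NB_PAGES == 2500*256 pages == 2500MB");
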
diff --git a/rpython/translator/stm/src_stm/stm/detach.c b/rpython/translator/stm/src_stm/stm/detach.c
--- a/rpython/translator/stm/src_stm/stm/detach.c
+++ b/rpython/translator/stm/src_stm/stm/detach.c
@@ -82,10 +82,23 @@
     _core_commit_transaction(/*external=*/ true);
 }
 
-void _stm_reattach_transaction(stm_thread_local_t *tl)
+void _stm_reattach_transaction(intptr_t self)
 {
     intptr_t old;
     int saved_errno = errno;
+    stm_thread_local_t *tl = (stm_thread_local_t *)self;
+
+    /* If 'self_or_0_if_atomic' is 0, it means that we are trying to
+       reattach in a thread that is currently running an atomic
+       transaction.  That should only be possible if the transaction
+       is inevitable too; and in that case,
+       '_stm_detached_inevitable_from_thread' must always be 0, so
+       the earlier call to compare_and_swap(0, 0) should have
+       succeeded (doing nothing) and we should never end up here.
+    */
+    if (self == 0)
+        stm_fatalerror("atomic inconsistency");
+
  restart:
     old = _stm_detached_inevitable_from_thread;
     if (old != 0) {
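
The reasoning in the new comment can be replayed in isolation: while a
thread is atomic its 'self_or_0_if_atomic' is 0, and the invariant keeps
'_stm_detached_inevitable_from_thread' at 0, so the compare-and-swap in
stm_enter_transactional_zone() (see stmgc.h below) compares 0 with 0,
succeeds, and never reaches _stm_reattach_transaction(0).  A minimal
standalone model of just that fast path (not stmgc itself; all names
below are made up for the model):

    #include <assert.h>
    #include <stdint.h>

    static intptr_t detached = 0;   /* models the global variable */

    /* models the fast path of stm_enter_transactional_zone() */
    static int enter_fast_path(intptr_t self_or_0_if_atomic)
    {
        return __sync_bool_compare_and_swap(&detached,
                                            self_or_0_if_atomic, 0);
    }

    int main(void)
    {
        assert(enter_fast_path(0));   /* atomic thread: CAS(0, 0)
                                         succeeds, slow path unreachable */
        return 0;
    }
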
diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h
--- a/rpython/translator/stm/src_stm/stmgc.h
+++ b/rpython/translator/stm/src_stm/stmgc.h
@@ -93,7 +93,7 @@
     assert(_stm_detached_inevitable_from_thread == 0);                  \
     _stm_detached_inevitable_from_thread = tl->self_or_0_if_atomic;     \
 } while (0)
-void _stm_reattach_transaction(stm_thread_local_t *tl);
+void _stm_reattach_transaction(intptr_t);
 void _stm_become_inevitable(const char*);
 void _stm_collectable_safe_point(void);
 
@@ -427,14 +427,15 @@
 #include <stdio.h>
 #endif
 static inline void stm_enter_transactional_zone(stm_thread_local_t *tl) {
+    intptr_t self = tl->self_or_0_if_atomic;
     if (__sync_bool_compare_and_swap(&_stm_detached_inevitable_from_thread,
-                                     tl->self_or_0_if_atomic, 0)) {
+                                     self, 0)) {
 #ifdef STM_DEBUGPRINT
         fprintf(stderr, "stm_enter_transactional_zone fast path\n");
 #endif
     }
     else {
-        _stm_reattach_transaction(tl);
+        _stm_reattach_transaction(self);
         /* _stm_detached_inevitable_from_thread should be 0 here, but
            it can already have been changed from a parallel thread
            (assuming we're not inevitable ourselves) */
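
The point of the new local 'self' is that the value compared by the CAS
and the value handed to the slow path now come from a single read of
'tl->self_or_0_if_atomic', so the two can never disagree.  Typical usage
of the pair, assuming the companion stm_leave_transactional_zone(tl)
from the same header ('run_outside_transaction' and 'do_blocking_call'
are hypothetical):

    void run_outside_transaction(stm_thread_local_t *tl)
    {
        stm_leave_transactional_zone(tl);   /* detach, publishing
                                               tl->self_or_0_if_atomic */
        do_blocking_call();                 /* runs while detached */
        stm_enter_transactional_zone(tl);   /* CAS fast path, else
                                               _stm_reattach_transaction */
    }
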

