[pypy-commit] stmgc default: hg merge c8-locking

arigo noreply at buildbot.pypy.org
Mon Jun 1 17:25:34 CEST 2015


Author: Armin Rigo <arigo at tunes.org>
Branch: 
Changeset: r1785:5cea4f9c70af
Date: 2015-06-01 17:26 +0200
http://bitbucket.org/pypy/stmgc/changeset/5cea4f9c70af/

Log:	hg merge c8-locking

diff --git a/c8/LOCKS b/c8/LOCKS
new file mode 100644
--- /dev/null
+++ b/c8/LOCKS
@@ -0,0 +1,93 @@
+
+
+main lock-free operation
+========================
+
+The main lock-free operation is at commit time: the compare-and-swap
+that attaches a new log entry after 'last_commit_log_entry'.
+
+
+
+modification_lock
+=================
+
+One per segment.
+
+Acquired on segment N when we want to read or write segment N's copy
+of 'modified_old_objects', the backup copies, etc.
+
+An important user is _stm_validate(): it locks the current segment,
+and all the other segments out of which it is going to read data.
+
+This could be improved, because _stm_validate() writes into the
+current segment but only reads from the other ones.  So far it mostly
+serializes calls to _stm_validate(): if two of them start at roughly
+the same time, they both need to acquire the modification_lock of at
+least the segment that did the most recent commit --- even though
+they could proceed in parallel if they could both realize that they
+only want to read from that same segment.
+
+Similarly, handle_segfault_in_page() acquires two modification_locks:
+the one of the current segment (which needs to be written to), and
+the one of 'copy_from_segnum' (which only needs to be read from).
+
+The current segment's modification_lock is also acquired briefly
+whenever we change our segment's 'modified_old_objects'.
+
+_validate_and_attach() needs to hold its own segment's
+modification_lock *around* the compare-and-swap, so that
+_stm_validate() sees either the commit not yet done and the backup
+copies still in 'modified_old_objects', or the commit done and no
+backup copies any more.
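+
+Schematically, with simplified names (error paths and the handling of
+inevitable transactions omitted; see _validate_and_attach() in
+core.c for the real code):
+
+    acquire_modification_lock_wr(my_segnum);
+    if (__sync_bool_compare_and_swap(
+            &STM_PSEGMENT->last_commit_log_entry->next,
+            NULL, new_entry)) {
+        /* the commit is now visible to everybody: the backup copies
+           must be gone before the lock is released */
+        list_clear(STM_PSEGMENT->modified_old_objects);
+        STM_PSEGMENT->last_commit_log_entry = new_entry;
+    }
+    release_modification_lock_wr(my_segnum);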
+
+
+--- UPDATE: modification_lock is now implemented with the
+pthread_rwlock_xxx() functions.
+
+
+
+privatization_lock
+==================
+
+One per segment.  Together they work like a single "reader-writer"
+lock: each segment acquires either only its own copy ("reader") or
+all of them ("writer").  (A sketch of both modes follows the lists
+below.)
+
+"Reader" status is needed to call get_page_status_in().
+"Writer" status is needed to call set_page_status_in() or
+page_mark_(in)accessible().
+
+Essential "writers":
+- handle_segfault_in_page(), but it only writes the status for the
+  current segment
+
+Essential "readers":
+- _stm_validate()
+- push_large_overflow_objects_to_other_segments()
+- nursery.c calling synchronize_object_enqueue()
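+
+A sketch of both modes (simplified; the "writer" loop is what
+core.c's release_all_privatization_locks() undoes for real):
+
+    /* "reader": our own segment's lock is enough */
+    acquire_privatization_lock(STM_SEGMENT->segment_num);
+    /* ... call get_page_status_in() ... */
+    release_privatization_lock(STM_SEGMENT->segment_num);
+
+    /* "writer": take every segment's lock, in a fixed global order
+       so that two "writers" cannot deadlock */
+    long l;
+    for (l = 0; l < NB_SEGMENTS; l++)
+        acquire_privatization_lock(l);
+    /* ... call set_page_status_in() / page_mark_(in)accessible() ... */
+    for (l = 0; l < NB_SEGMENTS; l++)
+        release_privatization_lock(l);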
+
+
+
+mutex and conditions
+====================
+
+There is also one global mutex and a few condition codes.  It's
+unclear if these are still the best solution.
+
+The mutex is acquired in stm_start_transaction() and in
+stm_commit_transaction().  The main purpose is to wait for or signal
+the C_SEGMENT_FREE condition code.
+
+The C_AT_SAFE_POINT and C_REQUEST_REMOVED condition codes are used by
+synchronize_all_threads().  That's used only in rare cases, for
+example because we want to start a major collection.
+
+The mutex also needs to be acquired around rewind_longjmp's setjmp()
+and longjmp() equivalents.
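+
+The waiting side follows the classic pattern; a generic pthread
+sketch (the actual helper names live in sync.c and are not shown
+here; the predicate is made up):
+
+    pthread_mutex_lock(&mutex);
+    while (!segment_available())
+        pthread_cond_wait(&cond_segment_free, &mutex);
+    /* ... claim the free segment ... */
+    pthread_mutex_unlock(&mutex);
+
+with pthread_cond_signal() or pthread_cond_broadcast() on the other
+side, under the same mutex.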
+
+
+
+usleep loop
+===========
+
+core.c: wait_for_other_inevitable()
+sync.c: stm_wait_for_current_inevitable_transaction()
+
+Must be fixed!
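+
+Both currently wait by polling; schematically (the predicate and the
+delay are made up):
+
+    while (!other_transaction_done())
+        usleep(10);
+
+i.e. latency and CPU are wasted instead of blocking on the mutex and
+a condition code as above.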
diff --git a/c8/stm/core.c b/c8/stm/core.c
--- a/c8/stm/core.c
+++ b/c8/stm/core.c
@@ -50,8 +50,8 @@
     char *src_segment_base = (from_segnum >= 0 ? get_segment_base(from_segnum)
                                                : NULL);
 
-    assert(IMPLY(from_segnum >= 0, get_priv_segment(from_segnum)->modification_lock));
-    assert(STM_PSEGMENT->modification_lock);
+    assert(IMPLY(from_segnum >= 0, modification_lock_check_rdlock(from_segnum)));
+    assert(modification_lock_check_wrlock(STM_SEGMENT->segment_num));
 
     long my_segnum = STM_SEGMENT->segment_num;
     DEBUG_EXPECT_SEGFAULT(false);
@@ -131,7 +131,7 @@
                            struct stm_commit_log_entry_s *from,
                            struct stm_commit_log_entry_s *to)
 {
-    assert(STM_PSEGMENT->modification_lock);
+    assert(modification_lock_check_wrlock(STM_SEGMENT->segment_num));
     assert(from->rev_num >= to->rev_num);
     /* walk BACKWARDS the commit log and update the page 'pagenum',
        initially at revision 'from', until we reach the revision 'to'. */
@@ -199,8 +199,8 @@
 
     /* before copying anything, acquire modification locks from our and
        the other segment */
-    uint64_t to_lock = (1UL << copy_from_segnum)| (1UL << my_segnum);
-    acquire_modification_lock_set(to_lock);
+    uint64_t to_lock = (1UL << copy_from_segnum);
+    acquire_modification_lock_set(to_lock, my_segnum);
     pagecopy(get_virtual_page(my_segnum, pagenum),
              get_virtual_page(copy_from_segnum, pagenum));
 
@@ -223,7 +223,7 @@
     if (src_version->rev_num > target_version->rev_num)
         go_to_the_past(pagenum, src_version, target_version);
 
-    release_modification_lock_set(to_lock);
+    release_modification_lock_set(to_lock, my_segnum);
     release_all_privatization_locks();
 }
 
@@ -357,7 +357,7 @@
         }
 
         /* Find the set of segments we need to copy from and lock them: */
-        uint64_t segments_to_lock = 1UL << my_segnum;
+        uint64_t segments_to_lock = 0;
         cl = first_cl;
         while ((next_cl = cl->next) != NULL) {
             if (next_cl == INEV_RUNNING) {
@@ -375,8 +375,8 @@
 
         /* HERE */
 
-        acquire_privatization_lock(STM_SEGMENT->segment_num);
-        acquire_modification_lock_set(segments_to_lock);
+        acquire_privatization_lock(my_segnum);
+        acquire_modification_lock_set(segments_to_lock, my_segnum);
 
 
         /* import objects from first_cl to last_cl: */
@@ -466,8 +466,8 @@
         }
 
         /* done with modifications */
-        release_modification_lock_set(segments_to_lock);
-        release_privatization_lock(STM_SEGMENT->segment_num);
+        release_modification_lock_set(segments_to_lock, my_segnum);
+        release_privatization_lock(my_segnum);
     }
 
     return !needs_abort;
@@ -545,7 +545,7 @@
                time" as the attach to commit log. Otherwise, another thread may
                see the new CL entry, import it, look for backup copies in this
                segment and find the old backup copies! */
-            acquire_modification_lock(STM_SEGMENT->segment_num);
+            acquire_modification_lock_wr(STM_SEGMENT->segment_num);
         }
 
         /* try to attach to commit log: */
@@ -559,7 +559,7 @@
         }
 
         if (is_commit) {
-            release_modification_lock(STM_SEGMENT->segment_num);
+            release_modification_lock_wr(STM_SEGMENT->segment_num);
             /* XXX: unfortunately, if we failed to attach our CL entry,
                we have to re-add the WB_EXECUTED flags before we try to
                validate again because of said condition (s.a) */
@@ -596,7 +596,7 @@
 
         list_clear(STM_PSEGMENT->modified_old_objects);
         STM_PSEGMENT->last_commit_log_entry = new;
-        release_modification_lock(STM_SEGMENT->segment_num);
+        release_modification_lock_wr(STM_SEGMENT->segment_num);
     }
 }
 
@@ -692,7 +692,7 @@
         increment_total_allocated(slice_sz);
         memcpy(bk_slice, realobj + slice_off, slice_sz);
 
-        acquire_modification_lock(STM_SEGMENT->segment_num);
+        acquire_modification_lock_wr(STM_SEGMENT->segment_num);
         /* !! follows layout of "struct stm_undo_s" !! */
         STM_PSEGMENT->modified_old_objects = list_append3(
             STM_PSEGMENT->modified_old_objects,
@@ -700,7 +700,7 @@
             (uintptr_t)bk_slice,  /* bk_addr */
             NEW_SLICE(slice_off, slice_sz));
         dprintf(("> append slice %p, off=%lu, sz=%lu\n", bk_slice, slice_off, slice_sz));
-        release_modification_lock(STM_SEGMENT->segment_num);
+        release_modification_lock_wr(STM_SEGMENT->segment_num);
 
         slice_off += slice_sz;
     }
@@ -896,6 +896,8 @@
 
 static void touch_all_pages_of_obj(object_t *obj, size_t obj_size)
 {
+    /* XXX should this be simpler: just actually try to read a dummy
+       byte in each page? */
     int my_segnum = STM_SEGMENT->segment_num;
     uintptr_t end_page, first_page = ((uintptr_t)obj) / 4096UL;
 
@@ -1345,7 +1347,7 @@
 #pragma push_macro("STM_SEGMENT")
 #undef STM_PSEGMENT
 #undef STM_SEGMENT
-    assert(get_priv_segment(segment_num)->modification_lock);
+    assert(modification_lock_check_wrlock(segment_num));
 
     struct stm_priv_segment_info_s *pseg = get_priv_segment(segment_num);
     struct list_s *list = pseg->modified_old_objects;
@@ -1407,9 +1409,9 @@
             _reset_object_cards(pseg, item, CARD_CLEAR, false, false);
         });
 
-    acquire_modification_lock(segment_num);
+    acquire_modification_lock_wr(segment_num);
     reset_modified_from_backup_copies(segment_num);
-    release_modification_lock(segment_num);
+    release_modification_lock_wr(segment_num);
     _verify_cards_cleared_in_all_lists(pseg);
 
     stm_thread_local_t *tl = pseg->pub.running_thread;
diff --git a/c8/stm/core.h b/c8/stm/core.h
--- a/c8/stm/core.h
+++ b/c8/stm/core.h
@@ -74,11 +74,6 @@
 struct stm_priv_segment_info_s {
     struct stm_segment_info_s pub;
 
-    /* lock protecting from concurrent modification of
-       'modified_old_objects', page-revision-changes, ...
-       Always acquired in global order of segments to avoid deadlocks. */
-    uint8_t modification_lock;
-
     /* All the old objects (older than the current transaction) that
        the current transaction attempts to modify.  This is used to
        track the STM status: these are old objects that where written
@@ -359,53 +354,3 @@
         release_privatization_lock(l);
     }
 }
-
-
-
-/* Modification locks are used to prevent copying from a segment
-   where either the revision of some pages is inconsistent with the
-   rest, or the modified_old_objects list is being modified (bk_copys).
-
-   Lock ordering: acquire privatization lock around acquiring a set
-   of modification locks!
-*/
-
-static inline void acquire_modification_lock(int segnum)
-{
-    spinlock_acquire(get_priv_segment(segnum)->modification_lock);
-}
-
-static inline void release_modification_lock(int segnum)
-{
-    spinlock_release(get_priv_segment(segnum)->modification_lock);
-}
-
-static inline void acquire_modification_lock_set(uint64_t seg_set)
-{
-    assert(NB_SEGMENTS <= 64);
-    OPT_ASSERT(seg_set < (1 << NB_SEGMENTS));
-
-    /* acquire locks in global order */
-    int i;
-    for (i = 0; i < NB_SEGMENTS; i++) {
-        if ((seg_set & (1 << i)) == 0)
-            continue;
-
-        spinlock_acquire(get_priv_segment(i)->modification_lock);
-    }
-}
-
-static inline void release_modification_lock_set(uint64_t seg_set)
-{
-    assert(NB_SEGMENTS <= 64);
-    OPT_ASSERT(seg_set < (1 << NB_SEGMENTS));
-
-    int i;
-    for (i = 0; i < NB_SEGMENTS; i++) {
-        if ((seg_set & (1 << i)) == 0)
-            continue;
-
-        assert(get_priv_segment(i)->modification_lock);
-        spinlock_release(get_priv_segment(i)->modification_lock);
-    }
-}
diff --git a/c8/stm/forksupport.c b/c8/stm/forksupport.c
--- a/c8/stm/forksupport.c
+++ b/c8/stm/forksupport.c
@@ -120,6 +120,9 @@
        just release these locks early */
     s_mutex_unlock();
 
+    /* Re-init these locks; might be needed after a fork() */
+    setup_modification_locks();
+
 
     /* Unregister all other stm_thread_local_t, mostly as a way to free
        the memory used by the shadowstacks
diff --git a/c8/stm/locks.h b/c8/stm/locks.h
new file mode 100644
--- /dev/null
+++ b/c8/stm/locks.h
@@ -0,0 +1,124 @@
+
+/* Modification locks protect from concurrent modification of
+   'modified_old_objects', page-revision-changes, ...
+
+   Modification locks are used to prevent copying from a segment
+   where either the revision of some pages is inconsistent with the
+   rest, or the modified_old_objects list is being modified (bk_copys).
+
+   Lock ordering: acquire privatization lock around acquiring a set
+   of modification locks!
+*/
+
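+/* aligned(64): each lock sits alone in its cache line, avoiding
+   false sharing between different segments' locks */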
+typedef struct {
+    pthread_rwlock_t lock;
+#ifndef NDEBUG
+    volatile bool write_locked;
+#endif
+} modification_lock_t __attribute__((aligned(64)));
+
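+/* _modlocks[N - 1] belongs to segment N; segment numbers start at 1 */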
+static modification_lock_t _modlocks[NB_SEGMENTS - 1];
+
+
+static void setup_modification_locks(void)
+{
+    int i;
+    for (i = 1; i < NB_SEGMENTS; i++) {
+        if (pthread_rwlock_init(&_modlocks[i - 1].lock, NULL) != 0)
+            stm_fatalerror("pthread_rwlock_init: %m");
+    }
+}
+
+static void teardown_modification_locks(void)
+{
+    int i;
+    for (i = 1; i < NB_SEGMENTS; i++)
+        pthread_rwlock_destroy(&_modlocks[i - 1].lock);
+    memset(_modlocks, 0, sizeof(_modlocks));
+}
+
+
+static inline void acquire_modification_lock_wr(int segnum)
+{
+    if (UNLIKELY(pthread_rwlock_wrlock(&_modlocks[segnum - 1].lock) != 0))
+        stm_fatalerror("pthread_rwlock_wrlock: %m");
+#ifndef NDEBUG
+    assert(!_modlocks[segnum - 1].write_locked);
+    _modlocks[segnum - 1].write_locked = true;
+#endif
+}
+
+static inline void release_modification_lock_wr(int segnum)
+{
+#ifndef NDEBUG
+    assert(_modlocks[segnum - 1].write_locked);
+    _modlocks[segnum - 1].write_locked = false;
+#endif
+    if (UNLIKELY(pthread_rwlock_unlock(&_modlocks[segnum - 1].lock) != 0))
+        stm_fatalerror("pthread_rwlock_unlock(wr): %m");
+}
+
+static void acquire_modification_lock_set(uint64_t readset, int write)
+{
+    /* acquire the modification lock in 'read' mode for all segments
+       in 'readset', plus the modification lock in 'write' mode for
+       the segment number 'write'.
+    */
+    assert(NB_SEGMENTS <= 64);
+    OPT_ASSERT(readset < (1 << NB_SEGMENTS));
+    assert((readset & 1) == 0);       /* segment numbers normally start at 1 */
+    assert(0 <= write && write < NB_SEGMENTS);     /* use 0 to mean "nobody" */
+
+    /* acquire locks in global order */
+    readset |= (1UL << write);
+    int i;
+    for (i = 1; i < NB_SEGMENTS; i++) {
+        if ((readset & (1UL << i)) == 0)
+            continue;
+        if (i == write) {
+            acquire_modification_lock_wr(write);
+        }
+        else {
+            if (UNLIKELY(pthread_rwlock_rdlock(&_modlocks[i - 1].lock) != 0))
+                stm_fatalerror("pthread_rwlock_rdlock: %m");
+        }
+    }
+}
+
+static void release_modification_lock_set(uint64_t readset, int write)
+{
+    assert(NB_SEGMENTS <= 64);
+    OPT_ASSERT(readset < (1 << NB_SEGMENTS));
+
+    /* release lock order does not matter; prefer early release of
+       the write lock */
+    if (write > 0) {
+        release_modification_lock_wr(write);
+        readset &= ~(1UL << write);
+    }
+    int i;
+    for (i = 1; i < NB_SEGMENTS; i++) {
+        if ((readset & (1UL << i)) == 0)
+            continue;
+        if (UNLIKELY(pthread_rwlock_unlock(&_modlocks[i - 1].lock) != 0))
+            stm_fatalerror("pthread_rwlock_unlock(rd): %m");
+    }
+}
+
+#ifndef NDEBUG
+static bool modification_lock_check_rdlock(int segnum)
+{
+    assert(segnum > 0);
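+    /* pthread_rwlock has no direct "held in read mode?" query, so we
+       approximate: the lock is not in write mode, and trywrlock
+       fails because somebody does hold it */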
+    if (_modlocks[segnum - 1].write_locked)
+        return false;
+    if (pthread_rwlock_trywrlock(&_modlocks[segnum - 1].lock) == 0) {
+        pthread_rwlock_unlock(&_modlocks[segnum - 1].lock);
+        return false;
+    }
+    return true;
+}
+static bool modification_lock_check_wrlock(int segnum)
+{
+    return segnum == 0 || _modlocks[segnum - 1].write_locked;
+}
+#endif
diff --git a/c8/stm/setup.c b/c8/stm/setup.c
--- a/c8/stm/setup.c
+++ b/c8/stm/setup.c
@@ -127,6 +127,7 @@
        private range of addresses.
     */
 
+    setup_modification_locks();
     setup_sync();
     setup_nursery();
     setup_gcpage();
@@ -174,6 +175,7 @@
     teardown_gcpage();
     teardown_smallmalloc();
     teardown_pages();
+    teardown_modification_locks();
 }
 
 static void _shadowstack_trap_page(char *start, int prot)
diff --git a/c8/stmgc.c b/c8/stmgc.c
--- a/c8/stmgc.c
+++ b/c8/stmgc.c
@@ -17,6 +17,7 @@
 #include "stm/marker.h"
 #include "stm/rewind_setjmp.h"
 #include "stm/finalizer.h"
+#include "stm/locks.h"
 
 #include "stm/misc.c"
 #include "stm/list.c"

