[pypy-commit] stmgc c7-more-segments: in-progress

arigo noreply at buildbot.pypy.org
Sun Mar 16 09:29:56 CET 2014


Author: Armin Rigo <arigo at tunes.org>
Branch: c7-more-segments
Changeset: r1027:67d4e6e71904
Date: 2014-03-16 09:29 +0100
http://bitbucket.org/pypy/stmgc/changeset/67d4e6e71904/

Log:	in-progress

diff --git a/c7/stm/core.c b/c7/stm/core.c
--- a/c7/stm/core.c
+++ b/c7/stm/core.c
@@ -29,6 +29,8 @@
        safepoints that may be issued in write_write_contention_management(). */
     stm_read(obj);
 
+    /* XXX XXX XXX make the logic of write-locking objects optional! */
+
     /* claim the write-lock for this object.  In case we have been running
        the same transaction for a long while, the object can already be in
        'modified_old_objects' (but, because it had GCFLAG_WRITE_BARRIER,
@@ -59,12 +61,12 @@
            the common case. Otherwise, we need to compute it based on
            its location and size. */
         if ((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0) {
-            pages_privatize(first_page, 1);
+            page_privatize(first_page);
         }
         else {
             char *realobj;
             size_t obj_size;
-            uintptr_t end_page;
+            uintptr_t i, end_page;
 
             /* get the size of the object */
             realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj);
@@ -73,7 +75,9 @@
             /* that's the page *following* the last page with the object */
             end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL;
 
-            pages_privatize(first_page, end_page - first_page);
+            for (i = first_page; i < end_page; i++) {
+                page_privatize(i);
+            }
         }
     }
     else if (write_locks[lock_idx] == lock_num) {
@@ -108,7 +112,7 @@
     /* for sanity, check that all other segment copies of this object
        still have the flag */
     long i;
-    for (i = 0; i < NB_SEGMENTS; i++) {
+    for (i = 1; i <= NB_SEGMENTS; i++) {
         if (i != STM_SEGMENT->segment_num)
             assert(((struct object_s *)REAL_ADDRESS(get_segment_base(i), obj))
                    ->stm_flags & GCFLAG_WRITE_BARRIER);
@@ -193,43 +197,51 @@
 
 /************************************************************/
 
-#if NB_SEGMENTS != 2
-# error "The logic in the functions below only works with two segments"
-#endif
 
 static bool detect_write_read_conflicts(void)
 {
-    long remote_num = 1 - STM_SEGMENT->segment_num;
-    char *remote_base = get_segment_base(remote_num);
-    uint8_t remote_version = get_segment(remote_num)->transaction_read_version;
+    /* Detect conflicts of the form: we want to commit a write to an object,
+       but the same object was also read in a different thread.
+    */
+    long i;
+    for (i = 1; i <= NB_SEGMENTS; i++) {
 
-    if (get_priv_segment(remote_num)->transaction_state == TS_NONE)
-        return false;    /* no need to check */
+        if (i == STM_SEGMENT->segment_num)
+            continue;
 
-    if (is_aborting_now(remote_num))
-        return false;    /* no need to check: is pending immediate abort */
+        if (get_priv_segment(i)->transaction_state == TS_NONE)
+            continue;    /* no need to check */
 
-    LIST_FOREACH_R(
-        STM_PSEGMENT->modified_old_objects,
-        object_t * /*item*/,
-        ({
-            if (was_read_remote(remote_base, item, remote_version)) {
-                /* A write-read conflict! */
-                write_read_contention_management(remote_num);
+        if (is_aborting_now(i))
+            continue;    /* no need to check: is pending immediate abort */
 
-                /* If we reach this point, we didn't abort, but maybe we
-                   had to wait for the other thread to commit.  If we
-                   did, then we have to restart committing from our call
-                   to synchronize_all_threads(). */
-                return true;
-            }
-        }));
+        char *remote_base = get_segment_base(i);
+        uint8_t remote_version = get_segment(i)->transaction_read_version;
+
+        LIST_FOREACH_R(
+            STM_PSEGMENT->modified_old_objects,
+            object_t * /*item*/,
+            ({
+                if (was_read_remote(remote_base, item, remote_version)) {
+                    /* A write-read conflict! */
+                    write_read_contention_management(i);
+
+                    /* If we reach this point, we didn't abort, but maybe we
+                       had to wait for the other thread to commit.  If we
+                       did, then we have to restart committing from our call
+                       to synchronize_all_threads(). */
+                    return true;
+                }
+            }));
+    }
 
     return false;
 }
 
 static void synchronize_overflow_object_now(object_t *obj)
 {
+    abort();//XXX
+#if 0
     assert(!_is_young(obj));
     assert((obj->stm_flags & GCFLAG_SMALL_UNIFORM) == 0);
     assert(obj->stm_flags & GCFLAG_WRITE_BARRIER);
@@ -264,6 +276,7 @@
             long i;
             char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start);
             for (i = 0; i < NB_SEGMENTS; i++) {
+                abort();//XXX
                 if (i != STM_SEGMENT->segment_num) {
                     char *dst = REAL_ADDRESS(get_segment_base(i), start);
                     memcpy(dst, src, copy_size);
@@ -273,6 +286,68 @@
 
         start = (start + 4096) & ~4095;
     } while (first_page++ < last_page);
+#endif
+}
+
+static void synchronize_object_now(object_t *obj)
+{
+    /* Assume that the version of 'obj' in the shared pages is up-to-date.
+       Assume also that the version in our own private pages is up-to-date.
+       This function updates the private pages of the other segments.
+    */
+    assert(!_is_young(obj));
+    assert(obj->stm_flags & GCFLAG_WRITE_BARRIER);
+
+    uintptr_t start = (uintptr_t)obj;
+    uintptr_t first_page = start / 4096UL;
+    long i;
+
+    if (obj->stm_flags & GCFLAG_SMALL_UNIFORM) {
+        abort();//XXX WRITE THE FAST CASE
+    }
+    else {
+        char *realobj = REAL_ADDRESS(stm_object_pages, obj);
+        ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj);
+        assert(obj_size >= 16);
+        uintptr_t end = start + obj_size;
+        uintptr_t last_page = (end - 1) / 4096UL;
+
+        for (; first_page <= last_page; first_page++) {
+
+            for (i = 1; i <= NB_SEGMENTS; i++) {
+
+                if (i == STM_SEGMENT->segment_num)
+                    continue;
+
+                if (!is_private_page(i, first_page))
+                    continue;
+
+                /* The page is private in segment i.  We need to diffuse
+                   this fragment of the object from the shared page to that
+                   segment's private copy of the page. */
+
+                uintptr_t copy_size;
+                if (first_page == last_page) {
+                    /* this is the final fragment */
+                    copy_size = end - start;
+                }
+                else {
+                    /* this is a non-final fragment, going up to the
+                       page's end */
+                    copy_size = 4096 - (start & 4095);
+                }
+
+                /* double-check that the result fits in one page */
+                assert(copy_size > 0);
+                assert(copy_size + (start & 4095) <= 4096);
+
+                char *src = REAL_ADDRESS(stm_object_pages, start);
+                char *dst = REAL_ADDRESS(get_segment_base(i), start);
+                memcpy(dst, src, copy_size);
+            }
+            start = (start + 4096) & ~4095;
+        }
+    }
 }
 
 static void push_overflow_objects_from_privatized_pages(void)
@@ -286,22 +361,12 @@
 
 static void push_modified_to_other_segments(void)
 {
-    long remote_num = 1 - STM_SEGMENT->segment_num;
     char *local_base = STM_SEGMENT->segment_base;
-    char *remote_base = get_segment_base(remote_num);
-    bool remote_active =
-        (get_priv_segment(remote_num)->transaction_state != TS_NONE &&
-         get_segment(remote_num)->nursery_end != NSE_SIGABORT);
 
     LIST_FOREACH_R(
         STM_PSEGMENT->modified_old_objects,
         object_t * /*item*/,
         ({
-            if (remote_active) {
-                assert(!was_read_remote(remote_base, item,
-                    get_segment(remote_num)->transaction_read_version));
-            }
-
             /* clear the write-lock (note that this runs with all other
                threads paused, so no need to be careful about ordering) */
             uintptr_t lock_idx = (((uintptr_t)item) >> 4) - WRITELOCK_START;
@@ -313,11 +378,14 @@
                minor_collection() */
             assert((item->stm_flags & GCFLAG_WRITE_BARRIER) != 0);
 
-            /* copy the modified object to the other segment */
+            /* copy the modified object to the shared copy */
             char *src = REAL_ADDRESS(local_base, item);
-            char *dst = REAL_ADDRESS(remote_base, item);
+            char *dst = REAL_ADDRESS(stm_object_pages, item);
             ssize_t size = stmcb_size_rounded_up((struct object_s *)src);
             memcpy(dst, src, size);
+
+            /* copy the object to the other private pages as needed */
+            synchronize_object_now(item);
         }));
 
     list_clear(STM_PSEGMENT->modified_old_objects);
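
Illustration (not part of the changeset): the per-page fragment arithmetic
used by synchronize_object_now() above can be checked in isolation with the
standalone sketch below.  The 4096-byte page size comes from the diff; the
example offset and size are assumptions, and the loop body mirrors the one
in the function.

    /* Split the byte range [start, start+obj_size) of an object into
       per-page fragments, one fragment per 4096-byte page it overlaps,
       and print the copy_size that each memcpy() would use. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uintptr_t start = 8180;        /* example object offset (assumed) */
        uintptr_t obj_size = 40;       /* example object size (assumed) */
        uintptr_t end = start + obj_size;
        uintptr_t first_page = start / 4096UL;
        uintptr_t last_page = (end - 1) / 4096UL;

        for (; first_page <= last_page; first_page++) {
            uintptr_t copy_size;
            if (first_page == last_page)
                copy_size = end - start;            /* final fragment */
            else
                copy_size = 4096 - (start & 4095);  /* up to the page's end */

            printf("page %lu: copy %lu bytes starting at offset %lu\n",
                   (unsigned long)first_page, (unsigned long)copy_size,
                   (unsigned long)start);

            start = (start + 4096) & ~4095;         /* advance to next page */
        }
        return 0;   /* prints: 12 bytes in page 1, then 28 bytes in page 2 */
    }
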
diff --git a/c7/stm/core.h b/c7/stm/core.h
--- a/c7/stm/core.h
+++ b/c7/stm/core.h
@@ -20,7 +20,7 @@
 #define MAP_PAGES_FLAGS     (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE)
 #define NB_NURSERY_PAGES    (STM_GC_NURSERY/4)
 
-#define TOTAL_MEMORY          (NB_PAGES * 4096UL * NB_SEGMENTS)
+#define TOTAL_MEMORY          (NB_PAGES * 4096UL * (1 + NB_SEGMENTS))
 #define READMARKER_END        ((NB_PAGES * 4096UL) >> 4)
 #define FIRST_OBJECT_PAGE     ((READMARKER_END + 4095) / 4096UL)
 #define FIRST_NURSERY_PAGE    FIRST_OBJECT_PAGE
@@ -75,6 +75,13 @@
 struct stm_priv_segment_info_s {
     struct stm_segment_info_s pub;
 
+    /* Dict whose keys are shared page numbers, and whose values are
+       the corresponding private page numbers. */
+    struct tree_s *private_page_mapping;
+
+    /* Head of a free list of private pages. */
+    uintptr_t private_free_page_num;
+
     /* List of old objects (older than the current transaction) that the
        current transaction attempts to modify.  This is used to track
        the STM status: they are old objects that were written to and
@@ -178,10 +185,6 @@
 static char *stm_object_pages;
 static stm_thread_local_t *stm_all_thread_locals = NULL;
 
-#ifdef STM_TESTS
-static char *stm_other_pages;
-#endif
-
 static uint8_t write_locks[WRITELOCK_END - WRITELOCK_START];
 
 
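
As an aside (illustrative only, not from the diff): the new TOTAL_MEMORY
definition reserves one extra copy of the NB_PAGES range for segment 0,
which holds the shared pages, with segments 1..NB_SEGMENTS following it in
the same mmap.  The sketch below prints the resulting offsets, assuming
that get_segment_base() computes stm_object_pages plus segment_num *
NB_PAGES * 4096UL, and using made-up values for NB_PAGES and NB_SEGMENTS.

    /* Linear layout implied by
       TOTAL_MEMORY = NB_PAGES * 4096UL * (1 + NB_SEGMENTS). */
    #include <stdio.h>
    #include <stdint.h>

    #define NB_PAGES     1000UL     /* assumed value, for the sketch only */
    #define NB_SEGMENTS  4L         /* assumed value, for the sketch only */

    static uintptr_t segment_offset(long seg_num)
    {
        /* offset of a segment's view, relative to stm_object_pages */
        return (uintptr_t)seg_num * NB_PAGES * 4096UL;
    }

    int main(void)
    {
        long i;
        for (i = 0; i <= NB_SEGMENTS; i++)
            printf("segment %ld at offset 0x%lx  (%s)\n", i,
                   (unsigned long)segment_offset(i),
                   i == 0 ? "shared copies" : "private view of one thread");
        printf("TOTAL_MEMORY = 0x%lx bytes\n",
               (unsigned long)(NB_PAGES * 4096UL * (1 + NB_SEGMENTS)));
        return 0;
    }
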
diff --git a/c7/stm/misc.c b/c7/stm/misc.c
--- a/c7/stm/misc.c
+++ b/c7/stm/misc.c
@@ -43,7 +43,7 @@
 #ifdef STM_TESTS
 uint8_t _stm_get_page_flag(uintptr_t index)
 {
-    return flag_page_private[index];
+    abort();//XXX
 }
 
 long _stm_count_modified_old_objects(void)
diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c
--- a/c7/stm/nursery.c
+++ b/c7/stm/nursery.c
@@ -26,7 +26,7 @@
     _stm_nursery_start = NURSERY_START;
 
     long i;
-    for (i = 0; i < NB_SEGMENTS; i++) {
+    for (i = 1; i <= NB_SEGMENTS; i++) {
         get_segment(i)->nursery_current = (stm_char *)NURSERY_START;
         get_segment(i)->nursery_end = NURSERY_END;
     }
@@ -378,7 +378,7 @@
     _stm_nursery_start = NURSERY_END - free_count;
 
     long i;
-    for (i = 0; i < NB_SEGMENTS; i++) {
+    for (i = 1; i <= NB_SEGMENTS; i++) {
         if ((uintptr_t)get_segment(i)->nursery_current < _stm_nursery_start)
             get_segment(i)->nursery_current = (stm_char *)_stm_nursery_start;
     }
@@ -411,7 +411,7 @@
     int original_num = STM_SEGMENT->segment_num;
     long i;
 
-    for (i = 0; i < NB_SEGMENTS; i++) {
+    for (i = 1; i <= NB_SEGMENTS; i++) {
         struct stm_priv_segment_info_s *pseg = get_priv_segment(i);
         if (MINOR_NOTHING_TO_DO(pseg))  /*TS_NONE segments have NOTHING_TO_DO*/
             continue;
diff --git a/c7/stm/pages.c b/c7/stm/pages.c
--- a/c7/stm/pages.c
+++ b/c7/stm/pages.c
@@ -102,85 +102,44 @@
        segment 0. */
     uintptr_t i;
     assert(_has_mutex_pages());
-    for (i = 1; i < NB_SEGMENTS; i++) {
+    for (i = 1; i <= NB_SEGMENTS; i++) {
         char *segment_base = get_segment_base(i);
         d_remap_file_pages(segment_base + pagenum * 4096UL,
                            count * 4096UL, pagenum);
     }
-    for (i = 0; i < count; i++)
-        flag_page_private[pagenum + i] = SHARED_PAGE;
 }
 
-#if 0
-static void pages_make_shared_again(uintptr_t pagenum, uintptr_t count)
+static void page_privatize(uintptr_t pagenum)
 {
-    /* Same as pages_initialize_shared(), but tries hard to minimize the
-       total number of pages that remap_file_pages() must handle, by
-       fragmenting calls as much as possible (the overhead of one system
-       call appears smaller as the overhead per page). */
-    uintptr_t start, i = 0;
-    while (i < count) {
-        if (flag_page_private[pagenum + (i++)] == SHARED_PAGE)
-            continue;
-        start = i;    /* first index of a private page */
-        while (1) {
-            i++;
-            if (i == count || flag_page_private[pagenum + i] == SHARED_PAGE)
-                break;
-        }
-        pages_initialize_shared(pagenum + start, i - start);
+    wlog_t *item;
+    TREE_FIND(*STM_PSEGMENT->private_page_mapping, pagenum, item,
+              goto not_found);
+
+    /* the page is already privatized */
+    return;
+
+ not_found:;
+    /* look up the next free page */
+    uintptr_t free_page_num = STM_PSEGMENT->private_free_page_num;
+
+    /* "mount" it in the segment */
+    char *new_page = STM_SEGMENT->segment_base + pagenum * 4096UL;
+    d_remap_file_pages(new_page, 4096,
+                       NB_PAGES * STM_SEGMENT->segment_num + free_page_num);
+    increment_total_allocated(4096);
+
+    /* update private_free_page_num */
+    uintptr_t future_page = *(uintptr_t *)new_page;
+    if (future_page == 0) {
+        future_page = free_page_num + 1;
     }
-}
-#endif
+    STM_PSEGMENT->private_free_page_num = future_page;
 
-static void privatize_range(uintptr_t pagenum, uintptr_t count)
-{
-    ssize_t pgoff1 = pagenum;
-    ssize_t pgoff2 = pagenum + NB_PAGES;
-    ssize_t localpgoff = pgoff1 + NB_PAGES * STM_SEGMENT->segment_num;
-    ssize_t otherpgoff = pgoff1 + NB_PAGES * (1 - STM_SEGMENT->segment_num);
+    /* copy the content from the shared (segment 0) source */
+    pagecopy(new_page, stm_object_pages + pagenum * 4096UL);
 
-    void *localpg = stm_object_pages + localpgoff * 4096UL;
-    void *otherpg = stm_object_pages + otherpgoff * 4096UL;
-
-    memset(flag_page_private + pagenum, REMAPPING_PAGE, count);
-    d_remap_file_pages(localpg, count * 4096, pgoff2);
-    uintptr_t i;
-    for (i = 0; i < count; i++) {
-        pagecopy(localpg + 4096 * i, otherpg + 4096 * i);
-    }
-    write_fence();
-    memset(flag_page_private + pagenum, PRIVATE_PAGE, count);
-    increment_total_allocated(4096 * count);
-}
-
-static void _pages_privatize(uintptr_t pagenum, uintptr_t count)
-{
-    mutex_pages_lock();
-
-    uintptr_t page_start_range = pagenum;
-    uintptr_t pagestop = pagenum + count;
-
-    for (; pagenum < pagestop; pagenum++) {
-        uint8_t prev = flag_page_private[pagenum];
-        if (prev == PRIVATE_PAGE) {
-            if (pagenum > page_start_range) {
-                privatize_range(page_start_range,
-                                pagenum - page_start_range);
-            }
-            page_start_range = pagenum + 1;
-        }
-        else {
-            assert(prev == SHARED_PAGE);
-        }
-    }
-
-    if (pagenum > page_start_range) {
-        privatize_range(page_start_range,
-                        pagenum - page_start_range);
-    }
-
-    mutex_pages_unlock();
+    /* update private_page_mapping */
+    tree_insert(STM_PSEGMENT->private_page_mapping, pagenum, free_page_num);
 }
 
 #if 0
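
For reference (illustrative only, not from the diff): the "update
private_free_page_num" step in page_privatize() above relies on the
convention that a freed private page slot stores the number of the next
free slot in its first word, a zero word meaning "no explicit link, take
the next slot in sequence"; the freeing side itself is not in this diff
(see the comment in pages.h).  A minimal model of just that bookkeeping,
with a plain array of words standing in for the segment's private slots:

    #include <stdio.h>
    #include <stdint.h>

    #define NUM_SLOTS  16      /* assumed number of private slots */

    static uintptr_t slot_first_word[NUM_SLOTS];  /* first word of each slot */
    static uintptr_t free_slot_num = 3;           /* like private_free_page_num */

    static uintptr_t allocate_private_slot(void)
    {
        uintptr_t num = free_slot_num;
        /* read the link stored in the slot; 0 => fall back to num + 1 */
        uintptr_t next = slot_first_word[num];
        if (next == 0)
            next = num + 1;
        free_slot_num = next;
        return num;
    }

    static void free_private_slot(uintptr_t num)
    {
        /* push the slot back onto the front of the free list */
        slot_first_word[num] = free_slot_num;
        free_slot_num = num;
    }

    int main(void)
    {
        uintptr_t a = allocate_private_slot();   /* 3; free list moves to 4 */
        uintptr_t b = allocate_private_slot();   /* 4; free list moves to 5 */
        free_private_slot(a);                    /* slot 3 now links to 5 */
        uintptr_t c = allocate_private_slot();   /* 3 again; free list -> 5 */
        printf("a=%lu b=%lu c=%lu next=%lu\n", (unsigned long)a,
               (unsigned long)b, (unsigned long)c,
               (unsigned long)free_slot_num);
        return 0;
    }
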
diff --git a/c7/stm/pages.h b/c7/stm/pages.h
--- a/c7/stm/pages.h
+++ b/c7/stm/pages.h
@@ -4,8 +4,8 @@
 
    The shared copy of a page is stored in the mmap at the file offset
    corresponding to the segment 0 offset (with all other segments
-   remapping to the segment 0 offset).  Private copies are made in the
-   offset from segment 1 (and if full, more segments afterwards),
+   remapping to the segment 0 offset).  Private copies for segment N are
+   made in the offset from segment N (for 1 <= N <= NB_SEGMENTS),
    picking file offsets that are simply the next free ones.  Each
    segment maintains a tree 'private_page_mapping', which maps shared
    pages to private copies.
@@ -15,14 +15,14 @@
    The pages thus freed are recorded into a free list, and can be reused
    as the private copies of the following (unrelated) pages.
 
-   Note that this page manipulation logic is independent from actually
-   tracking which objects are uncommitted, which occurs at the level of
-   segment-relative offsets; and propagating changes during commit,
-   which is done by copying objects (not pages) to the same offset
-   relative to a different segment.
+   Note that this page manipulation logic uses remap_file_pages(): once a
+   page is shared or privatized, the aliasing is handled entirely by the
+   CPU's memory management unit, with no extra per-access cost.  It should
+   not be confused with the logic of tracking which objects are
+   old-and-committed, old-but-modified, overflow objects, and so on
+   (which works at the object granularity, not the page granularity).
 */
 
-static void _pages_privatize(uintptr_t pagenum, uintptr_t count);
+static void page_privatize(uintptr_t pagenum);
 static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count);
 
 static void mutex_pages_lock(void);
@@ -32,14 +32,8 @@
 static void force_major_collection_request(void);
 static void reset_major_collection_requested(void);
 
-inline static void pages_privatize(uintptr_t pagenum, uintptr_t count) {
-    /* This is written a bit carefully so that a call with a constant
-       count == 1 will turn this loop into just one "if". */
-    while (flag_page_private[pagenum] == PRIVATE_PAGE) {
-        if (!--count) {
-            return;
-        }
-        pagenum++;
-    }
-    _pages_privatize(pagenum, count);
+static inline bool is_private_page(long segnum, uintptr_t pagenum)
+{
+    return tree_contains(get_priv_segment(segnum)->private_page_mapping,
+                         pagenum);
 }
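
Illustrative only (not from the diff): the role of the per-segment
'private_page_mapping' tree described above, reduced to the two operations
the new code needs -- the lookup behind is_private_page() and the insert
done at the end of page_privatize().  The real code uses stmgc's tree_s
type; the flat array below is only a stand-in, with 0 meaning "no entry".

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define NB_SHARED_PAGES  64     /* assumed, kept small for the sketch */

    /* shared page number -> private slot number (0 = not privatized) */
    static uintptr_t mapping[NB_SHARED_PAGES];

    static bool is_private_page(uintptr_t pagenum)
    {
        /* stands in for tree_contains(private_page_mapping, pagenum) */
        return mapping[pagenum] != 0;
    }

    static void record_privatization(uintptr_t pagenum, uintptr_t slot)
    {
        /* stands in for tree_insert(private_page_mapping, pagenum, slot) */
        mapping[pagenum] = slot;
    }

    int main(void)
    {
        record_privatization(10, 200);  /* shared page 10 -> private slot 200 */
        printf("page 10 private? %d\n", (int)is_private_page(10));    /* 1 */
        printf("page 11 private? %d\n", (int)is_private_page(11));    /* 0 */
        return 0;
    }
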
diff --git a/c7/stm/setup.c b/c7/stm/setup.c
--- a/c7/stm/setup.c
+++ b/c7/stm/setup.c
@@ -26,12 +26,15 @@
     if (stm_object_pages == MAP_FAILED)
         stm_fatalerror("initial stm_object_pages mmap() failed: %m\n");
 
+    /* Segment 0 is not used to run transactions, but to contain the
+       shared copy of the pages.  We mprotect these pages up front so that
+       accesses fail, from the start of segment 0 up to and including the
+       pages that correspond to the nurseries of the other segments. */
+    mprotect(stm_object_pages, END_NURSERY_PAGE * 4096UL, PROT_NONE);
+
     long i;
-    for (i = 0; i < NB_SEGMENTS; i++) {
+    for (i = 1; i <= NB_SEGMENTS; i++) {
         char *segment_base = get_segment_base(i);
-#ifdef STM_TESTS
-        stm_other_pages = segment_base;
-#endif
 
         /* In each segment, the first page is where TLPREFIX'ed
            NULL accesses land.  We mprotect it so that accesses fail. */
@@ -39,7 +42,7 @@
 
         /* Fill the TLS page (page 1) with 0xDC, for debugging */
         memset(REAL_ADDRESS(segment_base, 4096), 0xDC, 4096);
-        /* Make a "hole" at STM_PSEGMENT */
+        /* Make a "hole" at STM_PSEGMENT (which includes STM_SEGMENT) */
         memset(REAL_ADDRESS(segment_base, STM_PSEGMENT), 0,
                sizeof(*STM_PSEGMENT));
 
@@ -49,11 +52,14 @@
                      (FIRST_READMARKER_PAGE - 2) * 4096UL,
                      PROT_NONE);
 
+        /* Initialize STM_PSEGMENT */
         struct stm_priv_segment_info_s *pr = get_priv_segment(i);
-        assert(i + 1 < 255);   /* 255 is WL_VISITED in gcpage.c */
-        pr->write_lock_num = i + 1;
+        assert(1 <= i && i < 255);   /* 255 is WL_VISITED in gcpage.c */
+        pr->write_lock_num = i;
         pr->pub.segment_num = i;
         pr->pub.segment_base = segment_base;
+        pr->private_page_mapping = tree_create();
+        pr->private_free_page_num = END_NURSERY_PAGE;
         pr->objects_pointing_to_nursery = NULL;
         pr->large_overflow_objects = NULL;
         pr->modified_old_objects = list_create();
@@ -62,7 +68,7 @@
         pr->young_outside_nursery = tree_create();
         pr->nursery_objects_shadows = tree_create();
         pr->callbacks_on_abort = tree_create();
-        pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * (i + 1);
+        pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i;
         highest_overflow_number = pr->overflow_number;
     }
 
@@ -73,10 +79,6 @@
        STM_SEGMENT->transaction_read_version never contains zero,
        so a null read marker means "not read" whatever the
        current transaction_read_version is.
-
-       The creation markers are initially zero, which is correct:
-       it means "objects of this line of 256 bytes have not been
-       allocated by the current transaction."
     */
 
     setup_sync();
@@ -92,7 +94,7 @@
     assert(!_has_mutex());
 
     long i;
-    for (i = 0; i < NB_SEGMENTS; i++) {
+    for (i = 1; i <= NB_SEGMENTS; i++) {
         struct stm_priv_segment_info_s *pr = get_priv_segment(i);
         assert(pr->objects_pointing_to_nursery == NULL);
         assert(pr->large_overflow_objects == NULL);
@@ -107,8 +109,6 @@
     munmap(stm_object_pages, TOTAL_MEMORY);
     stm_object_pages = NULL;
 
-    memset(flag_page_private, 0, sizeof(flag_page_private));
-
     teardown_core();
     teardown_sync();
     teardown_gcpage();
@@ -146,14 +146,14 @@
         tl->prev = stm_all_thread_locals->prev;
         stm_all_thread_locals->prev->next = tl;
         stm_all_thread_locals->prev = tl;
-        num = tl->prev->associated_segment_num + 1;
+        num = tl->prev->associated_segment_num;
     }
     tl->thread_local_obj = NULL;
 
     /* assign numbers consecutively, but that's for tests; we could also
        assign the same number to all of them and they would get their own
        numbers automatically. */
-    num = num % NB_SEGMENTS;
+    num = (num % NB_SEGMENTS) + 1;
     tl->associated_segment_num = num;
     _init_shadow_stack(tl);
     set_gs_register(get_segment_base(num));
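
A quick check (illustrative only, not from the diff): the registration code
above switches 'associated_segment_num' from the range 0..NB_SEGMENTS-1 to
1..NB_SEGMENTS, so the wrap-around becomes (num % NB_SEGMENTS) + 1 and can
never hand out segment 0, which now holds only the shared pages.  With an
assumed NB_SEGMENTS:

    #include <stdio.h>

    #define NB_SEGMENTS  4      /* assumed value, for the sketch only */

    int main(void)
    {
        int num = NB_SEGMENTS;  /* segment number of the last registered tl */
        int i;
        for (i = 0; i < 10; i++) {
            num = (num % NB_SEGMENTS) + 1;
            printf("%d ", num);             /* prints 1 2 3 4 1 2 3 4 1 2 */
        }
        printf("\n");
        return 0;
    }
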
diff --git a/c7/stm/sync.c b/c7/stm/sync.c
--- a/c7/stm/sync.c
+++ b/c7/stm/sync.c
@@ -30,7 +30,7 @@
         pthread_mutex_t global_mutex;
         pthread_cond_t cond[_C_TOTAL];
         /* some additional pieces of global state follow */
-        uint8_t in_use[NB_SEGMENTS];   /* 1 if running a pthread */
+        uint8_t in_use1[NB_SEGMENTS];   /* 1 if running a pthread */
         uint64_t global_time;
     };
     char reserved[192];
@@ -60,7 +60,7 @@
             stm_fatalerror("cond destroy: %m\n");
     }
 
-    memset(&sync_ctl, 0, sizeof(sync_ctl.in_use));
+    memset(&sync_ctl, 0, sizeof(sync_ctl));
 }
 
 #ifndef NDEBUG
@@ -124,12 +124,12 @@
 {
     long i;
  restart:
-    for (i = 0; i < NB_SEGMENTS; i++) {
+    for (i = 1; i <= NB_SEGMENTS; i++) {
         if (get_priv_segment(i)->transaction_state == TS_INEVITABLE) {
             if (can_abort) {
                 /* handle this case like a contention: it will either
                    abort us (not the other thread, which is inevitable),
-                   or for a while.  If we go past this call, then we
+                   or wait for a while.  If we go past this call, then we
                    waited; in this case we have to re-check if no other
                    thread is inevitable. */
                 inevitable_contention_management(i);
@@ -152,7 +152,7 @@
     assert(_is_tl_registered(tl));
 
     int num = tl->associated_segment_num;
-    if (sync_ctl.in_use[num] == 0) {
+    if (sync_ctl.in_use1[num - 1] == 0) {
         /* fast-path: we can get the same segment number as the one
            we had before.  The value stored in GS is still valid. */
 #ifdef STM_TESTS
@@ -165,10 +165,10 @@
     }
     /* Look for the next free segment.  If there is none, wait for
        the condition variable. */
-    int i;
-    for (i = 0; i < NB_SEGMENTS; i++) {
-        num = (num + 1) % NB_SEGMENTS;
-        if (sync_ctl.in_use[num] == 0) {
+    int retries;
+    for (retries = 0; retries < NB_SEGMENTS; retries++) {
+        num = (num % NB_SEGMENTS) + 1;
+        if (sync_ctl.in_use1[num - 1] == 0) {
             /* we're getting 'num', a different number. */
             dprintf(("acquired different segment: %d->%d\n", tl->associated_segment_num, num));
             tl->associated_segment_num = num;
@@ -184,7 +184,7 @@
     return false;
 
  got_num:
-    sync_ctl.in_use[num] = 1;
+    sync_ctl.in_use1[num - 1] = 1;
     assert(STM_SEGMENT->segment_num == num);
     assert(STM_SEGMENT->running_thread == NULL);
     STM_SEGMENT->running_thread = tl;
@@ -208,8 +208,8 @@
     assert(STM_SEGMENT->running_thread == tl);
     STM_SEGMENT->running_thread = NULL;
 
-    assert(sync_ctl.in_use[tl->associated_segment_num] == 1);
-    sync_ctl.in_use[tl->associated_segment_num] = 0;
+    assert(sync_ctl.in_use1[tl->associated_segment_num - 1] == 1);
+    sync_ctl.in_use1[tl->associated_segment_num - 1] = 0;
 }
 
 __attribute__((unused))
@@ -221,7 +221,7 @@
 bool _stm_in_transaction(stm_thread_local_t *tl)
 {
     int num = tl->associated_segment_num;
-    assert(num < NB_SEGMENTS);
+    assert(1 <= num && num <= NB_SEGMENTS);
     return get_segment(num)->running_thread == tl;
 }
 
@@ -262,7 +262,7 @@
     assert((_safe_points_requested = true, 1));
 
     long i;
-    for (i = 0; i < NB_SEGMENTS; i++) {
+    for (i = 1; i <= NB_SEGMENTS; i++) {
         if (get_segment(i)->nursery_end == NURSERY_END)
             get_segment(i)->nursery_end = NSE_SIGPAUSE;
     }
@@ -276,7 +276,7 @@
     long result = 0;
     int my_num = STM_SEGMENT->segment_num;
 
-    for (i = 0; i < NB_SEGMENTS; i++) {
+    for (i = 1; i <= NB_SEGMENTS; i++) {
         if (i != my_num && get_priv_segment(i)->safe_point == SP_RUNNING) {
             assert(get_segment(i)->nursery_end <= _STM_NSE_SIGNAL_MAX);
             result++;
@@ -291,7 +291,7 @@
     assert((_safe_points_requested = false, 1));
 
     long i;
-    for (i = 0; i < NB_SEGMENTS; i++) {
+    for (i = 1; i <= NB_SEGMENTS; i++) {
         assert(get_segment(i)->nursery_end != NURSERY_END);
         if (get_segment(i)->nursery_end == NSE_SIGPAUSE)
             get_segment(i)->nursery_end = NURSERY_END;
diff --git a/c7/stm/weakref.c b/c7/stm/weakref.c
--- a/c7/stm/weakref.c
+++ b/c7/stm/weakref.c
@@ -29,6 +29,8 @@
 
 static void _set_weakref_in_all_segments(object_t *weakref, object_t *value)
 {
+    abort();//XXX
+#if 0
     ssize_t size = 16;
 
     stm_char *point_to_loc = (stm_char*)WEAKREF_PTR(weakref, size);
@@ -44,6 +46,7 @@
     else {
         *WEAKREF_PTR(weakref, size) = value;
     }
+#endif
 }
 
 /***** Minor collection *****/

