[pypy-commit] stmgc c8-small-uniform: merge new page handling (WIP)

Raemi noreply at buildbot.pypy.org
Wed Nov 12 11:29:47 CET 2014


Author: Remi Meier <remi.meier at inf.ethz.ch>
Branch: c8-small-uniform
Changeset: r1505:f128b7906776
Date: 2014-11-12 10:31 +0100
http://bitbucket.org/pypy/stmgc/changeset/f128b7906776/

Log:	merge new page handling (WIP)

diff too long, truncating to 2000 out of 2471 lines

diff --git a/c8/PAGES b/c8/PAGES
new file mode 100644
--- /dev/null
+++ b/c8/PAGES
@@ -0,0 +1,52 @@
+Handling of pages in stmgc
+--------------------------
+
+(Proposal)
+
+Each segment corresponds to one range of virtual addresses of NB_PAGES pages.
+
+There is additionally one file descriptor corresponding to a
+memory-mapped file of length "15/16 * NB_PAGES" pages.  In each
+segment, the final 15/16 of the range is normally remapped with
+MAP_SHARED to correspond to this file.
+
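A hypothetical sketch of this initial mapping (the real setup code is
outside this diff, presumably in setup.c; the names NB_SHARED_PAGES and
stm_object_pages_fd are taken from c8/stm/core.h below):

    /* sketch only: map the tail of every segment MAP_SHARED to the
       one underlying file of NB_SHARED_PAGES pages */
    long seg;
    for (seg = 0; seg < NB_SEGMENTS; seg++) {
        char *addr = get_segment_base(seg) + END_NURSERY_PAGE * 4096UL;
        char *res = mmap(addr, NB_SHARED_PAGES * 4096UL,
                         PROT_READ | PROT_WRITE,
                         MAP_FIXED | MAP_SHARED | MAP_NORESERVE,
                         stm_object_pages_fd, 0);
        if (res != addr)
            stm_fatalerror("mmap (initial shared mapping): %m");
    }
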
+For each page of the file and each segment, we store two status bits
+that give one of three possible states: the NO_ACCESS state means that
+the corresponding page was mprotected to PROT_NONE; the SHARED state
+means the page is mapped to the file; the PRIVATE state means the page
+was remapped to an anonymous MAP_PRIVATE page.
+
+When a segment allocates new objects (out of its nursery), they go into
+pages that are initially set to SHARED in this segment, and NO_ACCESS in
+all other segments.
+
+When we try to read the same object from another segment, we get a
+segfault.  In the segment that receives the segfault, the page is
+NO_ACCESS.  At that point there are two cases: either we can use the
+same data, or we can't.  We can if the data in the shared page is the
+unmodified data from the current revision (or from a later revision,
+in which case we just update to this later revision).  We can't if
+the shared data comes from a past revision or if it contains
+currently-modified data.
+
+If we can: we mprotect our segment's page back to SHARED.
+
+If we can't: we remap the page to PRIVATE.
+
+Finally, here is the write barrier logic.  When we're about to write to
+a page in our segment: first, we make sure it's not a NO_ACCESS page (by
+forcing a segfault to occur, I suppose).  Then, if it's a PRIVATE page,
+nothing to do; but if it's a SHARED page, we first check the other
+segments.  If none of them also has the page in the SHARED state (all
+have NO_ACCESS or PRIVATE), then we don't do anything either.  Only if
+there is a different segment that also has the page SHARED do we need
+more care.  There are two solutions in this case:
+
+1. We can change our page to PRIVATE.
+
+2. We can change the other segment's page to NO_ACCESS.
+
+Which solution to choose in which case is unclear so far.
+
+The end result is that we end up with as few PRIVATE pages as
+reasonably possible: a page becomes PRIVATE only if one segment
+currently has changes in the page *and* a different segment is
+currently viewing the same data.
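
For concreteness, the three states are encoded in c8/stm/pages.h (see
below) as two status bits per (segment, page) pair; the accessor here
is a hypothetical reconstruction, since the diff is truncated before
the real get_page_status_in() definition:

    enum {
        PAGE_SHARED    = 0,  /* mapped MAP_SHARED to the file */
        PAGE_PRIVATE   = 1,  /* remapped with MAP_PRIVATE (copy-on-write) */
        PAGE_NO_ACCESS = 2,  /* mprotect()ed to PROT_NONE */
    };

    /* sketch, assuming two status bits per segment packed into
       pages_status[].by_segment: */
    static inline int get_page_status_in(long segnum, uintptr_t pagenum)
    {
        uint64_t bits = pages_status[pagenum - PAGE_FLAG_START].by_segment;
        return (int)((bits >> (2 * segnum)) & 3);
    }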
diff --git a/c8/stm/core.c b/c8/stm/core.c
--- a/c8/stm/core.c
+++ b/c8/stm/core.c
@@ -2,8 +2,6 @@
 # error "must be compiled via stmgc.c"
 #endif
 
-#include <signal.h>
-
 
 #ifdef NDEBUG
 #define EVENTUALLY(condition)    {/* nothing */}
@@ -19,231 +17,440 @@
     }
 #endif
 
+/* General helper: copies objects into our own segment, from some
+   source described by a range of 'struct stm_undo_s'.  Maybe later
+   we could specialize this function to avoid the checks in the
+   inner loop.
+*/
+static void import_objects(
+        int from_segnum,            /* or -1: from undo->backup,
+                                       or -2: from undo->backup if not modified */
+        uintptr_t pagenum,          /* or -1: "all accessible" */
+        struct stm_undo_s *undo,
+        struct stm_undo_s *end)
+{
+    char *src_segment_base = (from_segnum >= 0 ? get_segment_base(from_segnum)
+                                               : NULL);
+
+    assert(IMPLY(from_segnum >= 0, get_priv_segment(from_segnum)->modification_lock));
+    assert(STM_PSEGMENT->modification_lock);
+
+    for (; undo < end; undo++) {
+        object_t *obj = undo->object;
+        stm_char *oslice = ((stm_char *)obj) + SLICE_OFFSET(undo->slice);
+        uintptr_t current_page_num = ((uintptr_t)oslice) / 4096;
+
+        if (pagenum == -1) {
+            if (get_page_status_in(STM_SEGMENT->segment_num,
+                                   current_page_num) == PAGE_NO_ACCESS)
+                continue;
+        }
+        else {
+            if (current_page_num != pagenum)
+                continue;
+        }
+
+        if (from_segnum == -2 && _stm_was_read(obj) && (obj->stm_flags & GCFLAG_WB_EXECUTED)) {
+            /* called from stm_validate():
+                > if not was_read(), we certainly didn't modify
+                > if not WB_EXECUTED, we may have read from the obj in a different page but
+                  did not modify it (should not occur right now, but future proof!)
+               only the WB_EXECUTED alone is not enough, since we may have imported from a
+               segment's private page (which had the flag set) */
+            assert(IMPLY(_stm_was_read(obj), (obj->stm_flags & GCFLAG_WB_EXECUTED))); /* for now */
+            continue;           /* only copy unmodified */
+        }
+
+        dprintf(("import slice seg=%d obj=%p off=%lu sz=%d pg=%lu\n",
+                 from_segnum, obj, SLICE_OFFSET(undo->slice),
+                 SLICE_SIZE(undo->slice), current_page_num));
+        char *src, *dst;
+        if (src_segment_base != NULL)
+            src = REAL_ADDRESS(src_segment_base, oslice);
+        else
+            src = undo->backup + SLICE_OFFSET(undo->slice);
+        dst = REAL_ADDRESS(STM_SEGMENT->segment_base, oslice);
+        memcpy(dst, src, SLICE_SIZE(undo->slice));
+
+        if (src_segment_base == NULL) {
+            /* backups should never have WB_EXECUTED */
+            assert(!(obj->stm_flags & GCFLAG_WB_EXECUTED));
+        }
+    }
+}
+
+
+/* ############# signal handler ############# */
+
+static void copy_bk_objs_in_page_from(int from_segnum, uintptr_t pagenum,
+                                      bool only_if_not_modified)
+{
+    /* looks at all bk copies of objects overlapping page 'pagenum' and
+       copies the part in 'pagenum' back to the current segment */
+    dprintf(("copy_bk_objs_in_page_from(%d, %ld, %d)\n",
+             from_segnum, (long)pagenum, only_if_not_modified));
+
+    struct list_s *list = get_priv_segment(from_segnum)->modified_old_objects;
+    struct stm_undo_s *undo = (struct stm_undo_s *)list->items;
+    struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count);
+
+    import_objects(only_if_not_modified ? -2 : -1,
+                   pagenum, undo, end);
+}
+
+static void go_to_the_past(uintptr_t pagenum,
+                           struct stm_commit_log_entry_s *from,
+                           struct stm_commit_log_entry_s *to)
+{
+    assert(STM_PSEGMENT->modification_lock);
+    assert(from->rev_num >= to->rev_num);
+    /* walk the commit log BACKWARDS and update the page 'pagenum',
+       initially at revision 'from', until we reach the revision 'to'. */
+
+    /* XXXXXXX Recursive algo for now, fix this! */
+    if (from != to) {
+        struct stm_commit_log_entry_s *cl = to->next;
+        go_to_the_past(pagenum, from, cl);
+
+        struct stm_undo_s *undo = cl->written;
+        struct stm_undo_s *end = cl->written + cl->written_count;
+
+        import_objects(-1, pagenum, undo, end);
+    }
+}
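
The recursion above exists only because the commit log is singly linked
oldest-to-newest while the entries must be replayed newest-first.  A
possible non-recursive replacement, as a sketch: it assumes a fixed
bound on the number of entries is acceptable; a real version would grow
the buffer dynamically.

    static void go_to_the_past_iterative(uintptr_t pagenum,
                                         struct stm_commit_log_entry_s *from,
                                         struct stm_commit_log_entry_s *to)
    {
        struct stm_commit_log_entry_s *stack[64];  /* assumed bound */
        long n = 0;
        if (from == to)
            return;
        /* collect the entries from 'to' (exclusive) to 'from'
           (inclusive), oldest-first... */
        struct stm_commit_log_entry_s *cl = to->next;
        while (1) {
            assert(n < 64);
            stack[n++] = cl;
            if (cl == from)
                break;
            cl = cl->next;
        }
        /* ...and replay them newest-first, like the recursive version */
        while (n > 0) {
            cl = stack[--n];
            import_objects(-1, pagenum, cl->written,
                           cl->written + cl->written_count);
        }
    }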
+
+
+
+static void handle_segfault_in_page(uintptr_t pagenum)
+{
+    /* assumes page 'pagenum' is NO_ACCESS, privatizes it,
+       and validates to newest revision */
+
+    dprintf(("handle_segfault_in_page(%lu), seg %d\n", pagenum, STM_SEGMENT->segment_num));
+
+    /* XXX: bad, but no deadlocks: */
+    acquire_all_privatization_locks();
+
+    long i;
+    int my_segnum = STM_SEGMENT->segment_num;
+
+    assert(get_page_status_in(my_segnum, pagenum) == PAGE_NO_ACCESS);
+
+    /* find who has the most recent revision of our page */
+    int shared_page_holder = -1;
+    int shared_ref_count = 0;
+    int copy_from_segnum = -1;
+    uint64_t most_recent_rev = 0;
+    for (i = 0; i < NB_SEGMENTS; i++) {
+        if (i == my_segnum)
+            continue;
+
+        if (get_page_status_in(i, pagenum) == PAGE_SHARED) {
+            /* mostly for debugging now: */
+            shared_page_holder = i;
+            shared_ref_count++;
+        }
+
+        struct stm_commit_log_entry_s *log_entry;
+        log_entry = get_priv_segment(i)->last_commit_log_entry;
+        if (get_page_status_in(i, pagenum) != PAGE_NO_ACCESS
+            && (copy_from_segnum == -1 || log_entry->rev_num > most_recent_rev)) {
+            copy_from_segnum = i;
+            most_recent_rev = log_entry->rev_num;
+        }
+    }
+    OPT_ASSERT(shared_page_holder != -1);
+    OPT_ASSERT(copy_from_segnum != -1 && copy_from_segnum != my_segnum);
+
+    /* XXX: for now, we don't try to get the single shared page. We simply
+       regard it as private for its holder. */
+    /* this assert should be true for now... */
+    assert(shared_ref_count == 1);
+
+    /* make our page private */
+    page_privatize_in(my_segnum, pagenum);
+    assert(get_page_status_in(my_segnum, pagenum) == PAGE_PRIVATE);
+
+    /* before copying anything, acquire modification locks from our and
+       the other segment */
+    uint64_t to_lock = (1UL << copy_from_segnum) | (1UL << my_segnum);
+    acquire_modification_lock_set(to_lock);
+    pagecopy((char*)(get_virt_page_of(my_segnum, pagenum) * 4096UL),
+             (char*)(get_virt_page_of(copy_from_segnum, pagenum) * 4096UL));
+
+    /* if there were modifications in the page, revert them. */
+    copy_bk_objs_in_page_from(copy_from_segnum, pagenum, false);
+
+    /* we need to go from 'src_version' to 'target_version'.  This
+       might need a walk into the past. */
+    struct stm_commit_log_entry_s *src_version, *target_version;
+    src_version = get_priv_segment(copy_from_segnum)->last_commit_log_entry;
+    target_version = STM_PSEGMENT->last_commit_log_entry;
+
+
+    dprintf(("handle_segfault_in_page: rev %lu to rev %lu\n",
+             src_version->rev_num, target_version->rev_num));
+    /* adapt revision of page to our revision:
+       if our rev is higher than the page we copy from, everything
+       is fine as we never read/modified the page anyway
+     */
+    if (src_version->rev_num > target_version->rev_num)
+        go_to_the_past(pagenum, src_version, target_version);
+
+    release_modification_lock_set(to_lock);
+    release_all_privatization_locks();
+}
+
+static void _signal_handler(int sig, siginfo_t *siginfo, void *context)
+{
+    int saved_errno = errno;
+    char *addr = siginfo->si_addr;
+    dprintf(("si_addr: %p\n", addr));
+    if (addr == NULL || addr < stm_object_pages ||
+        addr >= stm_object_pages+TOTAL_MEMORY) {
+        /* actual segfault, unrelated to stmgc */
+        fprintf(stderr, "Segmentation fault: accessing %p\n", addr);
+        abort();
+    }
+
+    int segnum = get_segment_of_linear_address(addr);
+    if (segnum != STM_SEGMENT->segment_num) {
+        fprintf(stderr, "Segmentation fault: accessing %p (seg %d) from"
+                        " seg %d\n", addr, segnum, STM_SEGMENT->segment_num);
+        abort();
+    }
+    dprintf(("-> segment: %d\n", segnum));
+
+    char *seg_base = STM_SEGMENT->segment_base;
+    uintptr_t pagenum = ((char*)addr - seg_base) / 4096UL;
+    if (pagenum < END_NURSERY_PAGE) {
+        fprintf(stderr, "Segmentation fault: accessing %p (seg %d "
+                        "page %lu)\n", addr, segnum, pagenum);
+        abort();
+    }
+
+    handle_segfault_in_page(pagenum);
+
+    errno = saved_errno;
+    /* now return and retry */
+}
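
This handler relies on being installed with SA_SIGINFO so that
siginfo->si_addr is filled in; the registration itself is not part of
this diff.  A minimal sketch of what it could look like (SA_NODEFER is
an assumption here):

    static void install_segfault_handler(void)
    {
        struct sigaction act;
        memset(&act, 0, sizeof(act));          /* needs <string.h> */
        act.sa_sigaction = _signal_handler;
        act.sa_flags = SA_SIGINFO | SA_NODEFER;
        if (sigaction(SIGSEGV, &act, NULL) < 0)
            stm_fatalerror("sigaction: %m");
    }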
+
 /* ############# commit log ############# */
 
 
 void _dbg_print_commit_log()
 {
-    volatile struct stm_commit_log_entry_s *cl;
-    cl = (volatile struct stm_commit_log_entry_s *)&commit_log_root;
+    struct stm_commit_log_entry_s *cl = &commit_log_root;
 
-    fprintf(stderr, "root (%p, %d)\n", cl->next, cl->segment_num);
+    fprintf(stderr, "commit log:\n");
     while ((cl = cl->next)) {
-        if ((uintptr_t)cl == -1) {
-            fprintf(stderr, "INEVITABLE\n");
+        if (cl == INEV_RUNNING) {
+            fprintf(stderr, "  INEVITABLE\n");
             return;
         }
-        size_t i = 0;
-        fprintf(stderr, "  elem (%p, %d)\n", cl->next, cl->segment_num);
-        object_t *obj;
-        while ((obj = cl->written[i])) {
-            fprintf(stderr, "-> %p\n", obj);
-            i++;
-        };
+        fprintf(stderr, "  entry at %p: seg %d, rev %lu\n", cl, cl->segment_num, cl->rev_num);
+        struct stm_undo_s *undo = cl->written;
+        struct stm_undo_s *end = undo + cl->written_count;
+        for (; undo < end; undo++) {
+            fprintf(stderr, "    obj %p, size %d, ofs %lu: ", undo->object,
+                    SLICE_SIZE(undo->slice), SLICE_OFFSET(undo->slice));
+            /* long i; */
+            /* for (i=0; i<SLICE_SIZE(undo->slice); i += 8) */
+            /*     fprintf(stderr, " 0x%016lx", *(long *)(undo->backup + i)); */
+            fprintf(stderr, "\n");
+        }
     }
 }
 
-static void _update_obj_from(int from_seg, object_t *obj)
+static void reset_modified_from_backup_copies(int segment_num);  /* forward */
+
+static void _stm_validate(void *free_if_abort)
 {
-    /* during validation this looks up the obj in the
-       from_seg (backup or normal) and copies the version
-       over the current segment's one */
-    size_t obj_size;
-    char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj);
-    uintptr_t pagenum = (uintptr_t)obj / 4096UL;
-
-    OPT_ASSERT(!is_shared_log_page(pagenum));
-    assert(is_private_log_page_in(STM_SEGMENT->segment_num, pagenum));
-    assert(is_private_log_page_in(from_seg, pagenum));
-
-    /* look the obj up in the other segment's modified_old_objects to
-       get its backup copy: */
-    acquire_modified_objs_lock(from_seg);
-
-    wlog_t *item;
-    struct tree_s *tree = get_priv_segment(from_seg)->modified_old_objects;
-    TREE_FIND(tree, (uintptr_t)obj, item, goto not_found);
-
-    obj_size = stmcb_size_rounded_up((struct object_s*)item->val);
-    memcpy(realobj, (char*)item->val, obj_size);
-    assert(obj->stm_flags & GCFLAG_WRITE_BARRIER);
-    release_modified_objs_lock(from_seg);
-    return;
-
- not_found:
-    /* copy from page directly (obj is unmodified) */
-    obj_size = stmcb_size_rounded_up(
-        (struct object_s*)REAL_ADDRESS(get_segment_base(from_seg), obj));
-    memcpy(realobj,
-           REAL_ADDRESS(get_segment_base(from_seg), obj),
-           obj_size);
-    obj->stm_flags |= GCFLAG_WRITE_BARRIER; /* may already be gone */
-    release_modified_objs_lock(from_seg);
-}
-
-void stm_validate(void *free_if_abort)
-{
+    dprintf(("_stm_validate(%p)\n", free_if_abort));
     /* go from last known entry in commit log to the
        most current one and apply all changes done
        by other transactions. Abort if we read one of
        the committed objs. */
+    struct stm_commit_log_entry_s *first_cl = STM_PSEGMENT->last_commit_log_entry;
+    struct stm_commit_log_entry_s *next_cl, *last_cl, *cl;
+    int my_segnum = STM_SEGMENT->segment_num;
+    /* don't check 'first_cl' itself: that entry was already validated */
+
     if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) {
-        assert((uintptr_t)STM_PSEGMENT->last_commit_log_entry->next == -1);
+        assert(first_cl->next == INEV_RUNNING);
         return;
     }
 
-    volatile struct stm_commit_log_entry_s *cl, *prev_cl;
-    cl = prev_cl = (volatile struct stm_commit_log_entry_s *)
-        STM_PSEGMENT->last_commit_log_entry;
-
-    bool needs_abort = false;
-    /* Don't check 'cl'. This entry is already checked */
-    while ((cl = cl->next)) {
-        if ((uintptr_t)cl == -1) {
-            /* there is an inevitable transaction running */
+    /* Find the set of segments we need to copy from and lock them: */
+    uint64_t segments_to_lock = 1UL << my_segnum;
+    cl = first_cl;
+    while ((next_cl = cl->next) != NULL) {
+        if (next_cl == INEV_RUNNING) {
 #if STM_TESTS
-            free(free_if_abort);
+            if (free_if_abort != (void *)-1)
+                free(free_if_abort);
             stm_abort_transaction();
 #endif
-            cl = prev_cl;
-            _stm_collectable_safe_point();
-            continue;
+            /* only validate entries up to INEV */
+            break;
         }
-        prev_cl = cl;
+        assert(next_cl->rev_num > cl->rev_num);
+        cl = next_cl;
 
-        OPT_ASSERT(cl->segment_num >= 0 && cl->segment_num < NB_SEGMENTS);
+        if (cl->written_count) {
+            segments_to_lock |= (1UL << cl->segment_num);
+        }
+    }
+    last_cl = cl;
+    acquire_modification_lock_set(segments_to_lock);
 
-        object_t *obj;
-        size_t i = 0;
-        while ((obj = cl->written[i])) {
-            _update_obj_from(cl->segment_num, obj);
 
-            if (_stm_was_read(obj)) {
-                needs_abort = true;
+    /* import objects from first_cl to last_cl: */
+    bool needs_abort = false;
+    if (first_cl != last_cl) {
+        uint64_t segment_really_copied_from = 0UL;
 
-                /* if we wrote this obj, we need to free its backup and
-                   remove it from modified_old_objects because
-                   we would otherwise overwrite the updated obj on abort */
-                acquire_modified_objs_lock(STM_SEGMENT->segment_num);
-                wlog_t *item;
-                struct tree_s *tree = STM_PSEGMENT->modified_old_objects;
-                TREE_FIND(tree, (uintptr_t)obj, item, goto not_found);
-
-                free((void*)item->val);
-                TREE_FIND_DELETE(tree, item);
-
-            not_found:
-                /* nothing todo */
-                release_modified_objs_lock(STM_SEGMENT->segment_num);
+        cl = first_cl;
+        while ((cl = cl->next) != NULL) {
+            if (!needs_abort) {
+                struct stm_undo_s *undo = cl->written;
+                struct stm_undo_s *end = cl->written + cl->written_count;
+                for (; undo < end; undo++) {
+                    if (_stm_was_read(undo->object)) {
+                        /* first reset all modified objects from the backup
+                           copies as soon as the first conflict is detected;
+                           then we will proceed below to update our segment from
+                           the old (but unmodified) version to the newer version.
+                        */
+                        reset_modified_from_backup_copies(my_segnum);
+                        needs_abort = true;
+                        break;
+                    }
+                }
             }
 
-            i++;
-        };
+            if (cl->written_count) {
+                struct stm_undo_s *undo = cl->written;
+                struct stm_undo_s *end = cl->written + cl->written_count;
 
-        /* last fully validated entry */
-        STM_PSEGMENT->last_commit_log_entry = (struct stm_commit_log_entry_s *)cl;
+                segment_really_copied_from |= (1UL << cl->segment_num);
+                import_objects(cl->segment_num, -1, undo, end);
+            }
+
+            /* last fully validated entry */
+            STM_PSEGMENT->last_commit_log_entry = cl;
+            if (cl == last_cl)
+                break;
+        }
+        assert(cl == last_cl);
+
+        OPT_ASSERT(segment_really_copied_from < (1UL << NB_SEGMENTS));
+        int segnum;
+        for (segnum = 0; segnum < NB_SEGMENTS; segnum++) {
+            if (segment_really_copied_from & (1UL << segnum)) {
+                /* here we can actually have our own modified version, so
+                   make sure to only copy things that are not modified in our
+                   segment... (if we do not abort) */
+                copy_bk_objs_in_page_from(
+                    segnum, -1,     /* any page */
+                    !needs_abort);  /* if we abort, we still want to copy everything */
+            }
+        }
     }
 
+    /* done with modifications */
+    release_modification_lock_set(segments_to_lock);
+
     if (needs_abort) {
-        free(free_if_abort);
+        if (free_if_abort != (void *)-1)
+            free(free_if_abort);
+        /* pages may be inconsistent */
+
         stm_abort_transaction();
     }
 }
 
-static struct stm_commit_log_entry_s *_create_commit_log_entry()
+static struct stm_commit_log_entry_s *_create_commit_log_entry(void)
 {
     /* puts all modified_old_objects in a new commit log entry */
 
     // we don't need the privatization lock, as we are only
     // reading from modified_old_objs and nobody but us can change it
-    struct tree_s *tree = STM_PSEGMENT->modified_old_objects;
-    size_t count = tree_count(tree);
-    size_t byte_len = sizeof(struct stm_commit_log_entry_s) + (count + 1) * sizeof(object_t*);
+    struct list_s *list = STM_PSEGMENT->modified_old_objects;
+    OPT_ASSERT((list_count(list) % 3) == 0);
+    size_t count = list_count(list) / 3;
+    size_t byte_len = sizeof(struct stm_commit_log_entry_s) +
+        count * sizeof(struct stm_undo_s);
     struct stm_commit_log_entry_s *result = malloc(byte_len);
 
     result->next = NULL;
     result->segment_num = STM_SEGMENT->segment_num;
-
-    int i = 0;
-    wlog_t *item;
-    TREE_LOOP_FORWARD(tree, item); {
-        result->written[i] = (object_t*)item->addr;
-        i++;
-    } TREE_LOOP_END;
-
-    OPT_ASSERT(count == i);
-    result->written[count] = NULL;
-
+    result->rev_num = -1;       /* invalid */
+    result->written_count = count;
+    memcpy(result->written, list->items, count * sizeof(struct stm_undo_s));
     return result;
 }
 
-static void _validate_and_add_to_commit_log()
+static void _validate_and_attach(struct stm_commit_log_entry_s *new)
 {
-    struct stm_commit_log_entry_s *new;
-    volatile struct stm_commit_log_entry_s **to;
+    struct stm_commit_log_entry_s *old;
+
+    while (1) {
+        _stm_validate(/* free_if_abort = */ new);
+
+        /* try to attach to commit log: */
+        old = STM_PSEGMENT->last_commit_log_entry;
+        if (old->next == NULL) {
+            if (new != INEV_RUNNING) /* INEVITABLE */
+                new->rev_num = old->rev_num + 1;
+
+            if (__sync_bool_compare_and_swap(&old->next, NULL, new))
+                break;   /* success! */
+        } else if (old->next == INEV_RUNNING) {
+            /* we failed because there is an INEV transaction running */
+            usleep(10);
+        }
+
+        /* check for a requested safe point.  Otherwise an INEV transaction
+           may try to commit but cannot because of the busy-loop here. */
+        _stm_collectable_safe_point();
+    }
+}
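
Note the design here: a commit log entry is immutable except for its
'next' field, so a single compare-and-swap on old->next is enough to
serialize committers, and INEV_RUNNING ((void *)-1) doubles as a
sentinel in that same field, blocking the tail of the list until the
inevitable transaction replaces it with its real entry in
_validate_and_add_to_commit_log() below.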
+
+static void _validate_and_turn_inevitable(void)
+{
+    _validate_and_attach((struct stm_commit_log_entry_s *)INEV_RUNNING);
+}
+
+static void _validate_and_add_to_commit_log(void)
+{
+    struct stm_commit_log_entry_s *old, *new;
 
     new = _create_commit_log_entry();
     if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) {
-        OPT_ASSERT((uintptr_t)STM_PSEGMENT->last_commit_log_entry->next == -1);
+        old = STM_PSEGMENT->last_commit_log_entry;
+        new->rev_num = old->rev_num + 1;
+        OPT_ASSERT(old->next == INEV_RUNNING);
 
-        to = &(STM_PSEGMENT->last_commit_log_entry->next);
-        bool yes = __sync_bool_compare_and_swap(to, (void*)-1, new);
+        bool yes = __sync_bool_compare_and_swap(&old->next, INEV_RUNNING, new);
         OPT_ASSERT(yes);
-        return;
+    }
+    else {
+        _validate_and_attach(new);
     }
 
-    /* regular transaction: */
-    do {
-        stm_validate(new);
-
-        /* try attaching to commit log: */
-        to = &(STM_PSEGMENT->last_commit_log_entry->next);
-    } while (!__sync_bool_compare_and_swap(to, NULL, new));
-}
-
-static void _validate_and_turn_inevitable()
-{
-    struct stm_commit_log_entry_s *new;
-    volatile struct stm_commit_log_entry_s **to;
-
-    new = (struct stm_commit_log_entry_s*)-1;
-    do {
-        stm_validate(NULL);
-
-        /* try attaching to commit log: */
-        to = &(STM_PSEGMENT->last_commit_log_entry->next);
-    } while (!__sync_bool_compare_and_swap(to, NULL, new));
+    acquire_modification_lock(STM_SEGMENT->segment_num);
+    list_clear(STM_PSEGMENT->modified_old_objects);
+    STM_PSEGMENT->last_commit_log_entry = new;
+    release_modification_lock(STM_SEGMENT->segment_num);
 }
 
 /* ############# STM ############# */
+void stm_validate()
+{
+    _stm_validate(NULL);
+}
 
-void _privatize_shared_page(uintptr_t pagenum)
-{
-    /* privatize pages of obj for our segment iff previously
-       the pages were fully shared. */
-#ifndef NDEBUG
-    long l;
-    for (l = 0; l < NB_SEGMENTS; l++) {
-        assert(get_priv_segment(l)->privatization_lock);
-    }
-#endif
-
-    uintptr_t i;
-    int my_segnum = STM_SEGMENT->segment_num;
-
-    assert(is_shared_log_page(pagenum));
-    char *src = (char*)(get_virt_page_of(0, pagenum) * 4096UL);
-
-    for (i = 1; i < NB_SEGMENTS; i++) {
-        assert(!is_private_log_page_in(i, pagenum));
-
-        page_privatize_in(i, pagenum, src);
-    }
-    set_page_private_in(0, pagenum);
-
-    OPT_ASSERT(is_private_log_page_in(my_segnum, pagenum));
-    assert(!is_shared_log_page(pagenum));
-}
 
 void _stm_write_slowpath(object_t *obj)
 {
@@ -268,50 +475,116 @@
     /* add to read set: */
     stm_read(obj);
 
-    /* create backup copy: */
+    if (obj->stm_flags & GCFLAG_WB_EXECUTED) {
+        /* already executed WB once in this transaction. do GC
+           part again: */
+        dprintf(("write_slowpath-fast(%p)\n", obj));
+        obj->stm_flags &= ~GCFLAG_WRITE_BARRIER;
+        LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj);
+        return;
+    }
+
+    /* create backup copy (this may cause several page faults
+       XXX: do backup later and maybe allow for having NO_ACCESS
+       pages around anyway (kind of card marking)): */
     struct object_s *bk_obj = malloc(obj_size);
     memcpy(bk_obj, realobj, obj_size);
+    assert(!(bk_obj->stm_flags & GCFLAG_WB_EXECUTED));
 
-    /* if there are shared pages, privatize them */
-    uintptr_t page = first_page;
+    dprintf(("write_slowpath(%p): sz=%lu, bk=%p\n", obj, obj_size, bk_obj));
+ retry:
+    /* privatize pages: */
+    /* XXX don't always acquire all locks... */
+    acquire_all_privatization_locks();
+
+    uintptr_t page;
     for (page = first_page; page <= end_page; page++) {
-        if (UNLIKELY(is_shared_log_page(page))) {
-            long i;
-            for (i = 0; i < NB_SEGMENTS; i++) {
-                acquire_privatization_lock(i);
-            }
-            if (is_shared_log_page(page))
-                _privatize_shared_page(page);
-            for (i = NB_SEGMENTS-1; i >= 0; i--) {
-                release_privatization_lock(i);
+        /* check if our page is private or we are the only shared-page holder */
+        switch (get_page_status_in(my_segnum, page)) {
+
+        case PAGE_PRIVATE:
+            continue;
+
+        case PAGE_NO_ACCESS:
+            /* happens if there is a concurrent WB between us making the backup
+               and acquiring the locks */
+            release_all_privatization_locks();
+
+            volatile char *dummy = REAL_ADDRESS(STM_SEGMENT->segment_base, page * 4096UL);
+            *dummy;            /* force segfault */
+
+            goto retry;
+
+        case PAGE_SHARED:
+            break;
+
+        default:
+            assert(0);
+        }
+        /* Make sure all the others are NO_ACCESS.  Choosing to make
+           ourselves PRIVATE instead is harder, because then nobody may ever
+           update the shared page in stm_validate() unless it is the sole
+           reader of it; but then we don't actually know which revision the
+           page is at. */
+        /* XXX this is a temporary solution I suppose */
+        int i;
+        for (i = 0; i < NB_SEGMENTS; i++) {
+            if (i == my_segnum)
+                continue;
+
+            if (get_page_status_in(i, page) == PAGE_SHARED) {
+                /* xxx: unmap? */
+                set_page_status_in(i, page, PAGE_NO_ACCESS);
+                mprotect((char*)(get_virt_page_of(i, page) * 4096UL), 4096UL, PROT_NONE);
+                dprintf(("NO_ACCESS in seg %d page %lu\n", i, page));
             }
         }
     }
-    /* pages not shared anymore. but we still may have
-       only a read protected page ourselves: */
+    /* all pages are either private or we were the first to write to a shared
+       page and therefore got it as our private one */
 
-    acquire_privatization_lock(my_segnum);
-    OPT_ASSERT(is_private_log_page_in(my_segnum, first_page));
+    /* phew, now add the obj to the write-set and register the
+       backup copy. */
+    /* XXX: we should not be here at all fiddling with page status
+       if 'obj' is merely an overflow object.  FIX ME, likely by copying
+       the overflow number logic from c7. */
 
-    /* remove the WRITE_BARRIER flag */
+    acquire_modification_lock(STM_SEGMENT->segment_num);
+    uintptr_t slice_sz;
+    uintptr_t in_page_offset = (uintptr_t)obj % 4096UL;
+    uintptr_t remaining_obj_sz = obj_size;
+    for (page = first_page; page <= end_page; page++) {
+        /* XXX Maybe also use mprotect() again to mark pages of the object as read-only, and
+           only stick it into modified_old_objects page-by-page?  Maybe it's
+           possible to do card-marking that way, too. */
+        OPT_ASSERT(remaining_obj_sz);
+
+        slice_sz = remaining_obj_sz;
+        if (in_page_offset + slice_sz > 4096UL) {
+            /* don't cross page boundaries */
+            slice_sz = 4096UL - in_page_offset;
+        }
+
+        STM_PSEGMENT->modified_old_objects = list_append3(
+            STM_PSEGMENT->modified_old_objects,
+            (uintptr_t)obj,     /* obj */
+            (uintptr_t)bk_obj,  /* bk_addr */
+            NEW_SLICE(obj_size - remaining_obj_sz, slice_sz));
+
+        remaining_obj_sz -= slice_sz;
+        in_page_offset = (in_page_offset + slice_sz) % 4096UL; /* mostly 0 */
+    }
+    OPT_ASSERT(remaining_obj_sz == 0);
+
+    release_modification_lock(STM_SEGMENT->segment_num);
+    /* done fiddling with protection and privatization */
+    release_all_privatization_locks();
+
+    /* remove the WRITE_BARRIER flag and add WB_EXECUTED */
     obj->stm_flags &= ~GCFLAG_WRITE_BARRIER;
+    obj->stm_flags |= GCFLAG_WB_EXECUTED;
 
     /* also add it to the GC list for minor collections */
     LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj);
-
-    /* done fiddling with protection and privatization */
-    release_privatization_lock(my_segnum);
-
-    /* phew, now add the obj to the write-set and register the
-       backup copy. */
-    /* XXX: possibly slow check; try overflow objs again? */
-    if (!tree_contains(STM_PSEGMENT->modified_old_objects, (uintptr_t)obj)) {
-        acquire_modified_objs_lock(my_segnum);
-        tree_insert(STM_PSEGMENT->modified_old_objects,
-                    (uintptr_t)obj, (uintptr_t)bk_obj);
-        release_modified_objs_lock(my_segnum);
-    }
-
 }
 
 static void reset_transaction_read_version(void)
@@ -324,7 +597,7 @@
              (long)(NB_READMARKER_PAGES * 4096UL)));
     if (mmap(readmarkers, NB_READMARKER_PAGES * 4096UL,
              PROT_READ | PROT_WRITE,
-             MAP_FIXED | MAP_PAGES_FLAGS, -1, 0) != readmarkers) {
+             MAP_FIXED | MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0) != readmarkers) {
         /* fall-back */
 #if STM_TESTS
         stm_fatalerror("reset_transaction_read_version: %m");
@@ -334,15 +607,24 @@
     STM_SEGMENT->transaction_read_version = 1;
 }
 
+static void reset_wb_executed_flags(void)
+{
+    struct list_s *list = STM_PSEGMENT->modified_old_objects;
+    struct stm_undo_s *undo = (struct stm_undo_s *)list->items;
+    struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count);
+
+    for (; undo < end; undo++) {
+        object_t *obj = undo->object;
+        obj->stm_flags &= ~GCFLAG_WB_EXECUTED;
+    }
+}
+
 
 static void _stm_start_transaction(stm_thread_local_t *tl)
 {
     assert(!_stm_in_transaction(tl));
 
-  retry:
-
-    if (!acquire_thread_segment(tl))
-        goto retry;
+    while (!acquire_thread_segment(tl)) {}
     /* GS invalid before this point! */
 
     assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION);
@@ -355,7 +637,7 @@
     STM_PSEGMENT->shadowstack_at_start_of_transaction = tl->shadowstack;
 
     enter_safe_point_if_requested();
-    dprintf(("start_transaction\n"));
+    dprintf(("> start_transaction\n"));
 
     s_mutex_unlock();
 
@@ -365,7 +647,7 @@
         reset_transaction_read_version();
     }
 
-    assert(tree_is_cleared(STM_PSEGMENT->modified_old_objects));
+    assert(list_is_empty(STM_PSEGMENT->modified_old_objects));
     assert(list_is_empty(STM_PSEGMENT->objects_pointing_to_nursery));
     assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery));
     assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows));
@@ -374,7 +656,7 @@
 
     check_nursery_at_transaction_start();
 
-    stm_validate(NULL);
+    stm_validate();
 }
 
 long stm_start_transaction(stm_thread_local_t *tl)
@@ -422,35 +704,42 @@
     /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */
 }
 
+static void check_all_write_barrier_flags(char *segbase, struct list_s *list)
+{
+#ifndef NDEBUG
+    struct stm_undo_s *undo = (struct stm_undo_s *)list->items;
+    struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count);
+    for (; undo < end; undo++) {
+        object_t *obj = undo->object;
+        char *dst = REAL_ADDRESS(segbase, obj);
+        assert(((struct object_s *)dst)->stm_flags & GCFLAG_WRITE_BARRIER);
+        assert(!(((struct object_s *)dst)->stm_flags & GCFLAG_WB_EXECUTED));
+    }
+#endif
+}
+
 void stm_commit_transaction(void)
 {
     assert(!_has_mutex());
     assert(STM_PSEGMENT->safe_point == SP_RUNNING);
     assert(STM_PSEGMENT->running_pthread == pthread_self());
 
-    dprintf(("stm_commit_transaction()\n"));
+    dprintf(("> stm_commit_transaction()\n"));
     minor_collection(1);
 
+    reset_wb_executed_flags();
+
+    /* minor_collection() above should have set all WRITE_BARRIER flags
+       again.  Check that here for the objects that are about to be copied
+       into the commit log. */
+    check_all_write_barrier_flags(STM_SEGMENT->segment_base,
+                                  STM_PSEGMENT->modified_old_objects);
+
     _validate_and_add_to_commit_log();
 
-    /* clear WRITE_BARRIER flags, free all backup copies,
-       and clear the tree: */
-    acquire_modified_objs_lock(STM_SEGMENT->segment_num);
-
-    struct tree_s *tree = STM_PSEGMENT->modified_old_objects;
-    wlog_t *item;
-    TREE_LOOP_FORWARD(tree, item); {
-        object_t *obj = (object_t*)item->addr;
-        struct object_s* bk_obj = (struct object_s *)item->val;
-        free(bk_obj);
-        obj->stm_flags |= GCFLAG_WRITE_BARRIER;
-    } TREE_LOOP_END;
-    tree_clear(tree);
-
-    release_modified_objs_lock(STM_SEGMENT->segment_num);
-
     invoke_and_clear_user_callbacks(0);   /* for commit */
 
+    /* XXX do we still need a s_mutex_lock() section here? */
     s_mutex_lock();
     enter_safe_point_if_requested();
     assert(STM_SEGMENT->nursery_end == NURSERY_END);
@@ -468,35 +757,42 @@
     s_mutex_unlock();
 }
 
-void reset_modified_from_backup_copies(int segment_num)
+static void reset_modified_from_backup_copies(int segment_num)
 {
 #pragma push_macro("STM_PSEGMENT")
 #pragma push_macro("STM_SEGMENT")
 #undef STM_PSEGMENT
 #undef STM_SEGMENT
-    acquire_modified_objs_lock(segment_num);
+    assert(get_priv_segment(segment_num)->modification_lock);
 
     struct stm_priv_segment_info_s *pseg = get_priv_segment(segment_num);
-    struct tree_s *tree = pseg->modified_old_objects;
-    wlog_t *item;
-    TREE_LOOP_FORWARD(tree, item); {
-        object_t *obj = (object_t*)item->addr;
-        struct object_s* bk_obj = (struct object_s *)item->val;
-        size_t obj_size;
+    struct list_s *list = pseg->modified_old_objects;
+    struct stm_undo_s *undo = (struct stm_undo_s *)list->items;
+    struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count);
 
-        obj_size = stmcb_size_rounded_up(bk_obj);
+    for (; undo < end; undo++) {
+        object_t *obj = undo->object;
+        char *dst = REAL_ADDRESS(pseg->pub.segment_base, obj);
 
-        memcpy(REAL_ADDRESS(pseg->pub.segment_base, obj),
-               bk_obj, obj_size);
-        assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); /* not written */
+        memcpy(dst + SLICE_OFFSET(undo->slice),
+               undo->backup + SLICE_OFFSET(undo->slice),
+               SLICE_SIZE(undo->slice));
 
-        free(bk_obj);
-    } TREE_LOOP_END;
+        size_t obj_size = stmcb_size_rounded_up((struct object_s*)undo->backup);
+        dprintf(("reset_modified_from_backup_copies(%d): obj=%p off=%lu bk=%p obj_sz=%lu\n",
+                 segment_num, obj, SLICE_OFFSET(undo->slice), undo->backup, obj_size));
 
-    tree_clear(tree);
+        if (obj_size - SLICE_OFFSET(undo->slice) <= 4096UL) {
+            /* only free bk copy once (last slice): */
+            free(undo->backup);
+            dprintf(("-> free(%p)\n", undo->backup));
+        }
+    }
 
-    release_modified_objs_lock(segment_num);
+    /* check that all objects have the GCFLAG_WRITE_BARRIER afterwards */
+    check_all_write_barrier_flags(pseg->pub.segment_base, list);
 
+    list_clear(list);
 #pragma pop_macro("STM_SEGMENT")
 #pragma pop_macro("STM_PSEGMENT")
 }
@@ -521,7 +817,9 @@
 
     long bytes_in_nursery = throw_away_nursery(pseg);
 
+    acquire_modification_lock(segment_num);
     reset_modified_from_backup_copies(segment_num);
+    release_modification_lock(segment_num);
 
     stm_thread_local_t *tl = pseg->pub.running_thread;
 #ifdef STM_NO_AUTOMATIC_SETJMP
@@ -638,11 +936,36 @@
     assert(frag_size > 0);
     assert(frag_size + ((uintptr_t)frag & 4095) <= 4096);
 
-    /* if the page of the fragment is fully shared, nothing to do */
+    /* if the page of the fragment is fully shared, nothing to do:
+       |S|N|N|N| */
+
+    /* nobody must change the page mapping until we flush */
     assert(STM_PSEGMENT->privatization_lock);
-    if (is_shared_log_page((uintptr_t)frag / 4096))
+
+    int my_segnum = STM_SEGMENT->segment_num;
+    uintptr_t pagenum = (uintptr_t)frag / 4096;
+    bool fully_shared = false;
+
+    if (get_page_status_in(my_segnum, pagenum) == PAGE_SHARED) {
+        fully_shared = true;
+        int i;
+        for (i = 0; fully_shared && i < NB_SEGMENTS; i++) {
+            if (i == my_segnum)
+                continue;
+
+            /* XXX: works if never all pages use SHARED page */
+            if (get_page_status_in(i, pagenum) != PAGE_NO_ACCESS) {
+                fully_shared = false;
+                break;
+            }
+        }
+    }
+
+    if (fully_shared)
         return;                 /* nothing to do */
 
+    /* e.g. |P|S|N|P| */
+
     /* Enqueue this object (or fragment of object) */
     if (STM_PSEGMENT->sq_len == SYNC_QUEUE_SIZE)
         synchronize_objects_flush();
@@ -686,7 +1009,7 @@
 
 static void synchronize_objects_flush(void)
 {
-
+    /* XXX: not sure this applies anymore.  */
     /* Do a full memory barrier.  We must make sure that other
        CPUs see the changes we did to the shared page ("S", in
        synchronize_object_enqueue()) before we check the other segments
@@ -697,21 +1020,24 @@
        and copies the page; but it risks doing so before seeing the "S"
        writes.
     */
-    /* XXX: not sure this applies anymore.  */
     long j = STM_PSEGMENT->sq_len;
     if (j == 0)
         return;
     STM_PSEGMENT->sq_len = 0;
 
+    dprintf(("synchronize_objects_flush(): %ld fragments\n", j));
+
     __sync_synchronize();
+    assert(STM_PSEGMENT->privatization_lock);
 
     long i, myself = STM_SEGMENT->segment_num;
     do {
         --j;
         stm_char *frag = STM_PSEGMENT->sq_fragments[j];
         uintptr_t page = ((uintptr_t)frag) / 4096UL;
-        if (is_shared_log_page(page))
-            continue;
+        /* XXX: necessary? */
+        /* if (is_shared_log_page(page)) */
+        /*     continue; */
 
         ssize_t frag_size = STM_PSEGMENT->sq_fragsizes[j];
 
@@ -720,11 +1046,11 @@
             if (i == myself)
                 continue;
 
-            char *dst = REAL_ADDRESS(get_segment_base(i), frag);
-            if (is_private_log_page_in(i, page))
+            if (get_page_status_in(i, page) != PAGE_NO_ACCESS) {
+                /* shared or private, but never segfault */
+                char *dst = REAL_ADDRESS(get_segment_base(i), frag);
                 memcpy(dst, src, frag_size);
-            else
-                EVENTUALLY(memcmp(dst, src, frag_size) == 0);  /* same page */
+            }
         }
     } while (j > 0);
 }
diff --git a/c8/stm/core.h b/c8/stm/core.h
--- a/c8/stm/core.h
+++ b/c8/stm/core.h
@@ -6,6 +6,8 @@
 #include <sys/mman.h>
 #include <errno.h>
 #include <pthread.h>
+#include <signal.h>
+
 
 /************************************************************/
 
@@ -17,7 +19,6 @@
 #define NB_PAGES            (2500*256)    // 2500MB
 #define NB_SEGMENTS         STM_NB_SEGMENTS
 #define NB_SEGMENTS_MAX     240    /* don't increase NB_SEGMENTS past this */
-#define MAP_PAGES_FLAGS     (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE)
 #define NB_NURSERY_PAGES    (STM_GC_NURSERY/4)
 
 #define TOTAL_MEMORY          (NB_PAGES * 4096UL * NB_SEGMENTS)
@@ -25,6 +26,7 @@
 #define FIRST_OBJECT_PAGE     ((READMARKER_END + 4095) / 4096UL)
 #define FIRST_NURSERY_PAGE    FIRST_OBJECT_PAGE
 #define END_NURSERY_PAGE      (FIRST_NURSERY_PAGE + NB_NURSERY_PAGES)
+#define NB_SHARED_PAGES       (NB_PAGES - END_NURSERY_PAGE)
 
 #define READMARKER_START      ((FIRST_OBJECT_PAGE * 4096UL) >> 4)
 #define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL)
@@ -32,11 +34,10 @@
 #define FIRST_OLD_RM_PAGE     (OLD_RM_START / 4096UL)
 #define NB_READMARKER_PAGES   (FIRST_OBJECT_PAGE - FIRST_READMARKER_PAGE)
 
-#define TMP_COPY_PAGE         1 /* HACK */
-
 enum /* stm_flags */ {
     GCFLAG_WRITE_BARRIER = _STM_GCFLAG_WRITE_BARRIER,
     GCFLAG_HAS_SHADOW = 0x02,
+    GCFLAG_WB_EXECUTED = 0x04,
 };
 
 
@@ -54,8 +55,20 @@
 struct stm_priv_segment_info_s {
     struct stm_segment_info_s pub;
 
-    uint8_t modified_objs_lock;
-    struct tree_s *modified_old_objects;
+    /* lock protecting from concurrent modification of
+       'modified_old_objects', page-revision-changes, ...
+       Always acquired in global order of segments to avoid deadlocks. */
+    uint8_t modification_lock;
+
+    /* All the old objects (older than the current transaction) that
+       the current transaction attempts to modify.  This is used to
+       track the STM status: these are old objects that were written
+       to and that will need to be recorded in the commit log.  The
+       list contains three entries for every such object, in the same
+       format as 'struct stm_undo_s' below.
+    */
+    struct list_s *modified_old_objects;
+
     struct list_s *objects_pointing_to_nursery;
     struct tree_s *young_outside_nursery;
     struct tree_s *nursery_objects_shadows;
@@ -86,7 +99,6 @@
     int sq_len;
 };
 
-
 enum /* safe_point */ {
     SP_NO_TRANSACTION=0,
     SP_RUNNING,
@@ -104,17 +116,41 @@
 };
 
 /* Commit Log things */
+struct stm_undo_s {
+    object_t *object;   /* the object that is modified */
+    char *backup;       /* some backup data (a slice of the original obj) */
+    uint64_t slice;     /* location and size of this slice (cannot cross
+                           pages).  The size is in the lower 2 bytes, and
+                           the offset in the remaining 6 bytes. */
+};
+#define SLICE_OFFSET(slice)  ((slice) >> 16)
+#define SLICE_SIZE(slice)    ((int)((slice) & 0xFFFF))
+#define NEW_SLICE(offset, size) (((uint64_t)(offset)) << 16 | (size))
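
A quick worked example of the packing: NEW_SLICE(1096, 904) gives
(1096 << 16) | 904 == 0x04480388, from which SLICE_OFFSET() recovers
1096 and SLICE_SIZE() recovers 904; a size always fits in the lower 16
bits because a slice never crosses a 4096-byte page.  And since
list_append3() (see c8/stm/list.h below) appends exactly three words
per record (object, backup, slice), matching the three word-sized
fields of struct stm_undo_s, such a list can be walked by casting, as
done throughout core.c:

    struct stm_undo_s *undo = (struct stm_undo_s *)list->items;
    struct stm_undo_s *end = (struct stm_undo_s *)(list->items + list->count);
    for (; undo < end; undo++) {
        /* undo->object, undo->backup, undo->slice ... */
    }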
+
+/* The model is: we have a global chained list, from 'commit_log_root',
+   of 'struct stm_commit_log_entry_s' entries.  Every one is fully
+   read-only apart from the 'next' field.  Every one stands for one
+   commit that occurred.  It lists the old objects that were modified
+   in this commit, and their attached "undo logs" --- that is, the
+   data from 'written[n].backup' is the content of (slices of) the
+   object as they were *before* that commit occurred.
+*/
+#define INEV_RUNNING ((void*)-1)
 struct stm_commit_log_entry_s {
-    volatile struct stm_commit_log_entry_s *next;
+    struct stm_commit_log_entry_s *volatile next;
     int segment_num;
-    object_t *written[];        /* terminated with a NULL ptr */
+    uint64_t rev_num;
+    size_t written_count;
+    struct stm_undo_s written[];
 };
-static struct stm_commit_log_entry_s commit_log_root = {NULL, -1};
+static struct stm_commit_log_entry_s commit_log_root = {NULL, -1, 0, 0};
+
 
 #ifndef STM_TESTS
 static
 #endif
        char *stm_object_pages;
+static char *stm_file_pages;
 static int stm_object_pages_fd;
 static stm_thread_local_t *stm_all_thread_locals = NULL;
 
@@ -154,6 +190,8 @@
 static void synchronize_object_enqueue(object_t *obj);
 static void synchronize_objects_flush(void);
 
+static void _signal_handler(int sig, siginfo_t *siginfo, void *context);
+static void _stm_validate(void *free_if_abort);
 
 static inline void _duck(void) {
     /* put a call to _duck() between two instructions that set 0 into
@@ -174,12 +212,82 @@
     spinlock_release(get_priv_segment(segnum)->privatization_lock);
 }
 
-static inline void acquire_modified_objs_lock(int segnum)
+static inline bool all_privatization_locks_acquired()
 {
-    spinlock_acquire(get_priv_segment(segnum)->modified_objs_lock);
+#ifndef NDEBUG
+    long l;
+    for (l = 0; l < NB_SEGMENTS; l++) {
+        if (!get_priv_segment(l)->privatization_lock)
+            return false;
+    }
+    return true;
+#else
+    abort();
+#endif
 }
 
-static inline void release_modified_objs_lock(int segnum)
+static inline void acquire_all_privatization_locks()
 {
-    spinlock_release(get_priv_segment(segnum)->modified_objs_lock);
+    long l;
+    for (l = 0; l < NB_SEGMENTS; l++) {
+        acquire_privatization_lock(l);
+    }
 }
+
+static inline void release_all_privatization_locks()
+{
+    long l;
+    for (l = NB_SEGMENTS-1; l >= 0; l--) {
+        release_privatization_lock(l);
+    }
+}
+
+
+
+/* Modification locks are used to prevent copying from a segment
+   where either the revision of some pages is inconsistent with the
+   rest, or the modified_old_objects list is being modified (bk_copys).
+
+   Lock ordering: acquire privatization lock around acquiring a set
+   of modification locks!
+*/
+
+static inline void acquire_modification_lock(int segnum)
+{
+    spinlock_acquire(get_priv_segment(segnum)->modification_lock);
+}
+
+static inline void release_modification_lock(int segnum)
+{
+    spinlock_release(get_priv_segment(segnum)->modification_lock);
+}
+
+static inline void acquire_modification_lock_set(uint64_t seg_set)
+{
+    assert(NB_SEGMENTS <= 64);
+    OPT_ASSERT(seg_set < (1UL << NB_SEGMENTS));
+
+    /* acquire locks in global order */
+    int i;
+    for (i = 0; i < NB_SEGMENTS; i++) {
+        if ((seg_set & (1UL << i)) == 0)
+            continue;
+
+        spinlock_acquire(get_priv_segment(i)->modification_lock);
+    }
+}
+
+static inline void release_modification_lock_set(uint64_t seg_set)
+{
+    assert(NB_SEGMENTS <= 64);
+    OPT_ASSERT(seg_set < (1UL << NB_SEGMENTS));
+
+    int i;
+    for (i = 0; i < NB_SEGMENTS; i++) {
+        if ((seg_set & (1UL << i)) == 0)
+            continue;
+
+        assert(get_priv_segment(i)->modification_lock);
+        spinlock_release(get_priv_segment(i)->modification_lock);
+    }
+}
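
Usage sketch, mirroring handle_segfault_in_page() in core.c above:
build a bitmask of the segments involved and take the locks in one
globally ordered step:

    uint64_t to_lock = (1UL << copy_from_segnum) | (1UL << my_segnum);
    acquire_modification_lock_set(to_lock);
    /* ... copy the page and replay backup copies ... */
    release_modification_lock_set(to_lock);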
diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c
--- a/c8/stm/gcpage.c
+++ b/c8/stm/gcpage.c
@@ -5,6 +5,7 @@
 
 static void setup_gcpage(void)
 {
+    /* XXXXXXX should use stm_file_pages, no? */
     uninitialized_page_start = stm_object_pages + END_NURSERY_PAGE * 4096UL;
     uninitialized_page_stop  = stm_object_pages + NB_PAGES * 4096UL;
 }
@@ -15,11 +16,13 @@
 
 static void setup_N_pages(char *pages_addr, uint64_t num)
 {
+    /* initialize to |S|N|N|N| */
     long i;
     for (i = 0; i < NB_SEGMENTS; i++) {
         acquire_privatization_lock(i);
     }
-    pages_initialize_shared((pages_addr - stm_object_pages) / 4096UL, num);
+    pages_initialize_shared_for(STM_SEGMENT->segment_num,
+                                (pages_addr - stm_object_pages) / 4096UL, num);
     for (i = NB_SEGMENTS-1; i >= 0; i--) {
         release_privatization_lock(i);
     }
diff --git a/c8/stm/list.h b/c8/stm/list.h
--- a/c8/stm/list.h
+++ b/c8/stm/list.h
@@ -45,6 +45,19 @@
     return lst;
 }
 
+static inline struct list_s *list_append3(struct list_s *lst, uintptr_t item0,
+                                          uintptr_t item1, uintptr_t item2)
+{
+    uintptr_t index = lst->count;
+    lst->count += 3;
+    if (UNLIKELY(index + 2 > lst->last_allocated))
+        lst = _list_grow(lst, index + 2);
+    lst->items[index + 0] = item0;
+    lst->items[index + 1] = item1;
+    lst->items[index + 2] = item2;
+    return lst;
+}
+
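This is the helper that _stm_write_slowpath() in core.c uses to record
one undo-log entry (three words: object, backup, packed slice) per page
slice:

    STM_PSEGMENT->modified_old_objects = list_append3(
        STM_PSEGMENT->modified_old_objects,
        (uintptr_t)obj,                  /* object */
        (uintptr_t)bk_obj,               /* backup copy */
        NEW_SLICE(offset, slice_sz));    /* packed slice */
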
 
 static inline void list_clear(struct list_s *lst)
 {
diff --git a/c8/stm/misc.c b/c8/stm/misc.c
--- a/c8/stm/misc.c
+++ b/c8/stm/misc.c
@@ -46,8 +46,9 @@
 long _stm_count_modified_old_objects(void)
 {
     assert(STM_PSEGMENT->modified_old_objects);
-    assert(tree_count(STM_PSEGMENT->modified_old_objects) < 10000);
-    return tree_count(STM_PSEGMENT->modified_old_objects);
+    assert(list_count(STM_PSEGMENT->modified_old_objects) < 30000);
+    assert((list_count(STM_PSEGMENT->modified_old_objects) % 3) == 0);
+    return list_count(STM_PSEGMENT->modified_old_objects) / 3;
 }
 
 long _stm_count_objects_pointing_to_nursery(void)
@@ -59,8 +60,8 @@
 
 object_t *_stm_enum_modified_old_objects(long index)
 {
-    wlog_t* entry = tree_item(STM_PSEGMENT->modified_old_objects, index);
-    return (object_t*)entry->addr;
+    return (object_t *)list_item(
+        STM_PSEGMENT->modified_old_objects, index * 3);
 }
 
 object_t *_stm_enum_objects_pointing_to_nursery(long index)
@@ -69,13 +70,12 @@
         STM_PSEGMENT->objects_pointing_to_nursery, index);
 }
 
-static volatile struct stm_commit_log_entry_s *_last_cl_entry;
+static struct stm_commit_log_entry_s *_last_cl_entry;
 static long _last_cl_entry_index;
 void _stm_start_enum_last_cl_entry()
 {
     _last_cl_entry = &commit_log_root;
-    volatile struct stm_commit_log_entry_s *cl = (volatile struct stm_commit_log_entry_s *)
-        &commit_log_root;
+    struct stm_commit_log_entry_s *cl = &commit_log_root;
 
     while ((cl = cl->next)) {
         _last_cl_entry = cl;
@@ -85,8 +85,10 @@
 
 object_t *_stm_next_last_cl_entry()
 {
-    if (_last_cl_entry != &commit_log_root)
-        return _last_cl_entry->written[_last_cl_entry_index++];
-    return NULL;
+    if (_last_cl_entry == &commit_log_root)
+        return NULL;
+    if (_last_cl_entry_index >= _last_cl_entry->written_count)
+        return NULL;
+    return _last_cl_entry->written[_last_cl_entry_index++].object;
 }
 #endif
diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c
--- a/c8/stm/nursery.c
+++ b/c8/stm/nursery.c
@@ -183,6 +183,8 @@
         uintptr_t obj_sync_now = list_pop_item(lst);
         object_t *obj = (object_t *)(obj_sync_now & ~FLAG_SYNC_LARGE);
 
+        assert(!_is_in_nursery(obj));
+
         _collect_now(obj);
 
         if (obj_sync_now & FLAG_SYNC_LARGE) {
diff --git a/c8/stm/pages.c b/c8/stm/pages.c
--- a/c8/stm/pages.c
+++ b/c8/stm/pages.c
@@ -1,8 +1,8 @@
 #ifndef _STM_CORE_H_
 # error "must be compiled via stmgc.c"
 #endif
-#include <signal.h>
 
+#include <unistd.h>
 /************************************************************/
 
 static void setup_pages(void)
@@ -11,83 +11,56 @@
 
 static void teardown_pages(void)
 {
-    memset(pages_privatized, 0, sizeof(pages_privatized));
+    memset(pages_status, 0, sizeof(pages_status));
 }
 
 /************************************************************/
 
-static void d_remap_file_pages(char *addr, size_t size, ssize_t pgoff)
-{
-    dprintf(("remap_file_pages: 0x%lx bytes: (seg%ld %p) --> (seg%ld %p)\n",
-             (long)size,
-             (long)((addr - stm_object_pages) / 4096UL) / NB_PAGES,
-             (void *)((addr - stm_object_pages) % (4096UL * NB_PAGES)),
-             (long)pgoff / NB_PAGES,
-             (void *)((pgoff % NB_PAGES) * 4096UL)));
-    assert(size % 4096 == 0);
-    assert(size <= TOTAL_MEMORY);
-    assert(((uintptr_t)addr) % 4096 == 0);
-    assert(addr >= stm_object_pages);
-    assert(addr <= stm_object_pages + TOTAL_MEMORY - size);
-    assert(pgoff >= 0);
-    assert(pgoff <= (TOTAL_MEMORY - size) / 4096UL);
-
-    /* assert remappings follow the rule that page N in one segment
-       can only be remapped to page N in another segment */
-    assert(IMPLY(((addr - stm_object_pages) / 4096UL) != TMP_COPY_PAGE,
-                 ((addr - stm_object_pages) / 4096UL - pgoff) % NB_PAGES == 0));
-
-#ifdef USE_REMAP_FILE_PAGES
-    int res = remap_file_pages(addr, size, 0, pgoff, 0);
-    if (UNLIKELY(res < 0))
-        stm_fatalerror("remap_file_pages: %m");
-#else
-    char *res = mmap(addr, size,
-                     PROT_READ | PROT_WRITE,
-                     (MAP_PAGES_FLAGS & ~MAP_ANONYMOUS) | MAP_FIXED,
-                     stm_object_pages_fd, pgoff * 4096UL);
-    if (UNLIKELY(res != addr))
-        stm_fatalerror("mmap (remapping page): %m");
-#endif
-}
-
-
-static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count)
+static void pages_initialize_shared_for(long segnum, uintptr_t pagenum, uintptr_t count)
 {
     /* make all pages in the range(pagenum,
-       pagenum+count) refer to the same physical range of pages from
-       segment 0. */
-    dprintf(("pages_initialize_shared: 0x%ld - 0x%ld\n", pagenum,
-             pagenum + count));
-#ifndef NDEBUG
-    long l;
-    for (l = 0; l < NB_SEGMENTS; l++) {
-        assert(get_priv_segment(l)->privatization_lock);
-    }
-#endif
+       pagenum+count) PAGE_SHARED in segnum, and PAGE_NO_ACCESS in the other
+       segments, i.e. initialize to |S|N|N|N| */
+
+    dprintf(("pages_initialize_shared: 0x%ld - 0x%ld\n", pagenum, pagenum + count));
+
+    assert(all_privatization_locks_acquired());
+
     assert(pagenum < NB_PAGES);
     if (count == 0)
         return;
+
+    /* already shared after setup.c (also for the other count-1 pages) */
+    assert(get_page_status_in(segnum, pagenum) == PAGE_SHARED);
+
+    /* make other segments NO_ACCESS: */
     uintptr_t i;
-    for (i = 1; i < NB_SEGMENTS; i++) {
-        char *segment_base = get_segment_base(i);
-        d_remap_file_pages(segment_base + pagenum * 4096UL,
-                           count * 4096UL, pagenum);
-    }
+    for (i = 0; i < NB_SEGMENTS; i++) {
+        if (i != segnum) {
+            char *segment_base = get_segment_base(i);
+            mprotect(segment_base + pagenum * 4096UL,
+                     count * 4096UL, PROT_NONE);
 
-    for (i = 0; i < NB_SEGMENTS; i++) {
-        uintptr_t amount = count;
-        while (amount-->0) {
-            volatile struct page_shared_s *ps2 = (volatile struct page_shared_s *)
-                &pages_privatized[pagenum + amount - PAGE_FLAG_START];
+            /* char *result = mmap( */
+            /*     segment_base + pagenum * 4096UL, */
+            /*     count * 4096UL, */
+            /*     PROT_NONE, */
+            /*     MAP_FIXED|MAP_NORESERVE|MAP_PRIVATE|MAP_ANONYMOUS, */
+            /*     -1, 0); */
+            /* if (result == MAP_FAILED) */
+            /*     stm_fatalerror("pages_initialize_shared failed (mmap): %m"); */
 
-            ps2->by_segment = 0; /* not private */
+
+            long amount = count;
+            while (amount-->0) {
+                set_page_status_in(i, pagenum + amount, PAGE_NO_ACCESS);
+            }
         }
     }
 }
 
 
-static void page_privatize_in(int segnum, uintptr_t pagenum, char *initialize_from)
+static void page_privatize_in(int segnum, uintptr_t pagenum)
 {
 #ifndef NDEBUG
     long l;
@@ -95,34 +68,19 @@
         assert(get_priv_segment(l)->privatization_lock);
     }
 #endif
-
-    /* check this thread's 'pages_privatized' bit */
-    uint64_t bitmask = 1UL << segnum;
-    volatile struct page_shared_s *ps = (volatile struct page_shared_s *)
-        &pages_privatized[pagenum - PAGE_FLAG_START];
-    if (ps->by_segment & bitmask) {
-        /* the page is already privatized; nothing to do */
-        return;
-    }
-
+    assert(get_page_status_in(segnum, pagenum) == PAGE_NO_ACCESS);
     dprintf(("page_privatize(%lu) in seg:%d\n", pagenum, segnum));
 
-    /* add this thread's 'pages_privatized' bit */
-    ps->by_segment |= bitmask;
+    char *addr = (char*)(get_virt_page_of(segnum, pagenum) * 4096UL);
+    char *result = mmap(
+        addr, 4096UL, PROT_READ | PROT_WRITE,
+        MAP_FIXED | MAP_PRIVATE | MAP_NORESERVE,
+        stm_object_pages_fd, get_file_page_of(pagenum) * 4096UL);
+    if (result == MAP_FAILED)
+        stm_fatalerror("page_privatize_in failed (mmap): %m");
 
-    /* "unmaps" the page to make the address space location correspond
-       again to its underlying file offset (XXX later we should again
-       attempt to group together many calls to d_remap_file_pages() in
-       succession) */
-    uintptr_t pagenum_in_file = NB_PAGES * segnum + pagenum;
-    char *tmp_page = stm_object_pages + TMP_COPY_PAGE * 4096UL;
-    /* first remap to TMP_PAGE, then copy stuff there (to the underlying
-       file page), then remap this file-page hopefully atomically to the
-       segnum's virtual page */
-    d_remap_file_pages(tmp_page, 4096, pagenum_in_file);
-    pagecopy(tmp_page, initialize_from);
-    write_fence();
+    set_page_status_in(segnum, pagenum, PAGE_PRIVATE);
 
-    char *new_page = stm_object_pages + pagenum_in_file * 4096UL;
-    d_remap_file_pages(new_page, 4096, pagenum_in_file);
+    volatile char *dummy = REAL_ADDRESS(get_segment_base(segnum), pagenum*4096UL);
+    *dummy = *dummy;            /* force copy-on-write from shared page */
 }
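
Note on the mechanism used above: once page_privatize_in() has remapped
the page MAP_PRIVATE over the same file offset, the dummy read-write of
one byte forces the kernel's copy-on-write, after which later changes to
the shared file page no longer reach this segment.  A minimal standalone
sketch of that behaviour (the shm name and sizes are made up for the
demo and error checks are mostly omitted; not part of this changeset):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        /* demo only: one file page, created and immediately unlinked */
        int fd = shm_open("/cow_demo", O_RDWR | O_CREAT | O_EXCL, 0600);
        shm_unlink("/cow_demo");
        if (ftruncate(fd, 4096) != 0)
            return 1;

        char *shared = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                            MAP_SHARED, fd, 0);
        strcpy(shared, "revision A");

        /* same file page, mapped MAP_PRIVATE like page_privatize_in() */
        char *priv = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE, fd, 0);

        /* the dummy read-write forces the copy-on-write right away */
        volatile char *dummy = priv;
        *dummy = *dummy;

        strcpy(shared, "revision B");        /* later change to the file */
        printf("private copy: %s\n", priv);  /* still "revision A" */
        return 0;
    }
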
diff --git a/c8/stm/pages.h b/c8/stm/pages.h
--- a/c8/stm/pages.h
+++ b/c8/stm/pages.h
@@ -20,27 +20,35 @@
 
 #define PAGE_FLAG_START   END_NURSERY_PAGE
 #define PAGE_FLAG_END     NB_PAGES
-
-#define USE_REMAP_FILE_PAGES
+/* PAGE_FLAG_END - PAGE_FLAG_START == NB_SHARED_PAGES */
 
 struct page_shared_s {
-#if NB_SEGMENTS <= 8
+#if NB_SEGMENTS <= 4
     uint8_t by_segment;
+#elif NB_SEGMENTS <= 8
+    uint16_t by_segment;
 #elif NB_SEGMENTS <= 16
-    uint16_t by_segment;
+    uint32_t by_segment;
 #elif NB_SEGMENTS <= 32
-    uint32_t by_segment;
-#elif NB_SEGMENTS <= 64
     uint64_t by_segment;
 #else
-#   error "NB_SEGMENTS > 64 not supported right now"
+#   error "NB_SEGMENTS > 32 not supported right now"
 #endif
 };
 
-static struct page_shared_s pages_privatized[PAGE_FLAG_END - PAGE_FLAG_START];
+enum {
+    PAGE_SHARED = 0,
+    PAGE_PRIVATE = 1,
+    PAGE_NO_ACCESS = 2,
+};
 
-static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count);
-static void page_privatize_in(int segnum, uintptr_t pagenum, char *initialize_from);
+static struct page_shared_s pages_status[NB_SHARED_PAGES];
+
+static void pages_initialize_shared_for(long segnum, uintptr_t pagenum, uintptr_t count);
+static void page_privatize_in(int segnum, uintptr_t pagenum);
+
 
 static inline uintptr_t get_virt_page_of(long segnum, uintptr_t pagenum)
 {
@@ -48,24 +56,33 @@
     return (uintptr_t)get_segment_base(segnum) / 4096UL + pagenum;
 }
 
-static inline bool is_shared_log_page(uintptr_t pagenum)
+static inline uintptr_t get_file_page_of(uintptr_t pagenum)
 {
-    assert(pagenum >= PAGE_FLAG_START);
-    return pages_privatized[pagenum - PAGE_FLAG_START].by_segment == 0;
+    /* logical page -> file page */
+    return pagenum - PAGE_FLAG_START;
 }
 
-static inline void set_page_private_in(long segnum, uintptr_t pagenum)
+
+static inline uint8_t get_page_status_in(long segnum, uintptr_t pagenum)
 {
-    uint64_t bitmask = 1UL << segnum;
+    int seg_shift = segnum * 2;
+    OPT_ASSERT(seg_shift < 8 * sizeof(struct page_shared_s));
     volatile struct page_shared_s *ps = (volatile struct page_shared_s *)
-        &pages_privatized[pagenum - PAGE_FLAG_START];
-    assert(!(ps->by_segment & bitmask));
-    ps->by_segment |= bitmask;
+        &pages_status[get_file_page_of(pagenum)];
+
+    return (ps->by_segment >> seg_shift) & 3;
 }
 
-static inline bool is_private_log_page_in(long segnum, uintptr_t pagenum)
+static inline void set_page_status_in(long segnum, uintptr_t pagenum, uint8_t status)
 {
-    assert(pagenum >= PAGE_FLAG_START);
-    uint64_t bitmask = 1UL << segnum;
-    return (pages_privatized[pagenum - PAGE_FLAG_START].by_segment & bitmask);
+    OPT_ASSERT(status < 3);
+
+    int seg_shift = segnum * 2;
+    OPT_ASSERT(seg_shift < 8 * sizeof(struct page_shared_s));
+    volatile struct page_shared_s *ps = (volatile struct page_shared_s *)
+        &pages_status[get_file_page_of(pagenum)];
+
+    assert(status != get_page_status_in(segnum, pagenum));
+    ps->by_segment &= ~(3UL << seg_shift); /* clear */
+    ps->by_segment |= status << seg_shift; /* set */
 }
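
Note: pages_status packs two status bits per segment into a single
integer, so segment i's state for a given file page occupies bits 2*i
and 2*i+1; with NB_SEGMENTS <= 32 this fits the uint64_t case above.  A
self-contained sketch of the same encoding, mirroring the real
get_page_status_in()/set_page_status_in() (the demo_* names are invented
here):

    #include <assert.h>
    #include <stdint.h>

    enum { PAGE_SHARED = 0, PAGE_PRIVATE = 1, PAGE_NO_ACCESS = 2 };

    static uint64_t by_segment;   /* one page's status word, demo only */

    static uint8_t demo_get(long segnum)
    {
        return (by_segment >> (segnum * 2)) & 3;
    }

    static void demo_set(long segnum, uint8_t status)
    {
        int shift = segnum * 2;
        by_segment &= ~(3UL << shift);            /* clear the two bits */
        by_segment |= (uint64_t)status << shift;  /* store the new state */
    }

    int main(void)
    {
        /* the |S|N|N|N| state after pages_initialize_shared_for(0, ...) */
        demo_set(1, PAGE_NO_ACCESS);
        demo_set(2, PAGE_NO_ACCESS);
        demo_set(3, PAGE_NO_ACCESS);
        assert(demo_get(0) == PAGE_SHARED);
        assert(demo_get(1) == PAGE_NO_ACCESS);
        return 0;
    }
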
diff --git a/c8/stm/setup.c b/c8/stm/setup.c
--- a/c8/stm/setup.c
+++ b/c8/stm/setup.c
@@ -3,55 +3,61 @@
 #endif
 
 #include <signal.h>
+#include <fcntl.h>           /* For O_* constants */
 
-#ifdef USE_REMAP_FILE_PAGES
-static char *setup_mmap(char *reason, int *ignored)
+static void setup_mmap(char *reason)
 {
-    char *result = mmap(NULL, TOTAL_MEMORY,
-                        PROT_READ | PROT_WRITE,
-                        MAP_PAGES_FLAGS, -1, 0);
-    if (result == MAP_FAILED)
-        stm_fatalerror("%s failed: %m", reason);
-
-    return result;
-}
-static void close_fd_mmap(int ignored)
-{
-}
-#else
-#include <fcntl.h>           /* For O_* constants */
-static char *setup_mmap(char *reason, int *map_fd)
-{
-    char name[128];
+    char name[] = "/__stmgc_c8__";
 
     /* Create the big shared memory object, and immediately unlink it.
        There is a small window where if this process is killed the
        object is left around.  It doesn't seem possible to do anything
        about it...
     */
-    int fd = shm_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
+    stm_object_pages_fd = shm_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
     shm_unlink(name);
 
-    if (fd == -1) {
+    if (stm_object_pages_fd == -1)
         stm_fatalerror("%s failed (stm_open): %m", reason);
+
+    if (ftruncate(stm_object_pages_fd, NB_SHARED_PAGES * 4096UL) != 0)
+        stm_fatalerror("%s failed (ftruncate): %m", reason);
+
+    stm_file_pages = mmap(NULL, NB_SHARED_PAGES * 4096UL,
+                          PROT_READ | PROT_WRITE,
+                          MAP_SHARED | MAP_NORESERVE,
+                          stm_object_pages_fd, 0);
+
+    if (stm_file_pages == MAP_FAILED)
+        stm_fatalerror("%s failed (mmap): %m", reason);
+
+    /* reserve the whole virtual memory space of the program for
+       all segments: */
+    stm_object_pages = mmap(NULL, TOTAL_MEMORY,
+                            PROT_READ | PROT_WRITE,
+                            MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS,
+                            -1, 0);
+    if (stm_object_pages == MAP_FAILED)
+        stm_fatalerror("%s failed (mmap): %m", reason);
+
+    /* remap the shared part of the segments to the file pages */
+    long l;
+    for (l = 0; l < NB_SEGMENTS; l++) {
+        char *result = mmap(
+            stm_object_pages + (l * NB_PAGES + END_NURSERY_PAGE) * 4096UL, /* addr */
+            NB_SHARED_PAGES * 4096UL, /* len */
+            PROT_READ | PROT_WRITE,
+            MAP_FIXED | MAP_SHARED | MAP_NORESERVE,
+            stm_object_pages_fd, 0); /* file & offset */
+        if (result == MAP_FAILED)
+            stm_fatalerror("%s failed (mmap): %m", reason);
     }
-    if (ftruncate(fd, TOTAL_MEMORY) != 0) {
-        stm_fatalerror("%s failed (ftruncate): %m", reason);
-    }
-    char *result = mmap(NULL, TOTAL_MEMORY,
-                        PROT_READ | PROT_WRITE,
-                        MAP_PAGES_FLAGS & ~MAP_ANONYMOUS, fd, 0);
-    if (result == MAP_FAILED) {
-        stm_fatalerror("%s failed (mmap): %m", reason);
-    }
-    *map_fd = fd;
-    return result;
 }
 static void close_fd_mmap(int map_fd)
 {
     close(map_fd);
 }
-#endif
 
 static void setup_protection_settings(void)
 {
@@ -63,33 +69,40 @@
            NULL accesses land.  We mprotect it so that accesses fail. */
         mprotect(segment_base, 4096, PROT_NONE);
 
-        /* TMP_COPY_PAGE is used for atomic privatization */
-        mprotect(segment_base + TMP_COPY_PAGE * 4096UL,
-                 4096UL, PROT_READ|PROT_WRITE);
-
         /* Pages in range(2, FIRST_READMARKER_PAGE) are never used */
-        if (FIRST_READMARKER_PAGE > TMP_COPY_PAGE + 1)
-            mprotect(segment_base + (TMP_COPY_PAGE + 1) * 4096,
-                     (FIRST_READMARKER_PAGE - TMP_COPY_PAGE - 1) * 4096UL,
+        if (FIRST_READMARKER_PAGE > 2)
+            mprotect(segment_base + 2 * 4096,
+                     (FIRST_READMARKER_PAGE - 2) * 4096UL,
                      PROT_NONE);
 
-        /* STM_SEGMENT */
-        mprotect(segment_base + ((uintptr_t)STM_SEGMENT / 4096UL) * 4096UL,
-                 4096UL, PROT_READ|PROT_WRITE);
+        /* STM_SEGMENT and the thread-locals live in page 1, which stays accessible */
     }
 }
 
 
+static void setup_signal_handler(void)
+{
+    struct sigaction act;
+    memset(&act, 0, sizeof(act));
+
+    act.sa_sigaction = &_signal_handler;
+    /* SA_SIGINFO tells sigaction() to use the sa_sigaction field,
+       not sa_handler */
+    act.sa_flags = SA_SIGINFO | SA_NODEFER;
+
+    if (sigaction(SIGSEGV, &act, NULL) < 0) {
+        perror("sigaction");
+        abort();
+    }
+}
+
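
Note: _signal_handler itself is defined further down in the changeset
(cut off by the truncation of this mail).  With SA_SIGINFO the handler
must have the three-argument form; a hypothetical skeleton consistent
with the setup above, with only a placeholder body:

    #include <signal.h>
    #include <stdlib.h>
    #include <string.h>

    /* placeholder body; the real handler recovers the (segment, page)
       pair from si_addr and either re-shares the NO_ACCESS page or
       privatizes it, as described in c8/PAGES */
    static void _signal_handler(int sig, siginfo_t *siginfo, void *context)
    {
        void *addr = siginfo->si_addr;   /* the faulting address */
        (void)sig; (void)context; (void)addr;
        abort();                         /* unexplained faults must not pass */
    }

    int main(void)
    {
        struct sigaction act;
        memset(&act, 0, sizeof(act));
        act.sa_sigaction = &_signal_handler;
        act.sa_flags = SA_SIGINFO | SA_NODEFER;
        return sigaction(SIGSEGV, &act, NULL);
    }

SA_NODEFER presumably keeps SIGSEGV unblocked inside the handler, so
that a nested fault raised while fixing up pages can still be handled
instead of killing the process.
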
 void stm_setup(void)
 {
     /* Check that some values are acceptable */
-    assert(TMP_COPY_PAGE > 0 && TMP_COPY_PAGE <= 1);
-    assert(TMP_COPY_PAGE * 4096 + 4096 <= ((uintptr_t)STM_SEGMENT));
+    assert(4096 <= ((uintptr_t)STM_SEGMENT));
     assert((uintptr_t)STM_SEGMENT == (uintptr_t)STM_PSEGMENT);
     assert(((uintptr_t)STM_PSEGMENT) + sizeof(*STM_PSEGMENT) <= FIRST_READMARKER_PAGE*4096);
 
     assert(NB_SEGMENTS <= NB_SEGMENTS_MAX);
-    assert(TMP_COPY_PAGE < FIRST_READMARKER_PAGE);
     assert(FIRST_READMARKER_PAGE * 4096UL <= READMARKER_START);
     assert(READMARKER_START < READMARKER_END);
     assert(READMARKER_END <= 4096UL * FIRST_OBJECT_PAGE);
@@ -99,9 +112,14 @@
            (FIRST_READMARKER_PAGE * 4096UL));
     assert(_STM_FAST_ALLOC <= NB_NURSERY_PAGES * 4096);
 
-    stm_object_pages = setup_mmap("initial stm_object_pages mmap()",
-                                  &stm_object_pages_fd);
+    setup_mmap("initial stm_object_pages mmap()");
+
+    assert(stm_object_pages_fd);
+    assert(stm_object_pages);
+    assert(stm_file_pages);
+
     setup_protection_settings();
+    setup_signal_handler();
 
     long i;
     for (i = 0; i < NB_SEGMENTS; i++) {
@@ -118,7 +136,7 @@
         assert(0 <= i && i < 255);   /* 255 is WL_VISITED in gcpage.c */
         pr->pub.segment_num = i;
         pr->pub.segment_base = segment_base;
-        pr->modified_old_objects = tree_create();
+        pr->modified_old_objects = list_create();
         pr->objects_pointing_to_nursery = list_create();
         pr->young_outside_nursery = tree_create();
         pr->nursery_objects_shadows = tree_create();
@@ -155,7 +173,7 @@
         struct stm_priv_segment_info_s *pr = get_priv_segment(i);
         assert(list_is_empty(pr->objects_pointing_to_nursery));
         list_free(pr->objects_pointing_to_nursery);
-        tree_free(pr->modified_old_objects);
+        list_free(pr->modified_old_objects);
         tree_free(pr->young_outside_nursery);
         tree_free(pr->nursery_objects_shadows);
         tree_free(pr->callbacks_on_commit_and_abort[0]);
@@ -243,6 +261,15 @@
     _init_shadow_stack(tl);
     set_gs_register(get_segment_base(num));
     s_mutex_unlock();
+
+    if (num == 0) {
+        dprintf(("STM_GC_NURSERY: %d\n", STM_GC_NURSERY));
+        dprintf(("NB_PAGES: %d\n", NB_PAGES));
+        dprintf(("NB_SEGMENTS: %d\n", NB_SEGMENTS));
+        dprintf(("FIRST_OBJECT_PAGE=FIRST_NURSERY_PAGE: %lu\n", FIRST_OBJECT_PAGE));
+        dprintf(("END_NURSERY_PAGE: %lu\n", END_NURSERY_PAGE));
+        dprintf(("NB_SHARED_PAGES: %lu\n", NB_SHARED_PAGES));
+    }
 }
 
 void stm_unregister_thread_local(stm_thread_local_t *tl)
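
Note: the new setup_mmap() first reserves the whole TOTAL_MEMORY range
with a single anonymous mapping, then stamps the shared file over the
tail of every segment with MAP_FIXED, always at file offset 0, so each
file page appears once per segment.  A sketch of the address arithmetic
with illustrative numbers (the real constants come from the headers):

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative values only; the real ones live in the headers */
    #define NB_SEGMENTS      4
    #define NB_PAGES         2500UL   /* pages per segment */
    #define END_NURSERY_PAGE 500UL    /* first shared page of a segment */
    #define NB_SHARED_PAGES  (NB_PAGES - END_NURSERY_PAGE)

    int main(void)
    {
        uintptr_t base = 0x10000000000UL;  /* pretend stm_object_pages */
        long seg;
        for (seg = 0; seg < NB_SEGMENTS; seg++) {
            /* the MAP_FIXED target of the per-segment remapping loop */
            uintptr_t addr =
                base + (seg * NB_PAGES + END_NURSERY_PAGE) * 4096UL;
            printf("segment %ld: shared part at %#lx (%lu pages,"
                   " file offset 0)\n",
                   seg, (unsigned long)addr, NB_SHARED_PAGES);
        }
        return 0;
    }
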
diff --git a/c8/stm/setup.h b/c8/stm/setup.h
--- a/c8/stm/setup.h
+++ b/c8/stm/setup.h
@@ -1,4 +1,4 @@
-static char *setup_mmap(char *reason, int *map_fd);
+static void setup_mmap(char *reason);
 static void close_fd_mmap(int map_fd);
 static void setup_protection_settings(void);
 static pthread_t *_get_cpth(stm_thread_local_t *);
diff --git a/c8/stmgc.h b/c8/stmgc.h
--- a/c8/stmgc.h
+++ b/c8/stmgc.h
@@ -34,7 +34,7 @@
     uintptr_t nursery_end;
     struct stm_thread_local_s *running_thread;
 };
-#define STM_SEGMENT           ((stm_segment_info_t *)8192)
+#define STM_SEGMENT           ((stm_segment_info_t *)4352)
 
 
 struct stm_shadowentry_s {
@@ -71,7 +71,6 @@
 char *_stm_real_address(object_t *o);
 #ifdef STM_TESTS
 #include <stdbool.h>
-void stm_validate(void *free_if_abort);
 bool _stm_was_read(object_t *obj);
 bool _stm_was_written(object_t *obj);
 
@@ -231,6 +230,7 @@
 }
 
 void stm_become_globally_unique_transaction(stm_thread_local_t *tl, const char *msg);
+void stm_validate(void);
 
 
 /* dummies for now: */
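
Note: STM_SEGMENT moves from 8192 to 4352, i.e. from page 2 into page 1
(4352 = 4096 + 256).  The struct is read through the %gs-based segment
base, and page 1 also holds the thread-local data, which is why
setup_protection_settings() no longer needs the extra mprotect calls
removed above.  The constraints stm_setup() asserts, restated as a
compile-time sketch:

    /* sketch of the constraints behind the new constant */
    _Static_assert(4352 >= 4096,
                   "page 0 is PROT_NONE so NULL accesses fault");
    _Static_assert(4352 / 4096 == 1,
                   "STM_SEGMENT shares page 1 with the thread-locals");

    int main(void) { return 0; }
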
diff --git a/c8/test/support.py b/c8/test/support.py
--- a/c8/test/support.py
+++ b/c8/test/support.py
@@ -43,13 +43,14 @@
 void stm_teardown(void);
 void stm_register_thread_local(stm_thread_local_t *tl);
 void stm_unregister_thread_local(stm_thread_local_t *tl);
-void stm_validate(void *free_if_abort);
+void stm_validate(void);
 bool _check_stm_validate();
 
 object_t *stm_setup_prebuilt(object_t *);
 void _stm_start_safe_point(void);
 bool _check_stop_safe_point(void);
 
+ssize_t _checked_stmcb_size_rounded_up(struct object_s *obj);
 
 bool _checked_stm_write(object_t *obj);
 bool _stm_was_read(object_t *obj);
@@ -172,6 +173,20 @@
     return 1;
 }
 
+ssize_t _checked_stmcb_size_rounded_up(struct object_s *obj)
+{
+    stm_thread_local_t *_tl = STM_SEGMENT->running_thread;
+    void **jmpbuf = _tl->rjthread.jmpbuf;
+    if (__builtin_setjmp(jmpbuf) == 0) { /* returned directly */
+        ssize_t res = stmcb_size_rounded_up(obj);
+        clear_jmpbuf(_tl);
+        return res;
+    }
+    clear_jmpbuf(_tl);
+    return 1;
+}
+
+
 bool _check_stop_safe_point(void) {
     CHECKED(_stm_stop_safe_point());
 }
@@ -189,7 +204,7 @@
 }
 
 bool _check_stm_validate(void) {
-    CHECKED(stm_validate(NULL));
+    CHECKED(stm_validate());
 }
 
 #undef CHECKED
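
Note: _checked_stmcb_size_rounded_up above is a hand expansion of the
CHECKED macro that wraps the other _check_* helpers (the macro's
definition lies outside this excerpt), adapted to return the computed
size instead of 0.  Reconstructed from that expansion, CHECKED
presumably looks roughly like this (an assumption, not verbatim from
the source):

    /* assumed shape, inferred from the expanded wrapper above */
    #define CHECKED(CALL)                                            \
        stm_thread_local_t *_tl = STM_SEGMENT->running_thread;       \
        void **jmpbuf = _tl->rjthread.jmpbuf;                        \
        if (__builtin_setjmp(jmpbuf) == 0) { /* returned directly */ \
            CALL;                                                    \
            clear_jmpbuf(_tl);                                       \
            return 0;                                                \
        }                                                            \
        clear_jmpbuf(_tl);                                           \
        return 1    /* aborted: longjmp'ed back here */

The boolean result of each _check_* wrapper thus apparently means "the
transaction aborted and we longjmp'ed back".
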
@@ -276,7 +291,7 @@
                     ],
      undef_macros=['NDEBUG'],
      include_dirs=[parent_dir],
-     extra_compile_args=['-g', '-O0', '-Wall', '-ferror-limit=1'],

