[pypy-commit] pypy stmgc-c4: import stmgc with allocate_public_integer_address
Raemi
noreply at buildbot.pypy.org
Mon Aug 19 08:59:23 CEST 2013
Author: Remi Meier <remi.meier at gmail.com>
Branch: stmgc-c4
Changeset: r66203:56869831cd72
Date: 2013-08-19 08:53 +0200
http://bitbucket.org/pypy/pypy/changeset/56869831cd72/
Log:	import stmgc with allocate_public_integer_address
diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c
--- a/rpython/translator/stm/src_stm/et.c
+++ b/rpython/translator/stm/src_stm/et.c
@@ -275,29 +275,81 @@
/* Version of stm_DirectReadBarrier() that doesn't abort and assumes
* that 'P' was already an up-to-date result of a previous
* stm_DirectReadBarrier(). We only have to check if we did in the
- * meantime a stm_write_barrier().
+ * meantime a stm_write_barrier(). Should only be called if we
+ * have the flag PUBLIC_TO_PRIVATE or on MOVED objects. This version
+ * should never abort (it is used in stm_decode_abort_info()).
*/
- if (P->h_tid & GCFLAG_PUBLIC)
+ assert(P->h_tid & GCFLAG_PUBLIC);
+ assert(!(P->h_tid & GCFLAG_STUB));
+
+ if (P->h_tid & GCFLAG_MOVED)
{
- if (P->h_tid & GCFLAG_MOVED)
+ dprintf(("repeat_read_barrier: %p -> %p moved\n", P,
+ (gcptr)P->h_revision));
+ P = (gcptr)P->h_revision;
+ assert(P->h_tid & GCFLAG_PUBLIC);
+ assert(!(P->h_tid & GCFLAG_STUB));
+ assert(!(P->h_tid & GCFLAG_MOVED));
+ if (!(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE))
+ return P;
+ }
+ assert(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE);
+
+ struct tx_descriptor *d = thread_descriptor;
+ wlog_t *item;
+ G2L_FIND(d->public_to_private, P, item, goto no_private_obj);
+
+ /* We have a key in 'public_to_private'. The value is the
+ corresponding private object. */
+ dprintf(("repeat_read_barrier: %p -> %p public_to_private\n", P, item->val));
+ P = item->val;
+ assert(!(P->h_tid & GCFLAG_PUBLIC));
+ assert(!(P->h_tid & GCFLAG_STUB));
+ assert(is_private(P));
+ return P;
+
+ no_private_obj:
+ /* Key not found. It should not be waiting in 'stolen_objects',
+ because this case from steal.c applies to objects to were originally
+ backup objects. 'P' cannot be a backup object if it was obtained
+ earlier as a result of stm_read_barrier().
+ */
+ return P;
+}
+
+gcptr stm_ImmutReadBarrier(gcptr P)
+{
+ assert(P->h_tid & GCFLAG_STUB);
+ assert(P->h_tid & GCFLAG_PUBLIC);
+
+ revision_t v = ACCESS_ONCE(P->h_revision);
+ assert(IS_POINTER(v)); /* "is a pointer", "has a more recent revision" */
+
+ if (!(v & 2))
+ {
+ P = (gcptr)v;
+ }
+ else
+ {
+ /* follow a stub reference */
+ struct tx_descriptor *d = thread_descriptor;
+ struct tx_public_descriptor *foreign_pd = STUB_THREAD(P);
+ if (foreign_pd == d->public_descriptor)
{
- P = (gcptr)P->h_revision;
- assert(P->h_tid & GCFLAG_PUBLIC);
+ /* Same thread: dereference the pointer directly. */
+ dprintf(("immut_read_barrier: %p -> %p via stub\n ", P,
+ (gcptr)(v - 2)));
+ P = (gcptr)(v - 2);
}
- if (P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)
+ else
{
- struct tx_descriptor *d = thread_descriptor;
- wlog_t *item;
- G2L_FIND(d->public_to_private, P, item, goto no_private_obj);
-
- P = item->val;
- assert(!(P->h_tid & GCFLAG_PUBLIC));
- no_private_obj:
- ;
+ /* stealing: needed because accessing v - 2 from this thread
+ is forbidden (the target might disappear under our feet) */
+ dprintf(("immut_read_barrier: %p -> stealing...\n ", P));
+ stm_steal_stub(P);
}
}
- assert(!(P->h_tid & GCFLAG_STUB));
- return P;
+ return stm_immut_read_barrier(P); /* retry */
}
static gcptr _match_public_to_private(gcptr P, gcptr pubobj, gcptr privobj,
@@ -565,6 +617,16 @@
}
}
+gcptr stm_RepeatWriteBarrier(gcptr P)
+{
+ assert(!(P->h_tid & GCFLAG_IMMUTABLE));
+ assert(is_private(P));
+ assert(P->h_tid & GCFLAG_WRITE_BARRIER);
+ P->h_tid &= ~GCFLAG_WRITE_BARRIER;
+ gcptrlist_insert(&thread_descriptor->old_objects_to_trace, P);
+ return P;
+}
+
gcptr stm_WriteBarrier(gcptr P)
{
assert(!(P->h_tid & GCFLAG_IMMUTABLE));
@@ -1276,9 +1338,13 @@
and then free B, which will not be used any more. */
size_t size = stmgc_size(B);
assert(B->h_tid & GCFLAG_BACKUP_COPY);
+ /* if h_original was 0, it must stay that way and not point
+ to itself. (B->h_original may point to P) */
+ revision_t h_original = P->h_original;
memcpy(((char *)P) + offsetof(struct stm_object_s, h_revision),
((char *)B) + offsetof(struct stm_object_s, h_revision),
size - offsetof(struct stm_object_s, h_revision));
+ P->h_original = h_original;
assert(!(P->h_tid & GCFLAG_BACKUP_COPY));
stmgcpage_free(B);
dprintf(("abort: free backup at %p\n", B));
diff --git a/rpython/translator/stm/src_stm/et.h b/rpython/translator/stm/src_stm/et.h
--- a/rpython/translator/stm/src_stm/et.h
+++ b/rpython/translator/stm/src_stm/et.h
@@ -70,11 +70,11 @@
static const revision_t GCFLAG_VISITED = STM_FIRST_GCFLAG << 1;
static const revision_t GCFLAG_PUBLIC = STM_FIRST_GCFLAG << 2;
static const revision_t GCFLAG_PREBUILT_ORIGINAL = STM_FIRST_GCFLAG << 3;
-static const revision_t GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4;
+// in stmgc.h: GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4;
// in stmgc.h: GCFLAG_WRITE_BARRIER = STM_FIRST_GCFLAG << 5;
-static const revision_t GCFLAG_MOVED = STM_FIRST_GCFLAG << 6;
+// in stmgc.h: GCFLAG_MOVED = STM_FIRST_GCFLAG << 6;
static const revision_t GCFLAG_BACKUP_COPY /*debug*/ = STM_FIRST_GCFLAG << 7;
-static const revision_t GCFLAG_STUB /*debug*/ = STM_FIRST_GCFLAG << 8;
+// in stmgc.h: GCFLAG_STUB = STM_FIRST_GCFLAG << 8;
static const revision_t GCFLAG_PRIVATE_FROM_PROTECTED = STM_FIRST_GCFLAG << 9;
static const revision_t GCFLAG_HAS_ID = STM_FIRST_GCFLAG << 10;
static const revision_t GCFLAG_IMMUTABLE = STM_FIRST_GCFLAG << 11;
@@ -196,8 +196,10 @@
void SpinLoop(int);
gcptr stm_DirectReadBarrier(gcptr);
+gcptr stm_WriteBarrier(gcptr);
gcptr stm_RepeatReadBarrier(gcptr);
-gcptr stm_WriteBarrier(gcptr);
+gcptr stm_ImmutReadBarrier(gcptr);
+gcptr stm_RepeatWriteBarrier(gcptr);
gcptr _stm_nonrecord_barrier(gcptr); /* debugging: read barrier, but
not recording anything */
int _stm_is_private(gcptr); /* debugging */
diff --git a/rpython/translator/stm/src_stm/extra.c b/rpython/translator/stm/src_stm/extra.c
--- a/rpython/translator/stm/src_stm/extra.c
+++ b/rpython/translator/stm/src_stm/extra.c
@@ -24,6 +24,53 @@
stm_bytes_to_clear_on_abort = bytes;
}
+
+intptr_t stm_allocate_public_integer_address(gcptr obj)
+{
+ struct tx_descriptor *d = thread_descriptor;
+ gcptr stub;
+ intptr_t result;
+ /* plan: we allocate a small stub whose reference
+ we never give to the caller except in the form
+ of an integer.
+ During major collections, we visit them and update
+ their references. */
+
+ /* we don't want to deal with young objs */
+ if (!(obj->h_tid & GCFLAG_OLD)) {
+ stm_push_root(obj);
+ stm_minor_collect();
+ obj = stm_pop_root();
+ }
+
+ spinlock_acquire(d->public_descriptor->collection_lock, 'P');
+
+ stub = stm_stub_malloc(d->public_descriptor, 0);
+ stub->h_tid = (obj->h_tid & STM_USER_TID_MASK)
+ | GCFLAG_PUBLIC | GCFLAG_STUB | GCFLAG_SMALLSTUB
+ | GCFLAG_OLD;
+
+ stub->h_revision = ((revision_t)obj) | 2;
+ if (!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL) && obj->h_original) {
+ stub->h_original = obj->h_original;
+ }
+ else {
+ stub->h_original = (revision_t)obj;
+ }
+
+ result = (intptr_t)stub;
+ spinlock_release(d->public_descriptor->collection_lock);
+ stm_register_integer_address(result);
+
+ dprintf(("allocate_public_int_adr(%p): %p", obj, stub));
+ return result;
+}
+
+
+
+
+
+
/************************************************************/
/* Each object has a h_original pointer to an old copy of
the same object (e.g. an old revision), the "original".
@@ -92,6 +139,8 @@
return (revision_t)p;
}
+ assert(p->h_original != (revision_t)p);
+
dprintf(("stm_id(%p) has orig fst: %p\n",
p, (gcptr)p->h_original));
return p->h_original;
@@ -154,6 +203,19 @@
return (p1 == p2);
}
+_Bool stm_pointer_equal_prebuilt(gcptr p1, gcptr p2)
+{
+ assert(p2 != NULL);
+ assert(p2->h_tid & GCFLAG_PREBUILT_ORIGINAL);
+
+ if (p1 == p2)
+ return 1;
+
+ /* the only possible case to still get True is if p2 == p1->h_original */
+ return (p1 != NULL) && (p1->h_original == (revision_t)p2) &&
+ !(p1->h_tid & GCFLAG_PREBUILT_ORIGINAL);
+}
+
/************************************************************/
void stm_abort_info_push(gcptr obj, long fieldoffsets[])
@@ -205,7 +267,7 @@
WRITE_BUF(buffer, res_size);
WRITE('e');
for (i=0; i<d->abortinfo.size; i+=2) {
- char *object = (char *)stm_RepeatReadBarrier(d->abortinfo.items[i+0]);
+ char *object = (char*)stm_repeat_read_barrier(d->abortinfo.items[i+0]);
long *fieldoffsets = (long*)d->abortinfo.items[i+1];
long kind, offset;
size_t rps_size;
diff --git a/rpython/translator/stm/src_stm/gcpage.c b/rpython/translator/stm/src_stm/gcpage.c
--- a/rpython/translator/stm/src_stm/gcpage.c
+++ b/rpython/translator/stm/src_stm/gcpage.c
@@ -23,6 +23,9 @@
/* Only computed during a major collection */
static size_t mc_total_in_use, mc_total_reserved;
+/* keeps track of registered smallstubs that will survive unless unregistered */
+static struct G2L registered_stubs;
+
/* For tests */
long stmgcpage_count(int quantity)
{
@@ -63,6 +66,8 @@
nblocks_for_size[i] =
(GC_PAGE_SIZE - sizeof(page_header_t)) / (WORD * i);
}
+
+    memset(&registered_stubs, 0, sizeof(registered_stubs));
}
void stmgcpage_init_tls(void)
@@ -209,6 +214,34 @@
}
+/***** registering of small stubs as integer addresses *****/
+
+void stm_register_integer_address(intptr_t adr)
+{
+ gcptr obj = (gcptr)adr;
+ assert(obj->h_tid & GCFLAG_SMALLSTUB);
+ assert(obj->h_tid & GCFLAG_PUBLIC);
+
+ stmgcpage_acquire_global_lock();
+    g2l_insert(&registered_stubs, obj, NULL);
+ stmgcpage_release_global_lock();
+ dprintf(("registered %p\n", obj));
+}
+
+void stm_unregister_integer_address(intptr_t adr)
+{
+ gcptr obj = (gcptr)adr;
+ assert(obj->h_tid & GCFLAG_SMALLSTUB);
+ assert(obj->h_tid & GCFLAG_PUBLIC);
+
+ stmgcpage_acquire_global_lock();
+    g2l_delete_item(&registered_stubs, obj);
+ stmgcpage_release_global_lock();
+ dprintf(("unregistered %p\n", obj));
+}
+
+
+
/***** Major collections: marking *****/
static struct GcPtrList objects_to_trace;
@@ -460,6 +493,27 @@
}
}
+static void mark_registered_stubs(void)
+{
+ wlog_t *item;
+ G2L_LOOP_FORWARD(registered_stubs, item) {
+ gcptr R = item->addr;
+ assert(R->h_tid & GCFLAG_SMALLSTUB);
+
+ R->h_tid |= (GCFLAG_MARKED | GCFLAG_VISITED);
+
+ gcptr L = (gcptr)(R->h_revision - 2);
+ L = stmgcpage_visit(L);
+ R->h_revision = ((revision_t)L) | 2;
+
+ /* h_original will be kept up-to-date because
+ it is either == L or L's h_original. And
+ h_originals don't move */
+ } G2L_LOOP_END;
+
+}
+
+
static void mark_roots(gcptr *root, gcptr *end)
{
assert(*root == END_MARKER_ON);
@@ -497,6 +551,14 @@
visit_take_protected(d->thread_local_obj_ref);
visit_take_protected(&d->old_thread_local_obj);
+ /* the abortinfo objects */
+ long i, size = d->abortinfo.size;
+ gcptr *items = d->abortinfo.items;
+ for (i = 0; i < size; i += 2) {
+ visit_take_protected(&items[i]);
+ /* items[i+1] is not a gc ptr */
+ }
+
/* the current transaction's private copies of public objects */
wlog_t *item;
G2L_LOOP_FORWARD(d->public_to_private, item) {
@@ -528,8 +590,8 @@
} G2L_LOOP_END;
/* reinsert to real pub_to_priv */
- long i, size = new_public_to_private.size;
- gcptr *items = new_public_to_private.items;
+ size = new_public_to_private.size;
+ items = new_public_to_private.items;
for (i = 0; i < size; i += 2) {
g2l_insert(&d->public_to_private, items[i], items[i + 1]);
}
@@ -890,6 +952,7 @@
assert(gcptrlist_size(&objects_to_trace) == 0);
mark_prebuilt_roots();
+ mark_registered_stubs();
mark_all_stack_roots();
do {
visit_all_objects();
diff --git a/rpython/translator/stm/src_stm/nursery.c b/rpython/translator/stm/src_stm/nursery.c
--- a/rpython/translator/stm/src_stm/nursery.c
+++ b/rpython/translator/stm/src_stm/nursery.c
@@ -176,6 +176,7 @@
stm_copy_to_old_id_copy(obj, id_obj);
fresh_old_copy = id_obj;
+ fresh_old_copy->h_original = 0;
obj->h_tid &= ~GCFLAG_HAS_ID;
}
else {
@@ -437,6 +438,19 @@
spinlock_release(d->public_descriptor->collection_lock);
}
+static void mark_extra_stuff(struct tx_descriptor *d)
+{
+ visit_if_young(d->thread_local_obj_ref);
+ visit_if_young(&d->old_thread_local_obj);
+
+ long i, size = d->abortinfo.size;
+ gcptr *items = d->abortinfo.items;
+ for (i = 0; i < size; i += 2) {
+ visit_if_young(&items[i]);
+ /* items[i+1] is not a gc ptr */
+ }
+}
+
static void minor_collect(struct tx_descriptor *d)
{
dprintf(("minor collection [%p to %p]\n",
@@ -452,8 +466,7 @@
mark_young_roots(d);
- visit_if_young(d->thread_local_obj_ref);
- visit_if_young(&d->old_thread_local_obj);
+ mark_extra_stuff(d);
mark_stolen_young_stubs(d);
diff --git a/rpython/translator/stm/src_stm/steal.c b/rpython/translator/stm/src_stm/steal.c
--- a/rpython/translator/stm/src_stm/steal.c
+++ b/rpython/translator/stm/src_stm/steal.c
@@ -20,6 +20,59 @@
};
static __thread struct tx_steal_data *steal_data;
+static void replace_ptr_to_immutable_with_stub(gcptr * pobj)
+{
+ gcptr stub, obj = *pobj;
+ assert(obj->h_tid & GCFLAG_IMMUTABLE);
+ assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED));
+ if (obj->h_tid & GCFLAG_PUBLIC) {
+ /* young public, replace with stolen old copy */
+ assert(obj->h_tid & GCFLAG_MOVED);
+ assert(IS_POINTER(obj->h_revision));
+ stub = (gcptr)obj->h_revision;
+ assert(!IS_POINTER(stub->h_revision)); /* not outdated */
+ goto done;
+ }
+
+ /* old or young protected! mark as PUBLIC */
+ if (!(obj->h_tid & GCFLAG_OLD)) {
+ /* young protected */
+ gcptr O;
+
+ if (obj->h_tid & GCFLAG_HAS_ID) {
+ /* use id-copy for us */
+ O = (gcptr)obj->h_original;
+ obj->h_tid &= ~GCFLAG_HAS_ID;
+ stm_copy_to_old_id_copy(obj, O);
+ O->h_original = 0;
+ } else {
+ O = stmgc_duplicate_old(obj);
+
+ /* young and without original? */
+ if (!(obj->h_original))
+ obj->h_original = (revision_t)O;
+ }
+ obj->h_tid |= (GCFLAG_MOVED | GCFLAG_PUBLIC);
+ obj->h_revision = (revision_t)O;
+
+ O->h_tid |= GCFLAG_PUBLIC;
+ /* here it is fine if it stays in read caches because
+ the object is immutable anyway and there are no
+ write_barriers allowed. */
+ dprintf(("steal prot immutable -> public: %p -> %p\n", obj, O));
+ stub = O;
+ goto done;
+ }
+ /* old protected: */
+ dprintf(("prot immutable -> public: %p\n", obj));
+ obj->h_tid |= GCFLAG_PUBLIC;
+
+ return;
+ done:
+ *pobj = stub;
+ dprintf((" stolen: fixing *%p: %p -> %p\n", pobj, obj, stub));
+}
+
static void replace_ptr_to_protected_with_stub(gcptr *pobj)
{
gcptr stub, obj = *pobj;
@@ -28,49 +81,7 @@
return;
if (obj->h_tid & GCFLAG_IMMUTABLE) {
- assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED));
- if (obj->h_tid & GCFLAG_PUBLIC) {
- /* young public, replace with stolen old copy */
- assert(obj->h_tid & GCFLAG_MOVED);
- assert(IS_POINTER(obj->h_revision));
- stub = (gcptr)obj->h_revision;
- assert(!IS_POINTER(stub->h_revision)); /* not outdated */
- goto done;
- }
-
- /* old or young protected! mark as PUBLIC */
- if (!(obj->h_tid & GCFLAG_OLD)) {
- /* young protected */
- gcptr O;
-
- if (obj->h_tid & GCFLAG_HAS_ID) {
- /* use id-copy for us */
- O = (gcptr)obj->h_original;
- obj->h_tid &= ~GCFLAG_HAS_ID;
- stm_copy_to_old_id_copy(obj, O);
- O->h_original = 0;
- } else {
- O = stmgc_duplicate_old(obj);
-
- /* young and without original? */
- if (!(obj->h_original))
- obj->h_original = (revision_t)O;
- }
- obj->h_tid |= (GCFLAG_MOVED | GCFLAG_PUBLIC);
- obj->h_revision = (revision_t)O;
-
- O->h_tid |= GCFLAG_PUBLIC;
- /* here it is fine if it stays in read caches because
- the object is immutable anyway and there are no
- write_barriers allowed. */
- dprintf(("steal prot immutable -> public: %p -> %p\n", obj, O));
- stub = O;
- goto done;
- }
- /* old protected: */
- dprintf(("prot immutable -> public: %p\n", obj));
- obj->h_tid |= GCFLAG_PUBLIC;
-
+ replace_ptr_to_immutable_with_stub(pobj);
return;
}
diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h
--- a/rpython/translator/stm/src_stm/stmgc.h
+++ b/rpython/translator/stm/src_stm/stmgc.h
@@ -29,12 +29,21 @@
#define PREBUILT_REVISION 1
+/* push roots around allocating functions! */
+
/* allocate an object out of the local nursery */
gcptr stm_allocate(size_t size, unsigned long tid);
/* allocate an object that is be immutable. it cannot be changed with
a stm_write_barrier() or after the next commit */
gcptr stm_allocate_immutable(size_t size, unsigned long tid);
+/* allocates a public reference to the object that will
+ not be freed until stm_unregister_integer_address is
+ called on the result */
+intptr_t stm_allocate_public_integer_address(gcptr);
+void stm_unregister_integer_address(intptr_t);
+
+
/* returns a never changing hash for the object */
revision_t stm_hash(gcptr);
/* returns a number for the object which is unique during its lifetime */
@@ -42,6 +51,7 @@
/* returns nonzero if the two object-copy pointers belong to the
same original object */
_Bool stm_pointer_equal(gcptr, gcptr);
+_Bool stm_pointer_equal_prebuilt(gcptr, gcptr); /* 2nd arg is known prebuilt */
/* to push/pop objects into the local shadowstack */
#if 0 // (optimized version below)
@@ -59,7 +69,7 @@
int stm_enter_callback_call(void);
void stm_leave_callback_call(int);
-/* read/write barriers (the most general versions only for now).
+/* read/write barriers.
- the read barrier must be applied before reading from an object.
the result is valid as long as we're in the same transaction,
@@ -69,10 +79,28 @@
the result is valid for a shorter period of time: we have to
do stm_write_barrier() again if we ended the transaction, or
if we did a potential collection (e.g. stm_allocate()).
+
+ - as an optimization, stm_repeat_read_barrier() can be used
+ instead of stm_read_barrier() if the object was already
+ obtained by a stm_read_barrier() in the same transaction.
+ The only thing that may have occurred is that a
+ stm_write_barrier() on the same object could have made it
+ invalid.
+
+ - a different optimization is to read immutable fields: in order
+ to do that, use stm_immut_read_barrier(), which only activates
+ on stubs.
+
+ - stm_repeat_write_barrier() can be used on an object on which
+ we already did stm_write_barrier(), but a potential collection
+ can have occurred.
*/
#if 0 // (optimized version below)
gcptr stm_read_barrier(gcptr);
gcptr stm_write_barrier(gcptr);
+gcptr stm_repeat_read_barrier(gcptr);
+gcptr stm_immut_read_barrier(gcptr);
+gcptr stm_repeat_write_barrier(gcptr); /* <= always returns its argument */
#endif
/* start a new transaction, calls callback(), and when it returns
@@ -148,6 +176,8 @@
extern __thread void *stm_to_clear_on_abort;
extern __thread size_t stm_bytes_to_clear_on_abort;
+/* only user currently is stm_allocate_public_integer_address() */
+void stm_register_integer_address(intptr_t);
/* macro functionality */
@@ -159,7 +189,13 @@
extern __thread revision_t stm_private_rev_num;
gcptr stm_DirectReadBarrier(gcptr);
gcptr stm_WriteBarrier(gcptr);
+gcptr stm_RepeatReadBarrier(gcptr);
+gcptr stm_ImmutReadBarrier(gcptr);
+gcptr stm_RepeatWriteBarrier(gcptr);
+static const revision_t GCFLAG_PUBLIC_TO_PRIVATE = STM_FIRST_GCFLAG << 4;
static const revision_t GCFLAG_WRITE_BARRIER = STM_FIRST_GCFLAG << 5;
+static const revision_t GCFLAG_MOVED = STM_FIRST_GCFLAG << 6;
+static const revision_t GCFLAG_STUB = STM_FIRST_GCFLAG << 8;
extern __thread char *stm_read_barrier_cache;
#define FX_MASK 65535
#define FXCACHE_AT(obj) \
@@ -179,5 +215,20 @@
stm_WriteBarrier(obj) \
: (obj))
+#define stm_repeat_read_barrier(obj) \
+ (UNLIKELY((obj)->h_tid & (GCFLAG_PUBLIC_TO_PRIVATE | GCFLAG_MOVED)) ? \
+ stm_RepeatReadBarrier(obj) \
+ : (obj))
+
+#define stm_immut_read_barrier(obj) \
+ (UNLIKELY((obj)->h_tid & GCFLAG_STUB) ? \
+ stm_ImmutReadBarrier(obj) \
+ : (obj))
+
+#define stm_repeat_write_barrier(obj) \
+ (UNLIKELY((obj)->h_tid & GCFLAG_WRITE_BARRIER) ? \
+ stm_RepeatWriteBarrier(obj) \
+ : (obj))
+
#endif
More information about the pypy-commit
mailing list