[pypy-commit] pypy stmgc-c4: Import stmgc/007ac02eb935
arigo
noreply at buildbot.pypy.org
Sat Jul 6 21:37:32 CEST 2013
Author: Armin Rigo <arigo at tunes.org>
Branch: stmgc-c4
Changeset: r65234:654fcdd1eb30
Date: 2013-07-06 15:57 +0200
http://bitbucket.org/pypy/pypy/changeset/654fcdd1eb30/
Log: Import stmgc/007ac02eb935
diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c
--- a/rpython/translator/stm/src_stm/et.c
+++ b/rpython/translator/stm/src_stm/et.c
@@ -249,6 +249,36 @@
}
}
+gcptr stm_RepeatReadBarrier(gcptr P)
+{
+ /* Version of stm_DirectReadBarrier() that doesn't abort and assumes
+ * that 'P' was already an up-to-date result of a previous
+ * stm_DirectReadBarrier(). We only have to check if we did in the
+ * meantime a stm_write_barrier().
+ */
+ if (P->h_tid & GCFLAG_PUBLIC)
+ {
+ if (P->h_tid & GCFLAG_NURSERY_MOVED)
+ {
+ P = (gcptr)P->h_revision;
+ assert(P->h_tid & GCFLAG_PUBLIC);
+ }
+ if (P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE)
+ {
+ struct tx_descriptor *d = thread_descriptor;
+ wlog_t *item;
+ G2L_FIND(d->public_to_private, P, item, goto no_private_obj);
+
+ P = item->val;
+ assert(!(P->h_tid & GCFLAG_PUBLIC));
+ no_private_obj:
+ ;
+ }
+ }
+ assert(!(P->h_tid & GCFLAG_STUB));
+ return P;
+}
+
static gcptr _match_public_to_private(gcptr P, gcptr pubobj, gcptr privobj,
int from_stolen)
{
@@ -423,29 +453,6 @@
goto restart_all;
}
-#if 0
-void *stm_DirectReadBarrierFromR(void *G1, void *R_Container1, size_t offset)
-{
- return _direct_read_barrier((gcptr)G1, (gcptr)R_Container1, offset);
-}
-#endif
-
-gcptr stm_RepeatReadBarrier(gcptr O)
-{
- abort();//XXX
-#if 0
- // LatestGlobalRevision(O) would either return O or abort
- // the whole transaction, so omitting it is not wrong
- struct tx_descriptor *d = thread_descriptor;
- gcptr L;
- wlog_t *entry;
- G2L_FIND(d->global_to_local, O, entry, return O);
- L = entry->val;
- assert(L->h_revision == stm_local_revision);
- return L;
-#endif
-}
-
static gcptr LocalizeProtected(struct tx_descriptor *d, gcptr P)
{
gcptr B;
@@ -750,10 +757,10 @@
smp_spinloop();
}
-#if 0
-size_t _stm_decode_abort_info(struct tx_descriptor *d, long long elapsed_time,
- int abort_reason, char *output);
-#endif
+void stm_abort_and_retry(void)
+{
+ AbortTransaction(ABRT_MANUAL);
+}
void AbortPrivateFromProtected(struct tx_descriptor *d);
@@ -796,41 +803,24 @@
elapsed_time = 1;
}
-#if 0
- size_t size;
if (elapsed_time >= d->longest_abort_info_time)
{
/* decode the 'abortinfo' and produce a human-readable summary in
the string 'longest_abort_info' */
- size = _stm_decode_abort_info(d, elapsed_time, num, NULL);
+ size_t size = stm_decode_abort_info(d, elapsed_time, num, NULL);
free(d->longest_abort_info);
d->longest_abort_info = malloc(size);
if (d->longest_abort_info == NULL)
d->longest_abort_info_time = 0; /* out of memory! */
else
{
- if (_stm_decode_abort_info(d, elapsed_time,
+ if (stm_decode_abort_info(d, elapsed_time,
num, d->longest_abort_info) != size)
stm_fatalerror("during stm abort: object mutated unexpectedly\n");
d->longest_abort_info_time = elapsed_time;
}
}
-#endif
-
-#if 0
- /* run the undo log in reverse order, cancelling the values set by
- stm_ThreadLocalRef_LLSet(). */
- if (d->undolog.size > 0) {
- gcptr *item = d->undolog.items;
- long i;
- for (i=d->undolog.size; i>=0; i-=2) {
- void **addr = (void **)(item[i-2]);
- void *oldvalue = (void *)(item[i-1]);
- *addr = oldvalue;
- }
- }
-#endif
/* upon abort, set the reads size limit to 94% of how much was read
so far. This should ensure that, assuming the retry does the same
@@ -937,10 +927,7 @@
d->count_reads = 1;
fxcache_clear(&d->recent_reads_cache);
-#if 0
- gcptrlist_clear(&d->undolog);
gcptrlist_clear(&d->abortinfo);
-#endif
}
void BeginTransaction(jmp_buf* buf)
@@ -1497,17 +1484,6 @@
/************************************************************/
-#if 0
-void stm_ThreadLocalRef_LLSet(void **addr, void *newvalue)
-{
- struct tx_descriptor *d = thread_descriptor;
- gcptrlist_insert2(&d->undolog, (gcptr)addr, (gcptr)*addr);
- *addr = newvalue;
-}
-#endif
-
-/************************************************************/
-
struct tx_descriptor *stm_tx_head = NULL;
struct tx_public_descriptor *stm_descriptor_array[MAX_THREADS] = {0};
static revision_t descriptor_array_free_list = 0;
@@ -1636,11 +1612,8 @@
assert(d->private_from_protected.size == 0);
gcptrlist_delete(&d->private_from_protected);
gcptrlist_delete(&d->list_of_read_objects);
-#if 0
gcptrlist_delete(&d->abortinfo);
free(d->longest_abort_info);
- gcptrlist_delete(&d->undolog);
-#endif
int num_aborts = 0, num_spinloops = 0;
char line[256], *p = line;
diff --git a/rpython/translator/stm/src_stm/et.h b/rpython/translator/stm/src_stm/et.h
--- a/rpython/translator/stm/src_stm/et.h
+++ b/rpython/translator/stm/src_stm/et.h
@@ -153,9 +153,9 @@
unsigned int num_aborts[ABORT_REASONS];
unsigned int num_spinloops[SPINLOOP_REASONS];
struct GcPtrList list_of_read_objects;
- //struct GcPtrList abortinfo;
struct GcPtrList private_from_protected;
struct G2L public_to_private;
+ struct GcPtrList abortinfo;
char *longest_abort_info;
long long longest_abort_info_time;
revision_t *private_revision_ref;
diff --git a/rpython/translator/stm/src_stm/extra.c b/rpython/translator/stm/src_stm/extra.c
new file mode 100644
--- /dev/null
+++ b/rpython/translator/stm/src_stm/extra.c
@@ -0,0 +1,260 @@
+/* Imported by rpython/translator/stm/import_stmgc.py */
+#include "stmimpl.h"
+
+
+void stm_copy_to_old_id_copy(gcptr obj, gcptr id)
+{
+ //assert(!is_in_nursery(thread_descriptor, id));
+ assert(id->h_tid & GCFLAG_OLD);
+
+ size_t size = stmgc_size(obj);
+ memcpy(id, obj, size);
+ id->h_tid &= ~GCFLAG_HAS_ID;
+ id->h_tid |= GCFLAG_OLD;
+ dprintf(("copy_to_old_id_copy(%p -> %p)\n", obj, id));
+}
+
+/************************************************************/
+/* Each object has a h_original pointer to an old copy of
+ the same object (e.g. an old revision), the "original".
+ The memory location of this old object is used as the ID
+ for this object. If h_original is NULL *and* it is an
+ old object copy, it itself is the original. This invariant
+ must be upheld by all code dealing with h_original.
+ The original copy must never be moved again. Also, it may
+ be just a stub-object.
+
+ If we want the ID of an object which is still young,
+ we must preallocate an old shadow-original that is used
+ as the target of the young object in a minor collection.
+ In this case, we set the HAS_ID flag on the young obj
+ to notify minor_collect.
+ This flag can be lost if the young obj is stolen. Then
+ the stealing thread uses the shadow-original itself and
+ minor_collect must not overwrite it again.
+ Also, if there is already a backup-copy around, we use
+ this instead of allocating another old object to use as
+ the shadow-original.
+ */
+
+static revision_t mangle_hash(revision_t n)
+{
+ /* To hash pointers in dictionaries. Assumes that n shows some
alignment (to 4, 8, maybe 16 bytes), so we use the following
formula to avoid the trailing bits being always 0.
This formula is reversible: two different values of 'n' will
always give two different results.
+ */
+ return n ^ (((urevision_t)n) >> 4);
+}
+
+
+revision_t stm_hash(gcptr p)
+{
+ /* Prebuilt objects may have a specific hash stored in an extra
+ field. For now, we will simply always follow h_original and
+ see, if it is a prebuilt object (XXX: maybe propagate a flag
+ to all copies of a prebuilt to avoid this cache miss).
+ */
+ if (p->h_original) {
+ if (p->h_tid & GCFLAG_PREBUILT_ORIGINAL) {
+ return p->h_original;
+ }
+ gcptr orig = (gcptr)p->h_original;
+ if ((orig->h_tid & GCFLAG_PREBUILT_ORIGINAL) && orig->h_original) {
+ return orig->h_original;
+ }
+ }
+ return mangle_hash(stm_id(p));
+}
+
+
+revision_t stm_id(gcptr p)
+{
+ struct tx_descriptor *d = thread_descriptor;
+ revision_t result;
+
+ if (p->h_original) { /* fast path */
+ if (p->h_tid & GCFLAG_PREBUILT_ORIGINAL) {
+ /* h_original may contain a specific hash value,
+ but in case of the prebuilt original version,
+ its memory location is the id */
+ return (revision_t)p;
+ }
+
+ dprintf(("stm_id(%p) has orig fst: %p\n",
+ p, (gcptr)p->h_original));
+ return p->h_original;
+ }
+ else if (p->h_tid & GCFLAG_OLD) {
+ /* old objects must have an h_original xOR be
+ the original itself. */
+ dprintf(("stm_id(%p) is old, orig=0 fst: %p\n", p, p));
+ return (revision_t)p;
+ }
+
+ spinlock_acquire(d->public_descriptor->collection_lock, 'I');
+ /* old objects must have an h_original xOR be
+ the original itself.
+ if some thread stole p when it was still young,
+ it must have set h_original. stealing an old obj
+ makes the old obj "original".
+ */
+ if (p->h_original) { /* maybe now? */
+ result = p->h_original;
+ dprintf(("stm_id(%p) has orig: %p\n",
+ p, (gcptr)p->h_original));
+ }
+ else {
+ /* must create shadow original object XXX: or use
+ backup, if exists */
+
+ /* XXX use stmgcpage_malloc() directly, we don't need to copy
+ * the contents yet */
+ gcptr O = stmgc_duplicate_old(p);
+ p->h_original = (revision_t)O;
+ p->h_tid |= GCFLAG_HAS_ID;
+
+ if (p->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) {
+ gcptr B = (gcptr)p->h_revision;
+ B->h_original = (revision_t)O;
+ }
+
+ result = (revision_t)O;
+ dprintf(("stm_id(%p) young, make shadow %p\n", p, O));
+ }
+
+ spinlock_release(d->public_descriptor->collection_lock);
+ return result;
+}
+
+_Bool stm_pointer_equal(gcptr p1, gcptr p2)
+{
+ /* fast path for two equal pointers */
+ if (p1 == p2)
+ return 1;
+ /* types must be the same */
+ if ((p1->h_tid & STM_USER_TID_MASK) != (p2->h_tid & STM_USER_TID_MASK))
+ return 0;
+ return stm_id(p1) == stm_id(p2);
+}
+
+/************************************************************/
+
+void stm_abort_info_push(gcptr obj, long fieldoffsets[])
+{
+ struct tx_descriptor *d = thread_descriptor;
+ obj = stm_read_barrier(obj);
+ gcptrlist_insert2(&d->abortinfo, obj, (gcptr)fieldoffsets);
+}
+
+void stm_abort_info_pop(long count)
+{
+ struct tx_descriptor *d = thread_descriptor;
+ long newsize = d->abortinfo.size - 2 * count;
+ gcptrlist_reduce_size(&d->abortinfo, newsize < 0 ? 0 : newsize);
+}
+
+size_t stm_decode_abort_info(struct tx_descriptor *d, long long elapsed_time,
+ int abort_reason, char *output)
+{
+ /* re-encodes the abort info as a single string.
+ For convenience (no escaping needed, no limit on integer
+ sizes, etc.) we follow the bittorrent format. */
+ size_t totalsize = 0;
+ long i;
+ char buffer[32];
+ size_t res_size;
+#define WRITE(c) { totalsize++; if (output) *output++=(c); }
+#define WRITE_BUF(p, sz) { totalsize += (sz); \
+ if (output) { \
+ memcpy(output, (p), (sz)); output += (sz); \
+ } \
+ }
+ WRITE('l');
+ WRITE('l');
+ res_size = sprintf(buffer, "i%llde", (long long)elapsed_time);
+ WRITE_BUF(buffer, res_size);
+ res_size = sprintf(buffer, "i%de", (int)abort_reason);
+ WRITE_BUF(buffer, res_size);
+ res_size = sprintf(buffer, "i%lde", (long)d->public_descriptor_index);
+ WRITE_BUF(buffer, res_size);
+ res_size = sprintf(buffer, "i%lde", (long)d->atomic);
+ WRITE_BUF(buffer, res_size);
+ res_size = sprintf(buffer, "i%de", (int)d->active);
+ WRITE_BUF(buffer, res_size);
+ res_size = sprintf(buffer, "i%lue", (unsigned long)d->count_reads);
+ WRITE_BUF(buffer, res_size);
+ res_size = sprintf(buffer, "i%lue",
+ (unsigned long)d->reads_size_limit_nonatomic);
+ WRITE_BUF(buffer, res_size);
+ WRITE('e');
+ for (i=0; i<d->abortinfo.size; i+=2) {
+ char *object = (char *)stm_RepeatReadBarrier(d->abortinfo.items[i+0]);
+ long *fieldoffsets = (long*)d->abortinfo.items[i+1];
+ long kind, offset;
+ size_t rps_size;
+ char *rps;
+
+ while (1) {
+ kind = *fieldoffsets++;
+ if (kind <= 0) {
+ if (kind == -2) {
+ WRITE('l'); /* '[', start of sublist */
+ continue;
+ }
+ if (kind == -1) {
+ WRITE('e'); /* ']', end of sublist */
+ continue;
+ }
+ break; /* 0, terminator */
+ }
+ offset = *fieldoffsets++;
+ switch(kind) {
+ case 1: /* signed */
+ res_size = sprintf(buffer, "i%lde",
+ *(long*)(object + offset));
+ WRITE_BUF(buffer, res_size);
+ break;
+ case 2: /* unsigned */
+ res_size = sprintf(buffer, "i%lue",
+ *(unsigned long*)(object + offset));
+ WRITE_BUF(buffer, res_size);
+ break;
+ case 3: /* a string of bytes from the target object */
+ rps = *(char **)(object + offset);
+ offset = *fieldoffsets++;
+ if (rps) {
+ /* xxx a bit ad-hoc: it's a string whose length is a
+ * long at 'offset', following immediately the offset */
+ rps_size = *(long *)(rps + offset);
+ offset += sizeof(long);
+ assert(rps_size >= 0);
+ res_size = sprintf(buffer, "%zu:", rps_size);
+ WRITE_BUF(buffer, res_size);
+ WRITE_BUF(rps + offset, rps_size);
+ }
+ else {
+ WRITE_BUF("0:", 2);
+ }
+ break;
+ default:
+ stm_fatalerror("corrupted abort log\n");
+ }
+ }
+ }
+ WRITE('e');
+ WRITE('\0'); /* final null character */
+#undef WRITE
+#undef WRITE_BUF
+ return totalsize;
+}
+
+char *stm_inspect_abort_info(void)
+{
+ struct tx_descriptor *d = thread_descriptor;
+ if (d->longest_abort_info_time <= 0)
+ return NULL;
+ d->longest_abort_info_time = 0;
+ return d->longest_abort_info;
+}
diff --git a/rpython/translator/stm/src_stm/extra.h b/rpython/translator/stm/src_stm/extra.h
new file mode 100644
--- /dev/null
+++ b/rpython/translator/stm/src_stm/extra.h
@@ -0,0 +1,10 @@
+/* Imported by rpython/translator/stm/import_stmgc.py */
+#ifndef _SRCSTM_EXTRA_H
+#define _SRCSTM_EXTRA_H
+
+
+void stm_copy_to_old_id_copy(gcptr obj, gcptr id);
+size_t stm_decode_abort_info(struct tx_descriptor *d, long long elapsed_time,
+ int abort_reason, char *output);
+
+#endif
diff --git a/rpython/translator/stm/src_stm/fprintcolor.c b/rpython/translator/stm/src_stm/fprintcolor.c
--- a/rpython/translator/stm/src_stm/fprintcolor.c
+++ b/rpython/translator/stm/src_stm/fprintcolor.c
@@ -6,7 +6,7 @@
{
va_list ap;
-#ifdef _GC_DEBUG
+#ifdef _GC_DEBUGPRINTS
dprintf(("STM Subsystem: Fatal Error\n"));
#else
fprintf(stderr, "STM Subsystem: Fatal Error\n");
@@ -20,7 +20,7 @@
}
-#ifdef _GC_DEBUG
+#ifdef _GC_DEBUGPRINTS
static __thread revision_t tcolor = 0;
static revision_t tnextid = 0;
diff --git a/rpython/translator/stm/src_stm/fprintcolor.h b/rpython/translator/stm/src_stm/fprintcolor.h
--- a/rpython/translator/stm/src_stm/fprintcolor.h
+++ b/rpython/translator/stm/src_stm/fprintcolor.h
@@ -7,7 +7,7 @@
__attribute__((format (printf, 1, 2), noreturn));
-#ifdef _GC_DEBUG
+#ifdef _GC_DEBUGPRINTS
#define dprintf(args) threadcolor_printf args
int dprintfcolor(void);
diff --git a/rpython/translator/stm/src_stm/gcpage.c b/rpython/translator/stm/src_stm/gcpage.c
--- a/rpython/translator/stm/src_stm/gcpage.c
+++ b/rpython/translator/stm/src_stm/gcpage.c
@@ -213,6 +213,30 @@
static struct GcPtrList objects_to_trace;
+static void keep_original_alive(gcptr obj)
+{
+ /* keep alive the original of a visited object */
+ gcptr id_copy = (gcptr)obj->h_original;
+ /* prebuilt original objects may have a predefined
hash in h_original */
+ if (id_copy && !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) {
+ if (!(id_copy->h_tid & GCFLAG_PREBUILT_ORIGINAL)) {
+ id_copy->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE;
+ /* see fix_outdated() */
+ id_copy->h_tid |= GCFLAG_VISITED;
+
+ /* XXX: may not always need tracing? */
+ //if (!(id_copy->h_tid & GCFLAG_STUB))
+ // gcptrlist_insert(&objects_to_trace, id_copy);
+ }
+ else {
+ /* prebuilt originals won't get collected anyway
+ and if they are not reachable in any other way,
+ we only ever need their location, not their content */
+ }
+ }
+}
+
static void visit(gcptr *pobj)
{
gcptr obj = *pobj;
@@ -227,6 +251,8 @@
obj->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE; /* see fix_outdated() */
obj->h_tid |= GCFLAG_VISITED;
gcptrlist_insert(&objects_to_trace, obj);
+
+ keep_original_alive(obj);
}
}
else if (obj->h_tid & GCFLAG_PUBLIC) {
@@ -247,6 +273,8 @@
obj = (gcptr)(obj->h_revision - 2);
if (!(obj->h_tid & GCFLAG_PUBLIC)) {
prev_obj->h_tid |= GCFLAG_VISITED;
+ keep_original_alive(prev_obj);
+
assert(*pobj == prev_obj);
gcptr obj1 = obj;
visit(&obj1); /* recursion, but should be only once */
@@ -257,6 +285,9 @@
}
if (!(obj->h_revision & 3)) {
+ /* obj is neither a stub nor a most recent revision:
+ completely ignore obj->h_revision */
+
obj = (gcptr)obj->h_revision;
assert(obj->h_tid & GCFLAG_PUBLIC);
prev_obj->h_revision = (revision_t)obj;
@@ -275,7 +306,14 @@
assert(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED);
gcptr B = (gcptr)obj->h_revision;
assert(B->h_tid & (GCFLAG_PUBLIC | GCFLAG_BACKUP_COPY));
-
+
+ if (obj->h_original && (gcptr)obj->h_original != B) {
+ /* if B is original, it will be visited anyway */
+ assert(obj->h_original == B->h_original);
+ assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL));
+ keep_original_alive(obj);
+ }
+
obj->h_tid |= GCFLAG_VISITED;
B->h_tid |= GCFLAG_VISITED;
assert(!(obj->h_tid & GCFLAG_STUB));
@@ -294,6 +332,7 @@
}
}
+
static void visit_keep(gcptr obj)
{
if (!(obj->h_tid & GCFLAG_VISITED)) {
@@ -305,6 +344,7 @@
assert(!(obj->h_revision & 2));
visit((gcptr *)&obj->h_revision);
}
+ keep_original_alive(obj);
}
}
@@ -376,8 +416,24 @@
outdated, it will be found at that time */
gcptr R = item->addr;
gcptr L = item->val;
+
+ /* Objects that were not visited yet must have the PUB_TO_PRIV
+ flag. Except if that transaction will abort anyway, then it
+ may be removed from a previous major collection that didn't
+ fix the PUB_TO_PRIV because the transaction was going to
+ abort anyway:
+ 1. minor_collect before major collect (R->L, R is outdated, abort)
+ 2. major collect removes flag
+ 3. major collect again, same thread, no time to abort
+ 4. flag still removed
+ */
+ assert(IMPLIES(!(R->h_tid & GCFLAG_VISITED) && d->active > 0,
+ R->h_tid & GCFLAG_PUBLIC_TO_PRIVATE));
visit_keep(R);
if (L != NULL) {
+ /* minor collection found R->L in public_to_young
+ and R was modified. It then sets item->val to NULL and wants
+ to abort later. */
revision_t v = L->h_revision;
visit_keep(L);
/* a bit of custom logic here: if L->h_revision used to
@@ -385,8 +441,10 @@
keep this property, even though visit_keep(L) might
decide it would be better to make it point to a more
recent copy. */
- if (v == (revision_t)R)
+ if (v == (revision_t)R) {
+ assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED);
L->h_revision = v; /* restore */
+ }
}
} G2L_LOOP_END;
@@ -449,6 +507,7 @@
just removing it is very wrong --- we want 'd' to abort.
*/
if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) {
+ /* follow obj to its backup */
assert(IS_POINTER(obj->h_revision));
obj = (gcptr)obj->h_revision;
}
@@ -483,14 +542,16 @@
/* We are now after visiting all objects, and we know the
* transaction isn't aborting because of this collection. We have
* cleared GCFLAG_PUBLIC_TO_PRIVATE from public objects at the end
- * of the chain. Now we have to set it again on public objects that
- * have a private copy.
+ * of the chain (head revisions). Now we have to set it again on
+ * public objects that have a private copy.
*/
wlog_t *item;
dprintf(("fix public_to_private on thread %p\n", d));
G2L_LOOP_FORWARD(d->public_to_private, item) {
+ assert(item->addr->h_tid & GCFLAG_VISITED);
+ assert(item->val->h_tid & GCFLAG_VISITED);
assert(item->addr->h_tid & GCFLAG_PUBLIC);
/* assert(is_private(item->val)); but in the other thread,
diff --git a/rpython/translator/stm/src_stm/nursery.c b/rpython/translator/stm/src_stm/nursery.c
--- a/rpython/translator/stm/src_stm/nursery.c
+++ b/rpython/translator/stm/src_stm/nursery.c
@@ -45,7 +45,12 @@
void stmgc_done_nursery(void)
{
struct tx_descriptor *d = thread_descriptor;
- assert(!minor_collect_anything_to_do(d));
+ /* someone may have called minor_collect_soon()
in between the preceding minor_collect() and
this assert (committransaction() ->
updatechainheads() -> stub_malloc() -> ...): */
+ assert(!minor_collect_anything_to_do(d)
+ || d->nursery_current == d->nursery_end);
stm_free(d->nursery_base, GC_NURSERY);
gcptrlist_delete(&d->old_objects_to_trace);
@@ -121,131 +126,6 @@
}
/************************************************************/
-/* Each object has a h_original pointer to an old copy of
- the same object (e.g. an old revision), the "original".
- The memory location of this old object is used as the ID
- for this object. If h_original is NULL *and* it is an
- old object copy, it itself is the original. This invariant
- must be upheld by all code dealing with h_original.
- The original copy must never be moved again. Also, it may
- be just a stub-object.
-
- If we want the ID of an object which is still young,
- we must preallocate an old shadow-original that is used
- as the target of the young object in a minor collection.
- In this case, we set the HAS_ID flag on the young obj
- to notify minor_collect.
- This flag can be lost if the young obj is stolen. Then
- the stealing thread uses the shadow-original itself and
- minor_collect must not overwrite it again.
- Also, if there is already a backup-copy around, we use
- this instead of allocating another old object to use as
- the shadow-original.
- */
-
-static revision_t mangle_hash(revision_t n)
-{
- /* To hash pointers in dictionaries. Assumes that i shows some
- alignment (to 4, 8, maybe 16 bytes), so we use the following
- formula to avoid the trailing bits being always 0.
- This formula is reversible: two different values of 'i' will
- always give two different results.
- */
- return n ^ (((urevision_t)n) >> 4);
-}
-
-
-revision_t stm_hash(gcptr p)
-{
- /* Prebuilt objects may have a specific hash stored in an extra
- field. For now, we will simply always follow h_original and
- see, if it is a prebuilt object (XXX: maybe propagate a flag
- to all copies of a prebuilt to avoid this cache miss).
- */
- if (p->h_original) {
- if (p->h_tid & GCFLAG_PREBUILT_ORIGINAL) {
- return p->h_original;
- }
- gcptr orig = (gcptr)p->h_original;
- if ((orig->h_tid & GCFLAG_PREBUILT_ORIGINAL) && orig->h_original) {
- return orig->h_original;
- }
- }
- return mangle_hash(stm_id(p));
-}
-
-
-revision_t stm_id(gcptr p)
-{
- struct tx_descriptor *d = thread_descriptor;
- revision_t result;
-
- if (p->h_original) { /* fast path */
- if (p->h_tid & GCFLAG_PREBUILT_ORIGINAL) {
- /* h_original may contain a specific hash value,
- but in case of the prebuilt original version,
- its memory location is the id */
- return (revision_t)p;
- }
-
- dprintf(("stm_id(%p) has orig fst: %p\n",
- p, (gcptr)p->h_original));
- return p->h_original;
- }
- else if (p->h_tid & GCFLAG_OLD) {
- /* old objects must have an h_original xOR be
- the original itself. */
- dprintf(("stm_id(%p) is old, orig=0 fst: %p\n", p, p));
- return (revision_t)p;
- }
-
- spinlock_acquire(d->public_descriptor->collection_lock, 'I');
- /* old objects must have an h_original xOR be
- the original itself.
- if some thread stole p when it was still young,
- it must have set h_original. stealing an old obj
- makes the old obj "original".
- */
- if (p->h_original) { /* maybe now? */
- result = p->h_original;
- dprintf(("stm_id(%p) has orig: %p\n",
- p, (gcptr)p->h_original));
- }
- else {
- /* must create shadow original object XXX: or use
- backup, if exists */
-
- /* XXX use stmgcpage_malloc() directly, we don't need to copy
- * the contents yet */
- gcptr O = stmgc_duplicate_old(p);
- p->h_original = (revision_t)O;
- p->h_tid |= GCFLAG_HAS_ID;
-
- if (p->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) {
- gcptr B = (gcptr)p->h_revision;
- B->h_original = (revision_t)O;
- }
-
- result = (revision_t)O;
- dprintf(("stm_id(%p) young, make shadow %p\n", p, O));
- }
-
- spinlock_release(d->public_descriptor->collection_lock);
- return result;
-}
-
-_Bool stm_pointer_equal(gcptr p1, gcptr p2)
-{
- /* fast path for two equal pointers */
- if (p1 == p2)
- return 1;
- /* types must be the same */
- if ((p1->h_tid & STM_USER_TID_MASK) != (p2->h_tid & STM_USER_TID_MASK))
- return 0;
- return stm_id(p1) == stm_id(p2);
-}
-
-/************************************************************/
static inline gcptr create_old_object_copy(gcptr obj)
{
@@ -262,18 +142,6 @@
return fresh_old_copy;
}
-inline void copy_to_old_id_copy(gcptr obj, gcptr id)
-{
- assert(!is_in_nursery(thread_descriptor, id));
- assert(id->h_tid & GCFLAG_OLD);
-
- size_t size = stmgc_size(obj);
- memcpy(id, obj, size);
- id->h_tid &= ~GCFLAG_HAS_ID;
- id->h_tid |= GCFLAG_OLD;
- dprintf(("copy_to_old_id_copy(%p -> %p)\n", obj, id));
-}
-
static void visit_if_young(gcptr *root)
{
gcptr obj = *root;
@@ -299,7 +167,7 @@
/* already has a place to go to */
gcptr id_obj = (gcptr)obj->h_original;
- copy_to_old_id_copy(obj, id_obj);
+ stm_copy_to_old_id_copy(obj, id_obj);
fresh_old_copy = id_obj;
obj->h_tid &= ~GCFLAG_HAS_ID;
}
@@ -315,6 +183,7 @@
*root = fresh_old_copy;
/* add 'fresh_old_copy' to the list of objects to trace */
+ assert(!(fresh_old_copy->h_tid & GCFLAG_PUBLIC));
gcptrlist_insert(&d->old_objects_to_trace, fresh_old_copy);
}
}
@@ -426,6 +295,7 @@
gcptr P = items[i];
assert(P->h_tid & GCFLAG_PUBLIC);
assert(P->h_tid & GCFLAG_OLD);
+ assert(P->h_tid & GCFLAG_PUBLIC_TO_PRIVATE);
revision_t v = ACCESS_ONCE(P->h_revision);
wlog_t *item;
@@ -474,7 +344,18 @@
assert(obj->h_tid & GCFLAG_OLD);
assert(!(obj->h_tid & GCFLAG_WRITE_BARRIER));
- obj->h_tid |= GCFLAG_WRITE_BARRIER;
+
+ /* We add the WRITE_BARRIER flag to objects here, but warning:
+ we may occasionally see a PUBLIC object --- one that was
+ a private/protected object when it was added to
+ old_objects_to_trace, and has been stolen. So we have to
+ check and not do any change to the obj->h_tid in that case.
+ Otherwise this conflicts with the rule that we may only
+ modify obj->h_tid of a public object in order to add
+ PUBLIC_TO_PRIVATE.
+ */
+ if (!(obj->h_tid & GCFLAG_PUBLIC))
+ obj->h_tid |= GCFLAG_WRITE_BARRIER;
stmgc_trace(obj, &visit_if_young);
}
@@ -672,6 +553,7 @@
gcptr P = stmgcpage_malloc(allocate_size);
memset(P, 0, allocate_size);
P->h_tid = tid | GCFLAG_OLD;
+ assert(!(P->h_tid & GCFLAG_PUBLIC));
gcptrlist_insert(&d->old_objects_to_trace, P);
return P;
}
diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision
--- a/rpython/translator/stm/src_stm/revision
+++ b/rpython/translator/stm/src_stm/revision
@@ -1,1 +1,1 @@
-637f6c9d19f7
+007ac02eb935
diff --git a/rpython/translator/stm/src_stm/steal.c b/rpython/translator/stm/src_stm/steal.c
--- a/rpython/translator/stm/src_stm/steal.c
+++ b/rpython/translator/stm/src_stm/steal.c
@@ -2,8 +2,6 @@
#include "stmimpl.h"
-inline void copy_to_old_id_copy(gcptr obj, gcptr id);
-
gcptr stm_stub_malloc(struct tx_public_descriptor *pd)
{
assert(pd->collection_lock != 0);
@@ -168,7 +166,7 @@
/* use id-copy for us */
O = (gcptr)L->h_original;
L->h_tid &= ~GCFLAG_HAS_ID;
- copy_to_old_id_copy(L, O);
+ stm_copy_to_old_id_copy(L, O);
O->h_original = 0;
} else {
/* Copy the object out of the other thread's nursery,
@@ -254,6 +252,7 @@
for (i = 0; i < size; i += 2) {
gcptr B = items[i];
assert(!(B->h_tid & GCFLAG_BACKUP_COPY)); /* already removed */
+ assert(B->h_tid & GCFLAG_PUBLIC);
/* to be on the safe side --- but actually needed, see the
gcptrlist_insert2(L, NULL) above */
@@ -265,6 +264,7 @@
assert(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED);
assert(IS_POINTER(L->h_revision));
+ assert(B->h_tid & GCFLAG_PUBLIC_TO_PRIVATE);
g2l_insert(&d->public_to_private, B, L);
/* this is definitely needed: all keys in public_to_private
diff --git a/rpython/translator/stm/src_stm/stmgc.c b/rpython/translator/stm/src_stm/stmgc.c
--- a/rpython/translator/stm/src_stm/stmgc.c
+++ b/rpython/translator/stm/src_stm/stmgc.c
@@ -10,5 +10,6 @@
#include "nursery.c"
#include "gcpage.c"
#include "stmsync.c"
+#include "extra.c"
#include "dbgmem.c"
#include "fprintcolor.c"
diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h
--- a/rpython/translator/stm/src_stm/stmgc.h
+++ b/rpython/translator/stm/src_stm/stmgc.h
@@ -102,6 +102,19 @@
It is set to NULL by stm_initialize(). */
extern __thread gcptr stm_thread_local_obj;
+/* For tracking where aborts occurs, you can push/pop information
+ into this stack. When an abort occurs this information is encoded
+ and flattened into a buffer which can later be retrieved with
+ stm_inspect_abort_info(). (XXX details not documented yet) */
+void stm_abort_info_push(gcptr obj, long fieldoffsets[]);
+void stm_abort_info_pop(long count);
+char *stm_inspect_abort_info(void);
+
+void stm_abort_and_retry(void);
+
+
+/**************** END OF PUBLIC INTERFACE *****************/
+/************************************************************/
/* macro-like functionality */
diff --git a/rpython/translator/stm/src_stm/stmimpl.h b/rpython/translator/stm/src_stm/stmimpl.h
--- a/rpython/translator/stm/src_stm/stmimpl.h
+++ b/rpython/translator/stm/src_stm/stmimpl.h
@@ -13,7 +13,7 @@
# endif
#endif
-#ifdef _GC_DEBUG
+#if defined(_GC_DEBUG) && !defined(DUMP_EXTRA)
# if _GC_DEBUG >= 2
# define DUMP_EXTRA
# endif
@@ -36,5 +36,6 @@
#include "et.h"
#include "steal.h"
#include "stmsync.h"
+#include "extra.h"
#endif
diff --git a/rpython/translator/stm/src_stm/stmsync.c b/rpython/translator/stm/src_stm/stmsync.c
--- a/rpython/translator/stm/src_stm/stmsync.c
+++ b/rpython/translator/stm/src_stm/stmsync.c
@@ -81,6 +81,7 @@
int stm_enter_callback_call(void)
{
int token = (thread_descriptor == NULL);
+ dprintf(("enter_callback_call(tok=%d)\n", token));
if (token == 1) {
stmgcpage_acquire_global_lock();
DescriptorInit();
@@ -94,6 +95,7 @@
void stm_leave_callback_call(int token)
{
+ dprintf(("leave_callback_call(%d)\n", token));
if (token == 1)
stmgc_minor_collect(); /* force everything out of the nursery */
More information about the pypy-commit
mailing list