[pypy-commit] pypy stmgc-static-barrier: import stmgc/111c09337109
arigo
noreply at buildbot.pypy.org
Sun Sep 15 12:23:10 CEST 2013
Author: Armin Rigo <arigo at tunes.org>
Branch: stmgc-static-barrier
Changeset: r66950:c92df32a39c0
Date: 2013-09-15 10:35 +0200
http://bitbucket.org/pypy/pypy/changeset/c92df32a39c0/
Log: import stmgc/111c09337109
diff --git a/rpython/translator/stm/src_stm/dbgmem.c b/rpython/translator/stm/src_stm/dbgmem.c
--- a/rpython/translator/stm/src_stm/dbgmem.c
+++ b/rpython/translator/stm/src_stm/dbgmem.c
@@ -4,12 +4,12 @@
#define PAGE_SIZE 4096
-
+#define MEM_SIZE(mem) (*(((size_t *)(mem)) - 1))
#ifdef _GC_DEBUG
/************************************************************/
-#define MMAP_TOTAL 1280*1024*1024 /* 1280MB */
+#define MMAP_TOTAL 2000*1024*1024 /* 2000MB */
static pthread_mutex_t malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
static char *zone_start, *zone_current = NULL, *zone_end = NULL;
@@ -32,8 +32,10 @@
void *stm_malloc(size_t sz)
{
+ size_t real_sz = sz + sizeof(size_t);
+
+#ifdef _GC_MEMPROTECT
pthread_mutex_lock(&malloc_mutex);
-
if (zone_current == NULL) {
zone_start = mmap(NULL, MMAP_TOTAL, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
@@ -43,10 +45,11 @@
zone_current = zone_start;
zone_end = zone_start + MMAP_TOTAL;
assert((MMAP_TOTAL % PAGE_SIZE) == 0);
+
_stm_dbgmem(zone_start, MMAP_TOTAL, PROT_NONE);
}
- size_t nb_pages = (sz + PAGE_SIZE - 1) / PAGE_SIZE + 1;
+ size_t nb_pages = (real_sz + PAGE_SIZE - 1) / PAGE_SIZE + 1;
char *result = zone_current;
zone_current += nb_pages * PAGE_SIZE;
if (zone_current > zone_end) {
@@ -55,50 +58,67 @@
}
pthread_mutex_unlock(&malloc_mutex);
- result += (-sz) & (PAGE_SIZE-1);
- assert(((intptr_t)(result + sz) & (PAGE_SIZE-1)) == 0);
- _stm_dbgmem(result, sz, PROT_READ | PROT_WRITE);
+ result += (-real_sz) & (PAGE_SIZE-1);
+ assert(((intptr_t)(result + real_sz) & (PAGE_SIZE-1)) == 0);
+ _stm_dbgmem(result, real_sz, PROT_READ | PROT_WRITE);
long i, base = (result - zone_start) / PAGE_SIZE;
for (i = 0; i < nb_pages; i++)
accessible_pages[base + i] = 42;
+ assert(((intptr_t)(result + real_sz) & (PAGE_SIZE-1)) == 0);
+#else
+ char * result = malloc(real_sz);
+#endif
+
dprintf(("stm_malloc(%zu): %p\n", sz, result));
- assert(((intptr_t)(result + sz) & (PAGE_SIZE-1)) == 0);
- memset(result, 0xBB, sz);
+ memset(result, 0xBB, real_sz);
+
+ result += sizeof(size_t);
+ MEM_SIZE(result) = real_sz;
return result;
}
-void stm_free(void *p, size_t sz)
+void stm_free(void *p)
{
if (p == NULL) {
- assert(sz == 0);
return;
}
- assert(((intptr_t)((char *)p + sz) & (PAGE_SIZE-1)) == 0);
+ size_t real_sz = MEM_SIZE(p);
+ void *real_p = p - sizeof(size_t);
+ assert(real_sz > 0);
- size_t nb_pages = (sz + PAGE_SIZE - 1) / PAGE_SIZE + 1;
- long i, base = ((char *)p - zone_start) / PAGE_SIZE;
+ memset(real_p, 0xDD, real_sz);
+#ifdef _GC_MEMPROTECT
+ assert(((intptr_t)((char *)real_p + real_sz) & (PAGE_SIZE-1)) == 0);
+
+ size_t nb_pages = (real_sz + PAGE_SIZE - 1) / PAGE_SIZE + 1;
+ long i, base = ((char *)real_p - zone_start) / PAGE_SIZE;
assert(0 <= base && base < (MMAP_TOTAL / PAGE_SIZE));
for (i = 0; i < nb_pages; i++) {
assert(accessible_pages[base + i] == 42);
accessible_pages[base + i] = -1;
}
- memset(p, 0xDD, sz);
- _stm_dbgmem(p, sz, PROT_NONE);
+
+ _stm_dbgmem(real_p, real_sz, PROT_NONE);
+#endif
}
void *stm_realloc(void *p, size_t newsz, size_t oldsz)
{
void *r = stm_malloc(newsz);
memcpy(r, p, oldsz < newsz ? oldsz : newsz);
- stm_free(p, oldsz);
+ stm_free(p);
return r;
}
int _stm_can_access_memory(char *p)
{
- long base = ((char *)p - zone_start) / PAGE_SIZE;
+#ifndef _GC_MEMPROTECT
+ assert(0); /* tests must use MEMPROTECT */
+#endif
+ char* real_p = p - sizeof(size_t);
+ long base = ((char *)real_p - zone_start) / PAGE_SIZE;
assert(0 <= base && base < (MMAP_TOTAL / PAGE_SIZE));
return accessible_pages[base] == 42;
}
diff --git a/rpython/translator/stm/src_stm/dbgmem.h b/rpython/translator/stm/src_stm/dbgmem.h
--- a/rpython/translator/stm/src_stm/dbgmem.h
+++ b/rpython/translator/stm/src_stm/dbgmem.h
@@ -6,7 +6,7 @@
#ifdef _GC_DEBUG
void *stm_malloc(size_t);
-void stm_free(void *, size_t);
+void stm_free(void *);
void *stm_realloc(void *, size_t, size_t);
int _stm_can_access_memory(char *);
void assert_cleared(char *, size_t);
@@ -14,7 +14,7 @@
#else
#define stm_malloc(sz) malloc(sz)
-#define stm_free(p,sz) free(p)
+#define stm_free(p) free(p)
#define stm_realloc(p,newsz,oldsz) realloc(p,newsz)
#define assert_cleared(p,sz) do { } while(0)
diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c
--- a/rpython/translator/stm/src_stm/et.c
+++ b/rpython/translator/stm/src_stm/et.c
@@ -16,7 +16,7 @@
i = 0;
cur = tmp_buf;
- cur += sprintf(cur, "%p:", obj);
+ cur += sprintf(cur, "%p : ", obj);
while (flags[i]) {
if (obj->h_tid & (STM_FIRST_GCFLAG << i)) {
cur += sprintf(cur, "%s|", flags[i]);
@@ -24,9 +24,36 @@
i++;
}
cur += sprintf(cur, "tid=%ld", stm_get_tid(obj));
+ cur += sprintf(cur, " : rev=%lx : orig=%lx",
+ (long)obj->h_revision, (long)obj->h_original);
return tmp_buf;
}
+void stm_dump_dbg(void)
+{
+ fprintf(stderr, "/**** stm_dump_dbg ****/\n\n");
+
+ int i;
+ for (i = 0; i < MAX_THREADS; i++) {
+ struct tx_public_descriptor *pd = stm_descriptor_array[i];
+ if (pd == NULL)
+ continue;
+ fprintf(stderr, "stm_descriptor_array[%d]\n((struct tx_public_descriptor *)%p)\n",
+ i, pd);
+
+ struct tx_descriptor *d = stm_tx_head;
+ while (d && d->public_descriptor != pd)
+ d = d->tx_next;
+ if (!d)
+ continue;
+
+ fprintf(stderr, "((struct tx_descriptor *)\033[%dm%p\033[0m)\n"
+ "pthread_self = 0x%lx\n\n", d->tcolor, d, (long)d->pthreadid);
+ }
+
+ fprintf(stderr, "/**********************/\n");
+}
+
__thread struct tx_descriptor *thread_descriptor = NULL;
@@ -109,6 +136,7 @@
revision_t v;
d->count_reads++;
+ assert(IMPLIES(!(P->h_tid & GCFLAG_OLD), stmgc_is_in_nursery(d, P)));
restart_all:
if (P->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED)
@@ -281,6 +309,9 @@
*/
assert(P->h_tid & GCFLAG_PUBLIC);
assert(!(P->h_tid & GCFLAG_STUB));
+ assert(IMPLIES(!(P->h_tid & GCFLAG_OLD),
+ stmgc_is_in_nursery(thread_descriptor, P)));
+
if (P->h_tid & GCFLAG_MOVED)
{
@@ -321,6 +352,9 @@
{
assert(P->h_tid & GCFLAG_STUB);
assert(P->h_tid & GCFLAG_PUBLIC);
+ assert(IMPLIES(!(P->h_tid & GCFLAG_OLD),
+ stmgc_is_in_nursery(thread_descriptor, P)));
+
revision_t v = ACCESS_ONCE(P->h_revision);
assert(IS_POINTER(v)); /* "is a pointer", "has a more recent revision" */
@@ -569,6 +603,7 @@
not_found:
#endif
+ assert(!(R->h_tid & GCFLAG_STUB));
R->h_tid |= GCFLAG_PUBLIC_TO_PRIVATE;
/* note that stmgc_duplicate() usually returns a young object, but may
@@ -589,6 +624,7 @@
assert(!(L->h_tid & GCFLAG_STUB));
assert(!(L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED));
L->h_tid &= ~(GCFLAG_VISITED |
+ GCFLAG_MARKED |
GCFLAG_PUBLIC |
GCFLAG_PREBUILT_ORIGINAL |
GCFLAG_PUBLIC_TO_PRIVATE |
@@ -610,8 +646,12 @@
static inline void record_write_barrier(gcptr P)
{
+ assert(is_private(P));
+ assert(IMPLIES(!(P->h_tid & GCFLAG_OLD),
+ stmgc_is_in_nursery(thread_descriptor, P)));
if (P->h_tid & GCFLAG_WRITE_BARRIER)
{
+ assert(P->h_tid & GCFLAG_OLD);
P->h_tid &= ~GCFLAG_WRITE_BARRIER;
gcptrlist_insert(&thread_descriptor->old_objects_to_trace, P);
}
@@ -619,6 +659,9 @@
gcptr stm_RepeatWriteBarrier(gcptr P)
{
+ assert(IMPLIES(!(P->h_tid & GCFLAG_OLD),
+ stmgc_is_in_nursery(thread_descriptor, P)));
+
assert(!(P->h_tid & GCFLAG_IMMUTABLE));
assert(is_private(P));
assert(P->h_tid & GCFLAG_WRITE_BARRIER);
@@ -637,6 +680,9 @@
over it. However such objects are so small that they contain no field
at all, and so no write barrier should occur on them. */
+ assert(IMPLIES(!(P->h_tid & GCFLAG_OLD),
+ stmgc_is_in_nursery(thread_descriptor, P)));
+
if (is_private(P))
{
/* If we have GCFLAG_WRITE_BARRIER in P, then list it into
@@ -857,6 +903,7 @@
void AbortTransaction(int num)
{
+ static const char *abort_names[] = ABORT_NAMES;
struct tx_descriptor *d = thread_descriptor;
unsigned long limit;
struct timespec now;
@@ -905,8 +952,8 @@
d->longest_abort_info_time = 0; /* out of memory! */
else
{
- if (stm_decode_abort_info(d, elapsed_time,
- num, d->longest_abort_info) != size)
+ if (stm_decode_abort_info(d, elapsed_time, num,
+ (struct tx_abort_info *)d->longest_abort_info) != size)
stm_fatalerror("during stm abort: object mutated unexpectedly\n");
d->longest_abort_info_time = elapsed_time;
@@ -937,28 +984,38 @@
stm_thread_local_obj = d->old_thread_local_obj;
d->old_thread_local_obj = NULL;
+ // notifies the CPU that we're potentially in a spin loop
+ SpinLoop(SPLP_ABORT);
+
+ /* make the transaction no longer active */
+ d->active = 0;
+ d->atomic = 0;
+
/* release the lock */
spinlock_release(d->public_descriptor->collection_lock);
/* clear memory registered by stm_clear_on_abort */
- if (stm_to_clear_on_abort)
- memset(stm_to_clear_on_abort, 0, stm_bytes_to_clear_on_abort);
+ if (d->mem_clear_on_abort)
+ memset(d->mem_clear_on_abort, 0, d->mem_bytes_to_clear_on_abort);
+ /* invoke the callbacks registered by stm_call_on_abort */
+ stm_invoke_callbacks_on_abort(d);
+ stm_clear_callbacks_on_abort(d);
+
+ /* XXX */
+ fprintf(stderr, "[%lx] abort %s\n",
+ (long)d->public_descriptor_index, abort_names[num]);
dprintf(("\n"
"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
- "!!!!!!!!!!!!!!!!!!!!! [%lx] abort %d\n"
+ "!!!!!!!!!!!!!!!!!!!!! [%lx] abort %s\n"
"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
- "\n", (long)d->public_descriptor_index, num));
+ "\n", (long)d->public_descriptor_index, abort_names[num]));
if (num != ABRT_MANUAL && d->max_aborts >= 0 && !d->max_aborts--)
stm_fatalerror("unexpected abort!\n");
- // notifies the CPU that we're potentially in a spin loop
- SpinLoop(SPLP_ABORT);
// jump back to the setjmp_buf (this call does not return)
- d->active = 0;
- d->atomic = 0;
stm_stop_sharedlock();
longjmp(*d->setjmp_buf, 1);
}
@@ -1437,6 +1494,10 @@
d->num_commits++;
d->active = 0;
stm_stop_sharedlock();
+
+ /* clear the list of callbacks that would have been called
+ on abort */
+ stm_clear_callbacks_on_abort(d);
}
/************************************************************/
@@ -1477,6 +1538,9 @@
(XXX statically we should know when we're outside
a transaction) */
+ /* XXX */
+ fprintf(stderr, "[%lx] inevitable: %s\n",
+ (long)d->public_descriptor_index, why);
dprintf(("[%lx] inevitable: %s\n",
(long)d->public_descriptor_index, why));
@@ -1673,6 +1737,8 @@
stm_thread_local_obj = NULL;
d->thread_local_obj_ref = &stm_thread_local_obj;
d->max_aborts = -1;
+ d->tcolor = dprintfcolor();
+ d->pthreadid = pthread_self();
d->tx_prev = NULL;
d->tx_next = stm_tx_head;
if (d->tx_next != NULL) d->tx_next->tx_prev = d;
@@ -1746,5 +1812,5 @@
p += sprintf(p, "]\n");
dprintf(("%s", line));
- stm_free(d, sizeof(struct tx_descriptor));
+ stm_free(d);
}
diff --git a/rpython/translator/stm/src_stm/et.h b/rpython/translator/stm/src_stm/et.h
--- a/rpython/translator/stm/src_stm/et.h
+++ b/rpython/translator/stm/src_stm/et.h
@@ -65,6 +65,10 @@
*
* GCFLAG_HAS_ID is set on young objects that have an old reserved
* memory to be copied to in minor collections (obj->h_original)
+ *
+ * GCFLAG_WEAKREF is set on weakrefs. Only needed so that we can trace
+ * the weakptr when stealing a weakref. Maybe a better solution is to
+ * check the typeid?
*/
static const revision_t GCFLAG_OLD = STM_FIRST_GCFLAG << 0;
static const revision_t GCFLAG_VISITED = STM_FIRST_GCFLAG << 1;
@@ -80,6 +84,7 @@
static const revision_t GCFLAG_IMMUTABLE = STM_FIRST_GCFLAG << 11;
static const revision_t GCFLAG_SMALLSTUB /*debug*/ = STM_FIRST_GCFLAG << 12;
static const revision_t GCFLAG_MARKED = STM_FIRST_GCFLAG << 13;
+static const revision_t GCFLAG_WEAKREF = STM_FIRST_GCFLAG << 14;
/* warning, the last flag available is "<< 15" on 32-bit */
@@ -104,6 +109,7 @@
"IMMUTABLE", \
"SMALLSTUB", \
"MARKED", \
+ "WEAKREF", \
NULL }
#define IS_POINTER(v) (!((v) & 1)) /* even-valued number */
@@ -119,6 +125,15 @@
#define ABRT_COLLECT_MINOR 6
#define ABRT_COLLECT_MAJOR 7
#define ABORT_REASONS 8
+#define ABORT_NAMES { "MANUAL", \
+ "COMMIT", \
+ "STOLEN_MODIFIED", \
+ "VALIDATE_INFLIGHT", \
+ "VALIDATE_COMMIT", \
+ "VALIDATE_INEV", \
+ "COLLECT_MINOR", \
+ "COLLECT_MAJOR", \
+ }
#define SPLP_ABORT 0
#define SPLP_LOCKED_INFLIGHT 1
@@ -176,6 +191,11 @@
struct FXCache recent_reads_cache;
char **read_barrier_cache_ref;
struct tx_descriptor *tx_prev, *tx_next;
+ int tcolor;
+ pthread_t pthreadid;
+ void *mem_clear_on_abort;
+ size_t mem_bytes_to_clear_on_abort;
+ struct G2L callbacks_on_abort;
};
extern __thread struct tx_descriptor *thread_descriptor;
diff --git a/rpython/translator/stm/src_stm/extra.c b/rpython/translator/stm/src_stm/extra.c
--- a/rpython/translator/stm/src_stm/extra.c
+++ b/rpython/translator/stm/src_stm/extra.c
@@ -15,13 +15,53 @@
}
-__thread void *stm_to_clear_on_abort = NULL;
-__thread size_t stm_bytes_to_clear_on_abort;
-
void stm_clear_on_abort(void *start, size_t bytes)
{
- stm_to_clear_on_abort = start;
- stm_bytes_to_clear_on_abort = bytes;
+ struct tx_descriptor *d = thread_descriptor;
+ assert(d != NULL);
+ d->mem_clear_on_abort = start;
+ d->mem_bytes_to_clear_on_abort = bytes;
+}
+
+void stm_call_on_abort(void *key, void callback(void *))
+{
+ struct tx_descriptor *d = thread_descriptor;
+ if (d == NULL || d->active != 1)
+ return; /* ignore callbacks if we're outside a transaction or
+ in an inevitable transaction (which cannot abort) */
+ if (callback == NULL) {
+ /* ignore the return value: unregistered keys can be
+ "deleted" again */
+ g2l_delete_item(&d->callbacks_on_abort, (gcptr)key);
+ }
+ else {
+ /* double-registering the same key will crash */
+ g2l_insert(&d->callbacks_on_abort, (gcptr)key, (gcptr)callback);
+ }
+}
+
+void stm_clear_callbacks_on_abort(struct tx_descriptor *d)
+{
+ if (g2l_any_entry(&d->callbacks_on_abort))
+ g2l_clear(&d->callbacks_on_abort);
+}
+
+void stm_invoke_callbacks_on_abort(struct tx_descriptor *d)
+{
+ wlog_t *item;
+ assert(d->active == 0);
+
+ G2L_LOOP_FORWARD(d->callbacks_on_abort, item) {
+ void *key = (void *)item->addr;
+ void (*callback)(void *) = (void(*)(void *))item->val;
+ assert(key != NULL);
+ assert(callback != NULL);
+
+ /* The callback may call stm_call_on_abort(key, NULL).
+ It is ignored, because we're no longer active. */
+ callback(key);
+
+ } G2L_LOOP_END;
}
@@ -42,7 +82,8 @@
stm_minor_collect();
obj = stm_pop_root();
}
-
+ assert(obj->h_tid & GCFLAG_OLD);
+
spinlock_acquire(d->public_descriptor->collection_lock, 'P');
stub = stm_stub_malloc(d->public_descriptor, 0);
@@ -178,6 +219,8 @@
if (p->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) {
gcptr B = (gcptr)p->h_revision;
+ /* not stolen already: */
+ assert(!(B->h_tid & GCFLAG_PUBLIC));
B->h_original = (revision_t)O;
}
@@ -233,13 +276,75 @@
}
size_t stm_decode_abort_info(struct tx_descriptor *d, long long elapsed_time,
- int abort_reason, char *output)
+ int abort_reason, struct tx_abort_info *output)
{
- /* re-encodes the abort info as a single string.
+ /* Re-encodes the abort info as a single tx_abort_info structure.
+ This struct tx_abort_info is not visible to the outside, and used
+ only as an intermediate format that is fast to generate and without
+ requiring stm_read_barrier().
+ */
+ if (output != NULL) {
+ output->signature_packed = 127;
+ output->elapsed_time = elapsed_time;
+ output->abort_reason = abort_reason;
+ output->active = d->active;
+ output->atomic = d->atomic;
+ output->count_reads = d->count_reads;
+ output->reads_size_limit_nonatomic = d->reads_size_limit_nonatomic;
+ }
+
+ long num_words = 0;
+#define WRITE_WORD(word) { \
+ if (output) output->words[num_words] = (word); \
+ num_words++; \
+ }
+
+ long i;
+ for (i=0; i<d->abortinfo.size; i+=2) {
+ char *object = (char*)stm_repeat_read_barrier(d->abortinfo.items[i+0]);
+ long *fieldoffsets = (long*)d->abortinfo.items[i+1];
+ long kind, offset;
+ while (*fieldoffsets != 0) {
+ kind = *fieldoffsets++;
+ WRITE_WORD(kind);
+ if (kind < 0) {
+ /* -1 is start of sublist; -2 is end of sublist */
+ continue;
+ }
+ offset = *fieldoffsets++;
+ switch(kind) {
+ case 1: /* signed */
+ case 2: /* unsigned */
+ WRITE_WORD(*(long *)(object + offset));
+ break;
+ case 3: /* a string of bytes from the target object */
+ WRITE_WORD((revision_t)*(char **)(object + offset));
+ offset = *fieldoffsets++; /* offset of len in the string */
+ WRITE_WORD(offset);
+ break;
+ default:
+ stm_fatalerror("corrupted abort log\n");
+ }
+ }
+ }
+ WRITE_WORD(0);
+#undef WRITE_WORD
+ return sizeof(struct tx_abort_info) + (num_words - 1) * sizeof(revision_t);
+}
+
+static size_t unpack_abort_info(struct tx_descriptor *d,
+ struct tx_abort_info *ai,
+ char *output)
+{
+ /* Lazily decodes a struct tx_abort_info into a single plain string.
For convenience (no escaping needed, no limit on integer
- sizes, etc.) we follow the bittorrent format. */
+ sizes, etc.) we follow the bittorrent format. This makes the
+ format a bit more flexible for future changes. The struct
+ tx_abort_info is still needed as an intermediate step, because
+ the string parameters may not be readable during an abort
+ (they may be stubs).
+ */
size_t totalsize = 0;
- long i;
char buffer[32];
size_t res_size;
#define WRITE(c) { totalsize++; if (output) *output++=(c); }
@@ -250,74 +355,74 @@
}
WRITE('l');
WRITE('l');
- res_size = sprintf(buffer, "i%llde", (long long)elapsed_time);
+ res_size = sprintf(buffer, "i%llde", (long long)ai->elapsed_time);
WRITE_BUF(buffer, res_size);
- res_size = sprintf(buffer, "i%de", (int)abort_reason);
+ res_size = sprintf(buffer, "i%de", (int)ai->abort_reason);
WRITE_BUF(buffer, res_size);
res_size = sprintf(buffer, "i%lde", (long)d->public_descriptor_index);
WRITE_BUF(buffer, res_size);
- res_size = sprintf(buffer, "i%lde", (long)d->atomic);
+ res_size = sprintf(buffer, "i%lde", (long)ai->atomic);
WRITE_BUF(buffer, res_size);
- res_size = sprintf(buffer, "i%de", (int)d->active);
+ res_size = sprintf(buffer, "i%de", (int)ai->active);
WRITE_BUF(buffer, res_size);
- res_size = sprintf(buffer, "i%lue", (unsigned long)d->count_reads);
+ res_size = sprintf(buffer, "i%lue", (unsigned long)ai->count_reads);
WRITE_BUF(buffer, res_size);
res_size = sprintf(buffer, "i%lue",
- (unsigned long)d->reads_size_limit_nonatomic);
+ (unsigned long)ai->reads_size_limit_nonatomic);
WRITE_BUF(buffer, res_size);
WRITE('e');
- for (i=0; i<d->abortinfo.size; i+=2) {
- char *object = (char*)stm_repeat_read_barrier(d->abortinfo.items[i+0]);
- long *fieldoffsets = (long*)d->abortinfo.items[i+1];
- long kind, offset;
- size_t rps_size;
+
+ revision_t *src = ai->words;
+ while (*src != 0) {
+ long signed_value;
+ unsigned long unsigned_value;
char *rps;
+ long offset, rps_size;
- while (1) {
- kind = *fieldoffsets++;
- if (kind <= 0) {
- if (kind == -2) {
- WRITE('l'); /* '[', start of sublist */
- continue;
- }
- if (kind == -1) {
- WRITE('e'); /* ']', end of sublist */
- continue;
- }
- break; /* 0, terminator */
+ switch (*src++) {
+
+ case -2:
+ WRITE('l'); /* '[', start of sublist */
+ break;
+
+ case -1:
+ WRITE('e'); /* ']', end of sublist */
+ break;
+
+ case 1: /* signed */
+ signed_value = (long)(*src++);
+ res_size = sprintf(buffer, "i%lde", signed_value);
+ WRITE_BUF(buffer, res_size);
+ break;
+
+ case 2: /* unsigned */
+ unsigned_value = (unsigned long)(*src++);
+ res_size = sprintf(buffer, "i%lue", unsigned_value);
+ WRITE_BUF(buffer, res_size);
+ break;
+
+ case 3: /* a string of bytes from the target object */
+ rps = (char *)(*src++);
+ offset = *src++;
+ if (rps) {
+ rps = (char *)stm_read_barrier((gcptr)rps);
+ /* xxx a bit ad-hoc: it's a string whose length is a
+ * long at 'rps_size'; the string data follows
+ * immediately the length */
+ rps_size = *(long *)(rps + offset);
+ assert(rps_size >= 0);
+ res_size = sprintf(buffer, "%ld:", rps_size);
+ WRITE_BUF(buffer, res_size);
+ WRITE_BUF(rps + offset + sizeof(long), rps_size);
}
- offset = *fieldoffsets++;
- switch(kind) {
- case 1: /* signed */
- res_size = sprintf(buffer, "i%lde",
- *(long*)(object + offset));
- WRITE_BUF(buffer, res_size);
- break;
- case 2: /* unsigned */
- res_size = sprintf(buffer, "i%lue",
- *(unsigned long*)(object + offset));
- WRITE_BUF(buffer, res_size);
- break;
- case 3: /* a string of bytes from the target object */
- rps = *(char **)(object + offset);
- offset = *fieldoffsets++;
- if (rps) {
- /* xxx a bit ad-hoc: it's a string whose length is a
- * long at 'offset', following immediately the offset */
- rps_size = *(long *)(rps + offset);
- offset += sizeof(long);
- assert(rps_size >= 0);
- res_size = sprintf(buffer, "%zu:", rps_size);
- WRITE_BUF(buffer, res_size);
- WRITE_BUF(rps + offset, rps_size);
- }
- else {
- WRITE_BUF("0:", 2);
- }
- break;
- default:
- stm_fatalerror("corrupted abort log\n");
+ else {
+ /* write NULL as an empty string, good enough for now */
+ WRITE_BUF("0:", 2);
}
+ break;
+
+ default:
+ stm_fatalerror("corrupted abort log\n");
}
}
WRITE('e');
@@ -332,6 +437,53 @@
struct tx_descriptor *d = thread_descriptor;
if (d->longest_abort_info_time <= 0)
return NULL;
+
+ struct tx_abort_info *ai = (struct tx_abort_info *)d->longest_abort_info;
+ assert(ai->signature_packed == 127);
+
+ stm_become_inevitable("stm_inspect_abort_info");
+
+ size_t size = unpack_abort_info(d, ai, NULL);
+ char *text = malloc(size);
+ if (text == NULL)
+ return NULL; /* out of memory */
+ if (unpack_abort_info(d, ai, text) != size)
+ stm_fatalerror("stm_inspect_abort_info: "
+ "object mutated unexpectedly\n");
+ free(ai);
+ d->longest_abort_info = text;
d->longest_abort_info_time = 0;
return d->longest_abort_info;
}
+
+void stm_visit_abort_info(struct tx_descriptor *d, void (*visit)(gcptr *))
+{
+ long i, size = d->abortinfo.size;
+ gcptr *items = d->abortinfo.items;
+ for (i = 0; i < size; i += 2) {
+ visit(&items[i]);
+ /* items[i+1] is not a gc ptr */
+ }
+
+ struct tx_abort_info *ai = (struct tx_abort_info *)d->longest_abort_info;
+ if (ai != NULL && ai->signature_packed == 127) {
+ revision_t *src = ai->words;
+ while (*src != 0) {
+ gcptr *rpps;
+
+ switch (*src++) {
+
+ case 1: /* signed */
+ case 2: /* unsigned */
+ src++; /* ignore the value */
+ break;
+
+ case 3:
+ rpps = (gcptr *)(src++);
+ src++; /* ignore the offset */
+ visit(rpps); /* visit() the string object */
+ break;
+ }
+ }
+ }
+}
diff --git a/rpython/translator/stm/src_stm/extra.h b/rpython/translator/stm/src_stm/extra.h
--- a/rpython/translator/stm/src_stm/extra.h
+++ b/rpython/translator/stm/src_stm/extra.h
@@ -3,8 +3,22 @@
#define _SRCSTM_EXTRA_H
+struct tx_abort_info {
+ char signature_packed; /* 127 when the abort_info is in this format */
+ long long elapsed_time;
+ int abort_reason;
+ int active;
+ long atomic;
+ unsigned long count_reads;
+ unsigned long reads_size_limit_nonatomic;
+ revision_t words[1]; /* the 'words' list is a bytecode-like format */
+};
+
void stm_copy_to_old_id_copy(gcptr obj, gcptr id);
size_t stm_decode_abort_info(struct tx_descriptor *d, long long elapsed_time,
- int abort_reason, char *output);
+ int abort_reason, struct tx_abort_info *output);
+void stm_visit_abort_info(struct tx_descriptor *d, void (*visit)(gcptr *));
+void stm_clear_callbacks_on_abort(struct tx_descriptor *d);
+void stm_invoke_callbacks_on_abort(struct tx_descriptor *d);
#endif
diff --git a/rpython/translator/stm/src_stm/gcpage.c b/rpython/translator/stm/src_stm/gcpage.c
--- a/rpython/translator/stm/src_stm/gcpage.c
+++ b/rpython/translator/stm/src_stm/gcpage.c
@@ -208,8 +208,9 @@
//stm_dbgmem_not_used(obj, size_class * WORD, 0);
}
else {
- g2l_delete_item(&gcp->nonsmall_objects, obj);
- stm_free(obj, size);
+ int deleted = g2l_delete_item(&gcp->nonsmall_objects, obj);
+ assert(deleted);
+ stm_free(obj);
}
}
@@ -235,7 +236,8 @@
assert(obj->h_tid & GCFLAG_PUBLIC);
stmgcpage_acquire_global_lock();
- g2l_delete_item(&registered_stubs, obj);
+ int deleted = g2l_delete_item(&registered_stubs, obj);
+ assert(deleted);
stmgcpage_release_global_lock();
dprintf(("unregistered %p\n", obj));
}
@@ -416,6 +418,7 @@
}
else if (obj != original) {
/* copy obj over original */
+ assert(obj->h_original == (revision_t)original);
copy_over_original(obj, original);
}
@@ -496,15 +499,27 @@
static void mark_registered_stubs(void)
{
wlog_t *item;
+ gcptr L;
+
G2L_LOOP_FORWARD(registered_stubs, item) {
gcptr R = item->addr;
assert(R->h_tid & GCFLAG_SMALLSTUB);
+ /* The following assert can fail if we have a stub pointing to
+ a stub and both are registered_stubs. This case is benign. */
+ //assert(!(R->h_tid & (GCFLAG_VISITED | GCFLAG_MARKED)));
R->h_tid |= (GCFLAG_MARKED | GCFLAG_VISITED);
- gcptr L = (gcptr)(R->h_revision - 2);
- L = stmgcpage_visit(L);
- R->h_revision = ((revision_t)L) | 2;
+ if (R->h_revision & 2) {
+ L = (gcptr)(R->h_revision - 2);
+ L = stmgcpage_visit(L);
+ R->h_revision = ((revision_t)L) | 2;
+ }
+ else {
+ L = (gcptr)R->h_revision;
+ L = stmgcpage_visit(L);
+ R->h_revision = (revision_t)L;
+ }
/* h_original will be kept up-to-date because
it is either == L or L's h_original. And
@@ -552,12 +567,7 @@
visit_take_protected(&d->old_thread_local_obj);
/* the abortinfo objects */
- long i, size = d->abortinfo.size;
- gcptr *items = d->abortinfo.items;
- for (i = 0; i < size; i += 2) {
- visit_take_protected(&items[i]);
- /* items[i+1] is not a gc ptr */
- }
+ stm_visit_abort_info(d, &visit_take_protected);
/* the current transaction's private copies of public objects */
wlog_t *item;
@@ -590,8 +600,8 @@
} G2L_LOOP_END;
/* reinsert to real pub_to_priv */
- size = new_public_to_private.size;
- items = new_public_to_private.items;
+ long i, size = new_public_to_private.size;
+ gcptr *items = new_public_to_private.items;
for (i = 0; i < size; i += 2) {
g2l_insert(&d->public_to_private, items[i], items[i + 1]);
}
@@ -809,7 +819,7 @@
p = (gcptr)(((char *)p) + obj_size);
}
#endif
- stm_free(lpage, GC_PAGE_SIZE);
+ stm_free(lpage);
assert(gcp->count_pages > 0);
assert(count_global_pages > 0);
gcp->count_pages--;
@@ -839,7 +849,7 @@
}
else {
G2L_LOOP_DELETE(item);
- stm_free(p, stmgc_size(p));
+ stm_free(p);
}
} G2L_LOOP_END_AND_COMPRESS;
@@ -954,10 +964,14 @@
mark_prebuilt_roots();
mark_registered_stubs();
mark_all_stack_roots();
+
+ /* weakrefs: */
do {
visit_all_objects();
+ stm_update_old_weakrefs_lists();
stm_visit_old_weakrefs();
} while (gcptrlist_size(&objects_to_trace) != 0);
+
gcptrlist_delete(&objects_to_trace);
clean_up_lists_of_read_objects_and_fix_outdated_flags();
stm_clean_old_weakrefs();
diff --git a/rpython/translator/stm/src_stm/lists.c b/rpython/translator/stm/src_stm/lists.c
--- a/rpython/translator/stm/src_stm/lists.c
+++ b/rpython/translator/stm/src_stm/lists.c
@@ -19,7 +19,7 @@
void g2l_delete(struct G2L *g2l)
{
- stm_free(g2l->raw_start, g2l->raw_end - g2l->raw_start);
+ stm_free(g2l->raw_start);
memset(g2l, 0, sizeof(struct G2L));
}
@@ -66,7 +66,7 @@
{
g2l_insert(&newg2l, item->addr, item->val);
} G2L_LOOP_END;
- stm_free(g2l->raw_start, g2l->raw_end - g2l->raw_start);
+ stm_free(g2l->raw_start);
*g2l = newg2l;
}
@@ -91,7 +91,6 @@
int shift = 0;
char *p = (char *)(g2l->toplevel.items);
char *entry;
- assert((key & (sizeof(void*)-1)) == 0); /* only for aligned keys */
while (1)
{
p += (key >> shift) & TREE_MASK;
@@ -133,15 +132,15 @@
*(char **)p = (char *)wlog;
}
-void g2l_delete_item(struct G2L *g2l, gcptr addr)
+int g2l_delete_item(struct G2L *g2l, gcptr addr)
{
wlog_t *entry;
G2L_FIND(*g2l, addr, entry, goto missing);
entry->addr = NULL;
- return;
+ return 1;
missing:
- stm_fatalerror("g2l_delete_item: item %p not in dict", addr);
+ return 0;
}
/************************************************************/
@@ -152,7 +151,7 @@
//fprintf(stderr, "list %p deleted (%ld KB)\n",
//gcptrlist, gcptrlist->alloc * sizeof(gcptr) / 1024);
gcptrlist->size = 0;
- stm_free(gcptrlist->items, gcptrlist->alloc * sizeof(gcptr));
+ stm_free(gcptrlist->items);
gcptrlist->items = NULL;
gcptrlist->alloc = 0;
}
@@ -183,7 +182,7 @@
long i;
for (i=0; i<gcptrlist->size; i++)
newitems[i] = gcptrlist->items[i];
- stm_free(gcptrlist->items, gcptrlist->alloc * sizeof(gcptr));
+ stm_free(gcptrlist->items);
gcptrlist->items = newitems;
gcptrlist->alloc = newalloc;
}
diff --git a/rpython/translator/stm/src_stm/lists.h b/rpython/translator/stm/src_stm/lists.h
--- a/rpython/translator/stm/src_stm/lists.h
+++ b/rpython/translator/stm/src_stm/lists.h
@@ -39,7 +39,7 @@
void g2l_clear(struct G2L *g2l);
void g2l_delete(struct G2L *g2l);
static inline void g2l_delete_not_used_any_more(struct G2L *g2l) {
- stm_free(g2l->raw_start, g2l->raw_end - g2l->raw_start);
+ stm_free(g2l->raw_start);
}
static inline int g2l_any_entry(struct G2L *g2l) {
@@ -114,7 +114,7 @@
wlog_t *_g2l_find(char *entry, gcptr addr);
void _g2l_compress(struct G2L *g2l);
void g2l_insert(struct G2L *g2l, gcptr addr, gcptr val);
-void g2l_delete_item(struct G2L *g2l, gcptr addr);
+int g2l_delete_item(struct G2L *g2l, gcptr addr);
static inline int g2l_contains(struct G2L *g2l, gcptr addr)
{
diff --git a/rpython/translator/stm/src_stm/nursery.c b/rpython/translator/stm/src_stm/nursery.c
--- a/rpython/translator/stm/src_stm/nursery.c
+++ b/rpython/translator/stm/src_stm/nursery.c
@@ -50,7 +50,7 @@
updatechainheads() -> stub_malloc() -> ...): */
assert(!minor_collect_anything_to_do(d)
|| d->nursery_current == d->nursery_end);
- stm_free(d->nursery_base, GC_NURSERY);
+ stm_free(d->nursery_base);
gcptrlist_delete(&d->old_objects_to_trace);
gcptrlist_delete(&d->public_with_young_copy);
@@ -95,6 +95,7 @@
{
/* XXX inline the fast path */
assert(tid == (tid & STM_USER_TID_MASK));
+ assert(thread_descriptor->active > 0);
gcptr P = allocate_nursery(size, tid);
P->h_revision = stm_private_rev_num;
assert(P->h_original == 0); /* null-initialized already */
@@ -156,6 +157,7 @@
struct tx_descriptor *d = thread_descriptor;
if (!stmgc_is_in_nursery(d, obj)) {
+ assert(IMPLIES(obj, obj->h_tid & GCFLAG_OLD));
/* not a nursery object */
}
else {
@@ -454,12 +456,7 @@
visit_if_young(d->thread_local_obj_ref);
visit_if_young(&d->old_thread_local_obj);
- long i, size = d->abortinfo.size;
- gcptr *items = d->abortinfo.items;
- for (i = 0; i < size; i += 2) {
- visit_if_young(&items[i]);
- /* items[i+1] is not a gc ptr */
- }
+ stm_visit_abort_info(d, &visit_if_young);
}
static void minor_collect(struct tx_descriptor *d)
@@ -524,7 +521,7 @@
#if defined(_GC_DEBUG) && _GC_DEBUG >= 2
if (d->nursery_cleared == NC_ALREADY_CLEARED)
assert_cleared(d->nursery_base, GC_NURSERY);
- stm_free(d->nursery_base, GC_NURSERY);
+ stm_free(d->nursery_base);
d->nursery_base = stm_malloc(GC_NURSERY);
d->nursery_end = d->nursery_base + GC_NURSERY;
dprintf(("minor: nursery moved to [%p to %p]\n", d->nursery_base,
diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision
--- a/rpython/translator/stm/src_stm/revision
+++ b/rpython/translator/stm/src_stm/revision
@@ -1,1 +1,1 @@
-d2e01fce511f
+111c09337109
diff --git a/rpython/translator/stm/src_stm/steal.c b/rpython/translator/stm/src_stm/steal.c
--- a/rpython/translator/stm/src_stm/steal.c
+++ b/rpython/translator/stm/src_stm/steal.c
@@ -20,58 +20,6 @@
};
static __thread struct tx_steal_data *steal_data;
-static void replace_ptr_to_immutable_with_stub(gcptr * pobj)
-{
- gcptr stub, obj = *pobj;
- assert(obj->h_tid & GCFLAG_IMMUTABLE);
- assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED));
- if (obj->h_tid & GCFLAG_PUBLIC) {
- /* young public, replace with stolen old copy */
- assert(obj->h_tid & GCFLAG_MOVED);
- assert(IS_POINTER(obj->h_revision));
- stub = (gcptr)obj->h_revision;
- assert(!IS_POINTER(stub->h_revision)); /* not outdated */
- goto done;
- }
-
- /* old or young protected! mark as PUBLIC */
- if (!(obj->h_tid & GCFLAG_OLD)) {
- /* young protected */
- gcptr O;
-
- if (obj->h_tid & GCFLAG_HAS_ID) {
- /* use id-copy for us */
- O = (gcptr)obj->h_original;
- obj->h_tid &= ~GCFLAG_HAS_ID;
- stm_copy_to_old_id_copy(obj, O);
- O->h_original = 0;
- } else {
- O = stmgc_duplicate_old(obj);
-
- /* young and without original? */
- if (!(obj->h_original))
- obj->h_original = (revision_t)O;
- }
- obj->h_tid |= (GCFLAG_MOVED | GCFLAG_PUBLIC);
- obj->h_revision = (revision_t)O;
-
- O->h_tid |= GCFLAG_PUBLIC;
- /* here it is fine if it stays in read caches because
- the object is immutable anyway and there are no
- write_barriers allowed. */
- dprintf(("steal prot immutable -> public: %p -> %p\n", obj, O));
- stub = O;
- goto done;
- }
- /* old protected: */
- dprintf(("prot immutable -> public: %p\n", obj));
- obj->h_tid |= GCFLAG_PUBLIC;
-
- return;
- done:
- *pobj = stub;
- dprintf((" stolen: fixing *%p: %p -> %p\n", pobj, obj, stub));
-}
static void replace_ptr_to_protected_with_stub(gcptr *pobj)
{
@@ -80,11 +28,17 @@
(GCFLAG_PUBLIC | GCFLAG_OLD))
return;
- if (obj->h_tid & GCFLAG_IMMUTABLE) {
- replace_ptr_to_immutable_with_stub(pobj);
- return;
- }
-
+ /* if ((obj->h_tid & GCFLAG_PUBLIC) && (obj->h_tid & GCFLAG_MOVED)) { */
+ /* /\* young stolen public, replace with stolen old copy */
+ /* All references in this old object should be stubbed already */
+ /* by stealing.*\/ */
+ /* assert(IS_POINTER(obj->h_revision)); */
+ /* stub = (gcptr)obj->h_revision; */
+ /* assert(stub->h_tid & GCFLAG_OLD); */
+ /* assert(stub->h_tid & GCFLAG_PUBLIC); */
+ /* goto done; */
+ /* } */
+
/* we use 'all_stubs', a dictionary, in order to try to avoid
duplicate stubs for the same object. XXX maybe it would be
better to use a fast approximative cache that stays around for
@@ -169,6 +123,7 @@
To know which case it is, read GCFLAG_PRIVATE_FROM_PROTECTED.
*/
if (L->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) {
+ assert(!(L->h_tid & GCFLAG_IMMUTABLE));
gcptr B = (gcptr)L->h_revision; /* the backup copy */
/* On young objects here, h_original is always set
@@ -298,6 +253,8 @@
memset(&sd.all_stubs, 0, sizeof(sd.all_stubs));
steal_data = &sd;
stmgc_trace(L, &replace_ptr_to_protected_with_stub);
+ if (L->h_tid & GCFLAG_WEAKREF)
+ replace_ptr_to_protected_with_stub(WEAKREF_PTR(L, stmgc_size(L)));
g2l_delete_not_used_any_more(&sd.all_stubs);
/* If another thread (the foreign or a 3rd party) does a read
diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h
--- a/rpython/translator/stm/src_stm/stmgc.h
+++ b/rpython/translator/stm/src_stm/stmgc.h
@@ -155,7 +155,7 @@
stm_inspect_abort_info(). (XXX details not documented yet) */
void stm_abort_info_push(gcptr obj, long fieldoffsets[]);
void stm_abort_info_pop(long count);
-char *stm_inspect_abort_info(void);
+char *stm_inspect_abort_info(void); /* turns the transaction inevitable */
/* mostly for debugging support */
void stm_abort_and_retry(void);
@@ -176,10 +176,14 @@
/* Clear some memory when aborting a transaction in the current
thread. This is a provisional API. The information is stored
- thread-locally and belongs to the current thread. */
+ in the current tx_descriptor. */
void stm_clear_on_abort(void *start, size_t bytes);
-extern __thread void *stm_to_clear_on_abort;
-extern __thread size_t stm_bytes_to_clear_on_abort;
+
+/* If the current transaction aborts later, invoke 'callback(key)'.
+ If the current transaction commits, then the callback is forgotten.
+ You can only register one callback per key. You can call
+ 'stm_call_on_abort(key, NULL)' to cancel an existing callback. */
+void stm_call_on_abort(void *key, void callback(void *));
/* only user currently is stm_allocate_public_integer_address() */
void stm_register_integer_address(intptr_t);
diff --git a/rpython/translator/stm/src_stm/stmsync.c b/rpython/translator/stm/src_stm/stmsync.c
--- a/rpython/translator/stm/src_stm/stmsync.c
+++ b/rpython/translator/stm/src_stm/stmsync.c
@@ -69,7 +69,7 @@
assert(x == END_MARKER_ON);
assert(stm_shadowstack == d->shadowstack);
stm_shadowstack = NULL;
- stm_free(d->shadowstack, sizeof(gcptr) * LENGTH_SHADOW_STACK);
+ stm_free(d->shadowstack);
}
void stm_set_max_aborts(int max_aborts)
@@ -168,7 +168,7 @@
has configured 'reads_size_limit_nonatomic' to a smaller value.
When such a shortened transaction succeeds, the next one will
see its length limit doubled, up to the maximum. */
- if (counter == 0) {
+ if (counter == 0 && d->active != 2) {
unsigned long limit = d->reads_size_limit_nonatomic;
if (limit != 0 && limit < (stm_regular_length_limit >> 1))
limit = (limit << 1) | 1;
@@ -219,6 +219,8 @@
struct tx_descriptor *d = thread_descriptor;
if (!d->atomic)
CommitTransaction();
+ else
+ BecomeInevitable("stm_commit_transaction but atomic");
}
void stm_begin_inevitable_transaction(void)
diff --git a/rpython/translator/stm/src_stm/weakref.c b/rpython/translator/stm/src_stm/weakref.c
--- a/rpython/translator/stm/src_stm/weakref.c
+++ b/rpython/translator/stm/src_stm/weakref.c
@@ -1,17 +1,16 @@
/* Imported by rpython/translator/stm/import_stmgc.py */
#include "stmimpl.h"
-#define WEAKREF_PTR(wr, sz) (*(gcptr *)(((char *)(wr)) + (sz) - WORD))
-
gcptr stm_weakref_allocate(size_t size, unsigned long tid, gcptr obj)
{
stm_push_root(obj);
gcptr weakref = stm_allocate_immutable(size, tid);
obj = stm_pop_root();
+ weakref->h_tid |= GCFLAG_WEAKREF;
assert(!(weakref->h_tid & GCFLAG_OLD)); /* 'size' too big? */
assert(stmgc_size(weakref) == size);
- WEAKREF_PTR(weakref, size) = obj;
+ *WEAKREF_PTR(weakref, size) = obj;
gcptrlist_insert(&thread_descriptor->young_weakrefs, weakref);
dprintf(("alloc weakref %p -> %p\n", weakref, obj));
return weakref;
@@ -32,34 +31,34 @@
continue; /* the weakref itself dies */
weakref = (gcptr)weakref->h_revision;
+ assert(weakref->h_tid & GCFLAG_OLD);
+ assert(!IS_POINTER(weakref->h_revision));
+
size_t size = stmgc_size(weakref);
- gcptr pointing_to = WEAKREF_PTR(weakref, size);
+ gcptr pointing_to = *WEAKREF_PTR(weakref, size);
assert(pointing_to != NULL);
if (stmgc_is_in_nursery(d, pointing_to)) {
if (pointing_to->h_tid & GCFLAG_MOVED) {
dprintf(("weakref ptr moved %p->%p\n",
- WEAKREF_PTR(weakref, size),
+ *WEAKREF_PTR(weakref, size),
(gcptr)pointing_to->h_revision));
- WEAKREF_PTR(weakref, size) = (gcptr)pointing_to->h_revision;
+ *WEAKREF_PTR(weakref, size) = (gcptr)pointing_to->h_revision;
}
else {
- dprintf(("weakref lost ptr %p\n", WEAKREF_PTR(weakref, size)));
- WEAKREF_PTR(weakref, size) = NULL;
+ assert(!IS_POINTER(pointing_to->h_revision));
+ assert(IMPLIES(!(pointing_to->h_tid & GCFLAG_HAS_ID),
+ pointing_to->h_original == 0));
+
+ dprintf(("weakref lost ptr %p\n", *WEAKREF_PTR(weakref, size)));
+ *WEAKREF_PTR(weakref, size) = NULL;
continue; /* no need to remember this weakref any longer */
}
}
- else {
- /* # see test_weakref_to_prebuilt: it's not useful to put
- # weakrefs into 'old_objects_with_weakrefs' if they point
- # to a prebuilt object (they are immortal). If moreover
- # the 'pointing_to' prebuilt object still has the
- # GCFLAG_NO_HEAP_PTRS flag, then it's even wrong, because
- # 'pointing_to' will not get the GCFLAG_VISITED during
- # the next major collection. Solve this by not registering
- # the weakref into 'old_objects_with_weakrefs'.
- */
- }
+ assert((*WEAKREF_PTR(weakref, size))->h_tid & GCFLAG_OLD);
+ /* in case we now point to a stub because the weakref got stolen,
+ simply keep the weakref by inserting it into old_weakrefs */
+
gcptrlist_insert(&d->public_descriptor->old_weakrefs, weakref);
}
}
@@ -78,11 +77,10 @@
if (obj->h_tid & GCFLAG_MARKED)
return 1;
- if (!(obj->h_tid & GCFLAG_PUBLIC))
- return 0;
-
- if (obj->h_original != 0 &&
- !(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL)) {
+ /* if (!(obj->h_tid & GCFLAG_PUBLIC)) */
+ /* return 0; */
+ assert(!(obj->h_tid & GCFLAG_PREBUILT_ORIGINAL));
+ if (obj->h_original != 0) {
gcptr original = (gcptr)obj->h_original;
assert(IMPLIES(original->h_tid & GCFLAG_VISITED,
original->h_tid & GCFLAG_MARKED));
@@ -92,6 +90,21 @@
return 0;
}
+static void update_old_weakrefs_list(struct tx_public_descriptor *gcp)
+{
+ long i, size = gcp->old_weakrefs.size;
+ gcptr *items = gcp->old_weakrefs.items;
+
+ for (i = 0; i < size; i++) {
+ gcptr weakref = items[i];
+
+ /* if a weakref moved, update its position in the list */
+ if (weakref->h_tid & GCFLAG_MOVED) {
+ items[i] = (gcptr)weakref->h_original;
+ }
+ }
+}
+
static void visit_old_weakrefs(struct tx_public_descriptor *gcp)
{
/* Note: it's possible that a weakref points to a public stub to a
@@ -105,27 +118,25 @@
for (i = 0; i < size; i++) {
gcptr weakref = items[i];
- /* weakrefs are immutable: during a major collection, they
- cannot be in the nursery, and so there should be only one
- version of each weakref object. XXX relying on this is
- a bit fragile, but simplifies things a lot... */
- assert(weakref->h_revision & 1);
-
if (!(weakref->h_tid & GCFLAG_VISITED)) {
/* the weakref itself dies */
}
else {
+ /* the weakref belongs to our thread, therefore we should
+ always see the most current revision here: */
+ assert(weakref->h_revision & 1);
+
size_t size = stmgc_size(weakref);
- gcptr pointing_to = WEAKREF_PTR(weakref, size);
+ gcptr pointing_to = *WEAKREF_PTR(weakref, size);
assert(pointing_to != NULL);
if (is_partially_visited(pointing_to)) {
pointing_to = stmgcpage_visit(pointing_to);
dprintf(("mweakref ptr moved %p->%p\n",
- WEAKREF_PTR(weakref, size),
+ *WEAKREF_PTR(weakref, size),
pointing_to));
assert(pointing_to->h_tid & GCFLAG_VISITED);
- WEAKREF_PTR(weakref, size) = pointing_to;
+ *WEAKREF_PTR(weakref, size) = pointing_to;
}
else {
/* the weakref appears to be pointing to a dying object,
@@ -146,12 +157,12 @@
assert(weakref->h_revision & 1);
if (weakref->h_tid & GCFLAG_VISITED) {
size_t size = stmgc_size(weakref);
- gcptr pointing_to = WEAKREF_PTR(weakref, size);
+ gcptr pointing_to = *WEAKREF_PTR(weakref, size);
if (pointing_to->h_tid & GCFLAG_VISITED) {
continue; /* the target stays alive, the weakref remains */
}
- dprintf(("mweakref lost ptr %p\n", WEAKREF_PTR(weakref, size)));
- WEAKREF_PTR(weakref, size) = NULL; /* the target dies */
+ dprintf(("mweakref lost ptr %p\n", *WEAKREF_PTR(weakref, size)));
+ *WEAKREF_PTR(weakref, size) = NULL; /* the target dies */
}
/* remove this weakref from the list */
items[i] = items[--gcp->old_weakrefs.size];
@@ -171,6 +182,14 @@
visit(gcp);
}
+void stm_update_old_weakrefs_lists(void)
+{
+ /* go over the old weakrefs lists and replace any entries whose
+ pointers may have changed because of copy_over_original */
+ for_each_public_descriptor(update_old_weakrefs_list);
+}
+
+
void stm_visit_old_weakrefs(void)
{
/* Figure out which weakrefs survive, which possibly
diff --git a/rpython/translator/stm/src_stm/weakref.h b/rpython/translator/stm/src_stm/weakref.h
--- a/rpython/translator/stm/src_stm/weakref.h
+++ b/rpython/translator/stm/src_stm/weakref.h
@@ -2,8 +2,10 @@
#ifndef _SRCSTM_WEAKREF_H
#define _SRCSTM_WEAKREF_H
+#define WEAKREF_PTR(wr, sz) ((gcptr *)(((char *)(wr)) + (sz) - WORD))
void stm_move_young_weakrefs(struct tx_descriptor *);
+void stm_update_old_weakrefs_lists(void);
void stm_visit_old_weakrefs(void);
void stm_clean_old_weakrefs(void);
More information about the pypy-commit
mailing list