[pypy-commit] pypy stmgc-c4: import stmgc (contains important fix regarding new thread locals)
Raemi
noreply at buildbot.pypy.org
Tue Nov 5 22:07:16 CET 2013
Author: Remi Meier <remi.meier at gmail.com>
Branch: stmgc-c4
Changeset: r67859:01c1a87d3707
Date: 2013-11-05 22:05 +0100
http://bitbucket.org/pypy/pypy/changeset/01c1a87d3707/
Log: import stmgc (contains important fix regarding new thread locals)
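
The change below replaces indirect accesses such as *d->active_ref with direct reads and writes of the real thread-locals stm_active, stm_nursery_current and stm_nursery_nextlimit, while each tx_descriptor keeps *_ref pointers to those variables so that other threads can still reach them through the descriptor. A minimal, simplified sketch of that layout (descriptor_init() and the two-field struct are invented for illustration; the real declarations live in stmgc's sources):

    /* Simplified sketch, not the actual stmgc declarations. */
    #include <assert.h>

    static __thread int   stm_active;            /* 0 = not in a transaction */
    static __thread char *stm_nursery_current;   /* bump-allocation pointer  */

    struct tx_descriptor {
        int   *active_ref;            /* points at the owner's stm_active */
        char **nursery_current_ref;   /* points at the owner's stm_nursery_current */
    };

    static __thread struct tx_descriptor *thread_descriptor;

    /* Run once per thread: publish pointers to this thread's thread-locals. */
    static void descriptor_init(struct tx_descriptor *d)
    {
        d->active_ref = &stm_active;
        d->nursery_current_ref = &stm_nursery_current;
        thread_descriptor = d;
    }

    int main(void)
    {
        struct tx_descriptor d;
        descriptor_init(&d);
        stm_active = 1;
        /* The owning thread reads the cheap thread-local directly; other
           threads would go through *d.active_ref, which aliases it. */
        assert(*thread_descriptor->active_ref == stm_active);
        return 0;
    }
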
diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c
--- a/rpython/translator/stm/src_stm/et.c
+++ b/rpython/translator/stm/src_stm/et.c
@@ -706,7 +706,7 @@
}
struct tx_descriptor *d = thread_descriptor;
- assert(*d->active_ref >= 1);
+ assert(stm_active >= 1);
/* We need the collection_lock for the sequel; this is required notably
because we're about to edit flags on a protected object.
@@ -890,7 +890,7 @@
void SpinLoop(int num)
{
struct tx_descriptor *d = thread_descriptor;
- assert(*d->active_ref >= 1);
+ assert(stm_active >= 1);
assert(num < SPINLOOP_REASONS);
d->num_spinloops[num]++;
smp_spinloop();
@@ -925,7 +925,7 @@
assert(!stm_has_got_any_lock(d));
}
- assert(*d->active_ref != 0);
+ assert(stm_active != 0);
assert(!is_inevitable(d));
assert(num < ABORT_REASONS);
d->num_aborts[num]++;
@@ -990,7 +990,7 @@
SpinLoop(SPLP_ABORT);
/* make the transaction no longer active */
- *d->active_ref = 0;
+ stm_active = 0;
d->atomic = 0;
/* release the lock */
@@ -1044,10 +1044,10 @@
void AbortNowIfDelayed(void)
{
struct tx_descriptor *d = thread_descriptor;
- if (*d->active_ref < 0)
+ if (stm_active < 0)
{
- int reason = -*d->active_ref;
- *d->active_ref = 1;
+ int reason = -stm_active;
+ stm_active = 1;
AbortTransaction(reason);
}
}
@@ -1099,7 +1099,7 @@
{
struct tx_descriptor *d = thread_descriptor;
init_transaction(d, 0);
- *d->active_ref = 1;
+ stm_active = 1;
d->setjmp_buf = buf;
d->longjmp_callback = longjmp_callback;
d->old_thread_local_obj = stm_thread_local_obj;
@@ -1509,7 +1509,7 @@
spinlock_release(d->public_descriptor->collection_lock);
d->num_commits++;
- *d->active_ref = 0;
+ stm_active = 0;
if (!stay_inevitable)
stm_stop_sharedlock();
@@ -1551,7 +1551,7 @@
{ /* must save roots around this call */
revision_t cur_time;
struct tx_descriptor *d = thread_descriptor;
- if (d == NULL || *d->active_ref != 1)
+ if (d == NULL || stm_active != 1)
return; /* I am already inevitable, or not in a transaction at all
(XXX statically we should know when we're outside
a transaction) */
@@ -1762,11 +1762,15 @@
assert(d->my_lock & 1);
assert(d->my_lock >= LOCKED);
stm_private_rev_num = -d->my_lock;
+ /* Attention: in the following, we add references to real thread-locals
+ to the thread_descriptor. Make sure that force_minor_collections()
+ fakes all of them when doing minor collections in other threads! */
d->active_ref = &stm_active;
d->nursery_current_ref = &stm_nursery_current;
d->nursery_nextlimit_ref = &stm_nursery_nextlimit;
d->private_revision_ref = &stm_private_rev_num;
d->read_barrier_cache_ref = &stm_read_barrier_cache;
+
stm_thread_local_obj = NULL;
d->thread_local_obj_ref = &stm_thread_local_obj;
d->max_aborts = -1;
diff --git a/rpython/translator/stm/src_stm/extra.c b/rpython/translator/stm/src_stm/extra.c
--- a/rpython/translator/stm/src_stm/extra.c
+++ b/rpython/translator/stm/src_stm/extra.c
@@ -26,7 +26,7 @@
void stm_call_on_abort(void *key, void callback(void *))
{
struct tx_descriptor *d = thread_descriptor;
- if (d == NULL || *d->active_ref != 1)
+ if (d == NULL || stm_active != 1)
return; /* ignore callbacks if we're outside a transaction or
in an inevitable transaction (which cannot abort) */
if (callback == NULL) {
diff --git a/rpython/translator/stm/src_stm/gcpage.c b/rpython/translator/stm/src_stm/gcpage.c
--- a/rpython/translator/stm/src_stm/gcpage.c
+++ b/rpython/translator/stm/src_stm/gcpage.c
@@ -920,9 +920,15 @@
struct tx_descriptor *saved = thread_descriptor;
revision_t saved_private_rev = stm_private_rev_num;
char *saved_read_barrier_cache = stm_read_barrier_cache;
+ int saved_active = stm_active;
+ char *saved_nursery_current = stm_nursery_current;
+ char *saved_nursery_nextlimit = stm_nursery_nextlimit;
assert(saved_private_rev == *saved->private_revision_ref);
assert(saved_read_barrier_cache == *saved->read_barrier_cache_ref);
+ assert(saved_active == *saved->active_ref);
+ assert(saved_nursery_current == *saved->nursery_current_ref);
+ assert(saved_nursery_nextlimit == *saved->nursery_nextlimit_ref);
for (d = stm_tx_head; d; d = d->tx_next) {
/* Force a minor collection to run in the thread 'd'.
@@ -934,20 +940,49 @@
/* Hack: temporarily pretend that we "are" the other thread...
*/
assert(d->shadowstack_end_ref && *d->shadowstack_end_ref);
- thread_descriptor = d;
- stm_private_rev_num = *d->private_revision_ref;
+ /* set thread locals to expected values */
+ thread_descriptor = d;
+ stm_private_rev_num = *d->private_revision_ref;
stm_read_barrier_cache = *d->read_barrier_cache_ref;
+ stm_active = *d->active_ref;
+ stm_nursery_current = *d->nursery_current_ref;
+ stm_nursery_nextlimit = *d->nursery_nextlimit_ref;
+ /* save, then point _refs to the new thread-locals */
+ revision_t *d_private_revision_ref = d->private_revision_ref;
+ char **d_read_barrier_cache_ref = d->read_barrier_cache_ref;
+ int *d_active_ref = d->active_ref;
+ char **d_nursery_current_ref = d->nursery_current_ref;
+ char **d_nursery_nextlimit_ref = d->nursery_nextlimit_ref;
+ d->private_revision_ref = &stm_private_rev_num;
+ d->read_barrier_cache_ref = &stm_read_barrier_cache;
+ d->active_ref = &stm_active;
+ d->nursery_current_ref = &stm_nursery_current;
+ d->nursery_nextlimit_ref = &stm_nursery_nextlimit;
+ /* we impersonated the other thread. */
stmgc_minor_collect_no_abort();
- assert(stm_private_rev_num == *d->private_revision_ref);
- *d->read_barrier_cache_ref = stm_read_barrier_cache;
-
- thread_descriptor = saved;
- stm_private_rev_num = saved_private_rev;
- stm_read_barrier_cache = saved_read_barrier_cache;
+ /* priv_rev didn't change! others may have */
+ assert(*d_private_revision_ref == stm_private_rev_num);
+ *d_read_barrier_cache_ref = stm_read_barrier_cache;
+ *d_active_ref = stm_active;
+ *d_nursery_current_ref = stm_nursery_current;
+ *d_nursery_nextlimit_ref = stm_nursery_nextlimit;
+ /* restore _ref pointers in other thread */
+ d->private_revision_ref = d_private_revision_ref;
+ d->read_barrier_cache_ref = d_read_barrier_cache_ref;
+ d->active_ref = d_active_ref;
+ d->nursery_current_ref = d_nursery_current_ref;
+ d->nursery_nextlimit_ref = d_nursery_nextlimit_ref;
}
}
+ /* restore current thread */
+ thread_descriptor = saved;
+ stm_private_rev_num = saved_private_rev;
+ stm_read_barrier_cache = saved_read_barrier_cache;
+ stm_active = saved_active;
+ stm_nursery_current = saved_nursery_current;
+ stm_nursery_nextlimit = saved_nursery_nextlimit;
stmgc_minor_collect_no_abort();
}
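
The gcpage.c hunk above is the heart of the fix: before running a minor collection on behalf of another thread 'd', force_minor_collections() now also saves stm_active and the nursery pointers, loads d's values into the real thread-locals, and temporarily repoints d's *_ref fields at them; afterwards it writes the possibly-updated values back and restores everything. A condensed, hypothetical sketch of that save / impersonate / restore pattern (minor_collect_in() is an invented name and only two of the five variables are shown):

    /* Hypothetical illustration of the impersonation pattern; not the real code. */
    static __thread int   stm_active;
    static __thread char *stm_nursery_current;

    struct tx_descriptor {
        int   *active_ref;
        char **nursery_current_ref;
    };

    static void minor_collect_in(struct tx_descriptor *d)
    {
        /* 1. save our own thread-local values */
        int   saved_active  = stm_active;
        char *saved_current = stm_nursery_current;

        /* 2. load the other thread's values into our thread-locals */
        stm_active = *d->active_ref;
        stm_nursery_current = *d->nursery_current_ref;

        /* 3. save the other thread's _ref pointers and repoint them at our
              thread-locals, so code run during the collection sees them */
        int   *d_active_ref  = d->active_ref;
        char **d_current_ref = d->nursery_current_ref;
        d->active_ref = &stm_active;
        d->nursery_current_ref = &stm_nursery_current;

        /* ... run stmgc_minor_collect_no_abort() while impersonating 'd' ... */

        /* 4. write the updated values back and restore the _ref pointers */
        *d_active_ref  = stm_active;
        *d_current_ref = stm_nursery_current;
        d->active_ref = d_active_ref;
        d->nursery_current_ref = d_current_ref;

        /* 5. restore our own thread-local values */
        stm_active = saved_active;
        stm_nursery_current = saved_current;
    }
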
diff --git a/rpython/translator/stm/src_stm/nursery.c b/rpython/translator/stm/src_stm/nursery.c
--- a/rpython/translator/stm/src_stm/nursery.c
+++ b/rpython/translator/stm/src_stm/nursery.c
@@ -37,8 +37,8 @@
assert(d->nursery_base == NULL);
d->nursery_base = stm_malloc(GC_NURSERY); /* start of nursery */
d->nursery_end = d->nursery_base + GC_NURSERY; /* end of nursery */
- *d->nursery_current_ref = d->nursery_base; /* current position */
- *d->nursery_nextlimit_ref = d->nursery_base; /* next section limit */
+ stm_nursery_current = d->nursery_base; /* current position */
+ stm_nursery_nextlimit = d->nursery_base; /* next section limit */
d->nursery_cleared = NC_REGULAR;
dprintf(("minor: nursery is at [%p to %p]\n", d->nursery_base,
@@ -64,7 +64,7 @@
void stmgc_minor_collect_soon(void)
{
struct tx_descriptor *d = thread_descriptor;
- *d->nursery_current_ref = d->nursery_end;
+ stm_nursery_current = d->nursery_end;
}
inline static gcptr allocate_nursery(size_t size, revision_t tid)
@@ -72,11 +72,11 @@
/* if 'tid == -1', we must not collect */
struct tx_descriptor *d = thread_descriptor;
gcptr P;
- char *cur = *d->nursery_current_ref;
+ char *cur = stm_nursery_current;
char *end = cur + size;
assert((size & 3) == 0);
- *d->nursery_current_ref = end;
- if (end > *d->nursery_nextlimit_ref) {
+ stm_nursery_current = end;
+ if (end > stm_nursery_nextlimit) {
P = allocate_next_section(size, tid);
}
else {
@@ -593,7 +593,7 @@
First fix 'nursery_current', left to a bogus value by the caller.
*/
struct tx_descriptor *d = thread_descriptor;
- *d->nursery_current_ref -= allocate_size;
+ stm_nursery_current -= allocate_size;
/* Are we asking for a "reasonable" number of bytes, i.e. a value
at most equal to one section?
@@ -613,8 +613,8 @@
}
/* Are we at the end of the nursery? */
- if (*d->nursery_nextlimit_ref == d->nursery_end ||
- *d->nursery_current_ref == d->nursery_end) { // stmgc_minor_collect_soon()
+ if (stm_nursery_nextlimit == d->nursery_end ||
+ stm_nursery_current == d->nursery_end) { // stmgc_minor_collect_soon()
/* Yes */
if (tid == -1)
return NULL; /* cannot collect */
@@ -630,12 +630,12 @@
/* Clear the next section */
if (d->nursery_cleared != NC_ALREADY_CLEARED)
- memset(*d->nursery_nextlimit_ref, 0, GC_NURSERY_SECTION);
- *d->nursery_nextlimit_ref += GC_NURSERY_SECTION;
+ memset(stm_nursery_nextlimit, 0, GC_NURSERY_SECTION);
+ stm_nursery_nextlimit += GC_NURSERY_SECTION;
/* Return the object from there */
- gcptr P = (gcptr)(*d->nursery_current_ref);
- *d->nursery_current_ref += allocate_size;
+ gcptr P = (gcptr)(stm_nursery_current);
+ stm_nursery_current += allocate_size;
assert(*d->nursery_current_ref <= *d->nursery_nextlimit_ref);
P->h_tid = tid;
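
With the nursery pointers being real thread-locals, the allocation fast path in nursery.c simply bumps stm_nursery_current and compares it against stm_nursery_nextlimit. A stripped-down sketch of that fast path (nursery_alloc_fast() is an invented name; the real allocate_nursery() falls back to allocate_next_section() on overflow):

    /* Simplified bump-pointer fast path; not the real allocator. */
    #include <stddef.h>

    static __thread char *stm_nursery_current;
    static __thread char *stm_nursery_nextlimit;

    /* Returns NULL when the current nursery section is exhausted and the
       caller must take the slow path (refill the section or collect). */
    static void *nursery_alloc_fast(size_t size)
    {
        char *cur = stm_nursery_current;
        char *end = cur + size;
        if (end > stm_nursery_nextlimit)
            return NULL;                 /* slow path */
        stm_nursery_current = end;       /* bump the thread-local pointer */
        return cur;
    }
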
diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision
--- a/rpython/translator/stm/src_stm/revision
+++ b/rpython/translator/stm/src_stm/revision
@@ -1,1 +1,1 @@
-79aa5685d286
+8a3b7748ba7f
diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h
--- a/rpython/translator/stm/src_stm/stmgc.h
+++ b/rpython/translator/stm/src_stm/stmgc.h
@@ -136,7 +136,7 @@
/* change the default transaction length, and ask if now would be a good
time to break the transaction (by returning from the 'callback' above
with a positive value). */
-void stm_set_transaction_length(long length_max);
+void stm_set_transaction_length(long length_max); /* save roots! */
_Bool stm_should_break_transaction(void);
/* change the atomic counter by 'delta' and return the new value. Used
@@ -163,7 +163,7 @@
stm_inspect_abort_info(). (XXX details not documented yet) */
void stm_abort_info_push(gcptr obj, long fieldoffsets[]);
void stm_abort_info_pop(long count);
-char *stm_inspect_abort_info(void); /* turns inevitable */
+char *stm_inspect_abort_info(void); /* turns inevitable, push roots! */
/* mostly for debugging support */
void stm_abort_and_retry(void);
diff --git a/rpython/translator/stm/src_stm/stmsync.c b/rpython/translator/stm/src_stm/stmsync.c
--- a/rpython/translator/stm/src_stm/stmsync.c
+++ b/rpython/translator/stm/src_stm/stmsync.c
@@ -10,7 +10,7 @@
static revision_t sync_required = 0;
void stm_set_transaction_length(long length_max)
-{
+{ /* save roots around this call! */
BecomeInevitable("set_transaction_length");
if (length_max <= 0) {
length_max = 1;
@@ -43,7 +43,7 @@
d->reads_size_limit_nonatomic));
/* if is_inevitable(), reads_size_limit_nonatomic should be 0
(and thus reads_size_limit too, if !d->atomic.) */
- if (*d->active_ref == 2)
+ if (stm_active == 2)
assert(d->reads_size_limit_nonatomic == 0);
#endif
@@ -168,7 +168,7 @@
has configured 'reads_size_limit_nonatomic' to a smaller value.
When such a shortened transaction succeeds, the next one will
see its length limit doubled, up to the maximum. */
- if (counter == 0 && *d->active_ref != 2) {
+ if (counter == 0 && stm_active != 2) {
unsigned long limit = d->reads_size_limit_nonatomic;
if (limit != 0 && limit < (stm_regular_length_limit >> 1))
limit = (limit << 1) | 1;
@@ -183,7 +183,7 @@
/* atomic transaction: a common case is that callback() returned
even though we are atomic because we need a major GC. For
that case, release and reaquire the rw lock here. */
- assert(*d->active_ref >= 1);
+ assert(stm_active >= 1);
stm_possible_safe_point();
}
@@ -218,7 +218,7 @@
{ /* must save roots around this call */
struct tx_descriptor *d = thread_descriptor;
if (d->atomic) {
- assert(*d->active_ref >= 1);
+ assert(stm_active >= 1);
stm_possible_safe_point();
}
else {
@@ -267,7 +267,7 @@
int stm_in_transaction(void)
{
struct tx_descriptor *d = thread_descriptor;
- return d && *d->active_ref;
+ return d && stm_active;
}
/************************************************************/
@@ -337,7 +337,7 @@
void stm_partial_commit_and_resume_other_threads(void)
{ /* push gc roots! */
struct tx_descriptor *d = thread_descriptor;
- assert(*d->active_ref == 2);
+ assert(stm_active == 2);
int atomic = d->atomic;
/* Give up atomicity during commit. This still works because
@@ -391,7 +391,7 @@
/* Warning, may block waiting for rwlock_in_transaction while another
thread runs a major GC */
- assert(*thread_descriptor->active_ref);
+ assert(stm_active);
assert(in_single_thread != thread_descriptor);
stm_stop_sharedlock();