[pypy-commit] stmgc c8-overheads-instrumentation: Merge latest changes from master
tobweber
pypy.commits at gmail.com
Mon Jul 17 05:22:49 EDT 2017
Author: Tobias Weber <tobias_weber89 at gmx.de>
Branch: c8-overheads-instrumentation
Changeset: r2110:683f252182e3
Date: 2017-07-14 17:41 +0200
http://bitbucket.org/pypy/stmgc/changeset/683f252182e3/
Log: Merge latest changes from master
diff --git a/c8/stm/core.c b/c8/stm/core.c
--- a/c8/stm/core.c
+++ b/c8/stm/core.c
@@ -379,6 +379,14 @@
static void readd_wb_executed_flags(void);
static void check_all_write_barrier_flags(char *segbase, struct list_s *list);
+static void signal_commit_to_inevitable_transaction(void) {
+ struct stm_priv_segment_info_s* inevitable_segement = get_inevitable_thread_segment();
+ if (inevitable_segement != 0) {
+ // the inevitable thread is still running: set its "please commit" flag (the flag is ignored by the inevitable thread while it is atomic)
+ inevitable_segement->commit_if_not_atomic = true;
+ }
+}
+
static void wait_for_inevitable(void)
{
intptr_t detached = 0;
@@ -395,6 +403,8 @@
try to detach an inevitable transaction regularly */
detached = fetch_detached_transaction();
if (detached == 0) {
+ // the inevitable transaction was not detached, or it was detached but is atomic
+ signal_commit_to_inevitable_transaction();
EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE);
if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001))
goto wait_some_more;
@@ -1168,6 +1178,7 @@
_do_start_transaction(tl);
continue_timer();
+ STM_PSEGMENT->commit_if_not_atomic = false;
if (repeat_count == 0) { /* else, 'nursery_mark' was already set
in abort_data_structures_from_segment_num() */
STM_SEGMENT->nursery_mark = ((stm_char *)_stm_nursery_start +
@@ -1641,7 +1652,7 @@
void _stm_become_inevitable(const char *msg)
{
- int num_waits = 0;
+ int num_waits = 1;
timing_become_inevitable();
@@ -1652,50 +1663,48 @@
if (msg != MSG_INEV_DONT_SLEEP) {
dprintf(("become_inevitable: %s\n", msg));
- if (any_soon_finished_or_inevitable_thread_segment() &&
- num_waits <= NB_SEGMENTS) {
+ if (any_soon_finished_or_inevitable_thread_segment()) {
#if STM_TESTS /* for tests: another transaction */
stm_abort_transaction(); /* is already inevitable, abort */
#endif
- bool timed_out = false;
+ signal_commit_to_inevitable_transaction();
s_mutex_lock();
if (any_soon_finished_or_inevitable_thread_segment() &&
- !safe_point_requested()) {
+ !safe_point_requested() &&
+ num_waits <= NB_SEGMENTS) {
/* wait until C_SEGMENT_FREE_OR_SAFE_POINT_REQ is signalled */
EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE);
- if (!cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ,
- 0.000054321))
- timed_out = true;
+ if (cond_wait_timeout(C_SEGMENT_FREE_OR_SAFE_POINT_REQ, 0.00001)) {
+ num_waits++;
+ }
}
s_mutex_unlock();
-
- if (timed_out) {
- /* try to detach another inevitable transaction, but
- only after waiting a bit. This is necessary to avoid
- deadlocks in some situations, which are hopefully
- not too common. We don't want two threads constantly
- detaching each other. */
- intptr_t detached = fetch_detached_transaction();
- if (detached != 0) {
- EMIT_WAIT_DONE();
- commit_fetched_detached_transaction(detached);
- }
- }
- else {
- num_waits++;
+ /* XXX try to detach another inevitable transaction, but
+ only after waiting a bit. This is necessary to avoid
+ deadlocks in some situations, which are hopefully
+ not too common. We don't want two threads constantly
+ detaching each other. */
+ intptr_t detached = fetch_detached_transaction();
+ if (detached != 0) {
+ EMIT_WAIT_DONE();
+ commit_fetched_detached_transaction(detached);
+ EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE);
}
goto retry_from_start;
}
- EMIT_WAIT_DONE();
- if (!_validate_and_turn_inevitable())
- goto retry_from_start;
+ else {
+ EMIT_WAIT_DONE();
+ if (!_validate_and_turn_inevitable()) {
+ EMIT_WAIT(STM_WAIT_OTHER_INEVITABLE);
+ goto retry_from_start;
+ }
+ }
}
- else {
- if (!_validate_and_turn_inevitable())
- return;
+ else if (!_validate_and_turn_inevitable()) {
+ return;
}
/* There may be a concurrent commit of a detached Tx going on.
@@ -1707,6 +1716,7 @@
stm_spin_loop();
assert(_stm_detached_inevitable_from_thread == 0);
+ STM_PSEGMENT->commit_if_not_atomic = false;
soon_finished_or_inevitable_thread_segment();
STM_PSEGMENT->transaction_state = TS_INEVITABLE;
diff --git a/c8/stm/core.h b/c8/stm/core.h
--- a/c8/stm/core.h
+++ b/c8/stm/core.h
@@ -169,6 +169,9 @@
/* For stm_enable_atomic() */
uintptr_t atomic_nesting_levels;
+
+ // TODO: signal flag that is checked in throw_away_nursery() to force an immediate commit
+ bool commit_if_not_atomic;
};
enum /* safe_point */ {
diff --git a/c8/stm/detach.c b/c8/stm/detach.c
--- a/c8/stm/detach.c
+++ b/c8/stm/detach.c
@@ -215,6 +215,7 @@
}
}
+// TODO write tests, verify it is working, verify no overflows with adaptive mode
uintptr_t stm_is_atomic(stm_thread_local_t *tl)
{
assert(STM_SEGMENT->running_thread == tl);
@@ -228,14 +229,18 @@
return STM_PSEGMENT->atomic_nesting_levels;
}
+// max intptr_t value is 0x7FFFFFFFFFFFFFFF on 64-bit => larger than 2 * huge value
#define HUGE_INTPTR_VALUE 0x3000000000000000L
void stm_enable_atomic(stm_thread_local_t *tl)
{
if (!stm_is_atomic(tl)) {
+ // do for outermost atomic block only
tl->self_or_0_if_atomic = 0;
/* increment 'nursery_mark' by HUGE_INTPTR_VALUE, so that
- stm_should_break_transaction() returns always false */
+ stm_should_break_transaction() returns always false.
+ preserves the previous nursery_mark, unless it is < 0
+ or >= huge value */
intptr_t mark = (intptr_t)STM_SEGMENT->nursery_mark;
if (mark < 0)
mark = 0;
@@ -255,6 +260,7 @@
STM_PSEGMENT->atomic_nesting_levels--;
if (STM_PSEGMENT->atomic_nesting_levels == 0) {
+ // revert the changes made by stm_enable_atomic() only when leaving the outermost atomic block
tl->self_or_0_if_atomic = (intptr_t)tl;
/* decrement 'nursery_mark' by HUGE_INTPTR_VALUE, to cancel
what was done in stm_enable_atomic() */
diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c
--- a/c8/stm/nursery.c
+++ b/c8/stm/nursery.c
@@ -500,6 +500,14 @@
pseg->pub.nursery_current = (stm_char *)_stm_nursery_start;
pseg->pub.nursery_mark -= nursery_used;
+ assert((pseg->transaction_state == TS_INEVITABLE) || !pseg->commit_if_not_atomic);
+ if (pseg->commit_if_not_atomic
+ && pseg->transaction_state == TS_INEVITABLE
+ && pseg->pub.running_thread->self_or_0_if_atomic != 0) {
+ // transaction is inevitable, not atomic, and commit has been signalled by waiting thread: commit immediately
+ pseg->pub.nursery_mark = 0;
+ }
+
/* free any object left from 'young_outside_nursery' */
if (!tree_is_cleared(pseg->young_outside_nursery)) {
wlog_t *item;
diff --git a/c8/stm/sync.c b/c8/stm/sync.c
--- a/c8/stm/sync.c
+++ b/c8/stm/sync.c
@@ -293,6 +293,19 @@
return false;
}
+static struct stm_priv_segment_info_s* get_inevitable_thread_segment(void)
+{
+ struct stm_priv_segment_info_s* segment;
+ int num;
+ for (num = 1; num < NB_SEGMENTS; num++) {
+ segment = get_priv_segment(num);
+ if (segment->transaction_state == TS_INEVITABLE) {
+ return segment;
+ }
+ }
+ return 0;
+}
+
__attribute__((unused))
static bool _seems_to_be_running_transaction(void)
{
diff --git a/c8/stm/sync.h b/c8/stm/sync.h
--- a/c8/stm/sync.h
+++ b/c8/stm/sync.h
@@ -29,6 +29,7 @@
static void release_thread_segment(stm_thread_local_t *tl);
static void soon_finished_or_inevitable_thread_segment(void);
static bool any_soon_finished_or_inevitable_thread_segment(void);
+static struct stm_priv_segment_info_s* get_inevitable_thread_segment(void);
enum sync_type_e {
STOP_OTHERS_UNTIL_MUTEX_UNLOCK,
More information about the pypy-commit
mailing list