[pypy-commit] pypy stmgc-c8-gil-like: import stmgc, branch c8-gil-like
arigo
noreply at buildbot.pypy.org
Fri Jun 12 19:00:34 CEST 2015
Author: Armin Rigo <arigo at tunes.org>
Branch: stmgc-c8-gil-like
Changeset: r78048:ffc83930d682
Date: 2015-06-12 16:53 +0200
http://bitbucket.org/pypy/pypy/changeset/ffc83930d682/
Log: import stmgc, branch c8-gil-like
diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision
--- a/rpython/translator/stm/src_stm/revision
+++ b/rpython/translator/stm/src_stm/revision
@@ -1,1 +1,1 @@
-f0d995d5609d
+bf0dfe206de5
diff --git a/rpython/translator/stm/src_stm/stm/atomic.h b/rpython/translator/stm/src_stm/stm/atomic.h
--- a/rpython/translator/stm/src_stm/stm/atomic.h
+++ b/rpython/translator/stm/src_stm/stm/atomic.h
@@ -24,15 +24,21 @@
#if defined(__i386__) || defined(__amd64__)
-# define HAVE_FULL_EXCHANGE_INSN
static inline void spin_loop(void) { asm("pause" : : : "memory"); }
static inline void write_fence(void) { asm("" : : : "memory"); }
+/*# define atomic_exchange(ptr, old, new) do { \
+ (old) = __sync_lock_test_and_set(ptr, new); \
+ } while (0)*/
#else
static inline void spin_loop(void) { asm("" : : : "memory"); }
static inline void write_fence(void) { __sync_synchronize(); }
+/*# define atomic_exchange(ptr, old, new) do { \
+ (old) = *(ptr); \
+ } while (UNLIKELY(!__sync_bool_compare_and_swap(ptr, old, new))); */
+
#endif
diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c
--- a/rpython/translator/stm/src_stm/stm/core.c
+++ b/rpython/translator/stm/src_stm/stm/core.c
@@ -324,10 +324,7 @@
/* Don't check this 'cl'. This entry is already checked */
if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) {
- //assert(first_cl->next == INEV_RUNNING);
- /* the above assert may fail when running a major collection
- while the commit of the inevitable transaction is in progress
- and the element is already attached */
+ assert(first_cl->next == INEV_RUNNING);
return true;
}
@@ -496,11 +493,23 @@
static void wait_for_other_inevitable(struct stm_commit_log_entry_s *old)
{
+ intptr_t detached = fetch_detached_transaction();
+ if (detached != 0) {
+ commit_fetched_detached_transaction(detached);
+ return;
+ }
+
timing_event(STM_SEGMENT->running_thread, STM_WAIT_OTHER_INEVITABLE);
while (old->next == INEV_RUNNING && !safe_point_requested()) {
spin_loop();
usleep(10); /* XXXXXX */
+
+ detached = fetch_detached_transaction();
+ if (detached != 0) {
+ commit_fetched_detached_transaction(detached);
+ break;
+ }
}
timing_event(STM_SEGMENT->running_thread, STM_WAIT_DONE);
}
@@ -509,7 +518,8 @@
static void readd_wb_executed_flags(void);
static void check_all_write_barrier_flags(char *segbase, struct list_s *list);
-static void _validate_and_attach(struct stm_commit_log_entry_s *new)
+static bool _validate_and_attach(struct stm_commit_log_entry_s *new,
+ bool can_sleep)
{
struct stm_commit_log_entry_s *old;
@@ -571,6 +581,8 @@
/* XXXXXX for now just sleep. We should really ask to inev
transaction to do the commit for us, and then we can
continue running. */
+ if (!can_sleep)
+ return false;
dprintf(("_validate_and_attach(%p) failed, "
"waiting for inevitable\n", new));
wait_for_other_inevitable(old);
@@ -591,18 +603,17 @@
if (is_commit) {
/* compare with _validate_and_add_to_commit_log */
- STM_PSEGMENT->transaction_state = TS_NONE;
- STM_PSEGMENT->safe_point = SP_NO_TRANSACTION;
-
list_clear(STM_PSEGMENT->modified_old_objects);
STM_PSEGMENT->last_commit_log_entry = new;
release_modification_lock_wr(STM_SEGMENT->segment_num);
}
+ return true;
}
-static void _validate_and_turn_inevitable(void)
+static bool _validate_and_turn_inevitable(bool can_sleep)
{
- _validate_and_attach((struct stm_commit_log_entry_s *)INEV_RUNNING);
+ return _validate_and_attach((struct stm_commit_log_entry_s *)INEV_RUNNING,
+ can_sleep);
}
static void _validate_and_add_to_commit_log(void)
@@ -611,6 +622,8 @@
new = _create_commit_log_entry();
if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) {
+ assert(_stm_detached_inevitable_from_thread == 0); /* running it */
+
old = STM_PSEGMENT->last_commit_log_entry;
new->rev_num = old->rev_num + 1;
OPT_ASSERT(old->next == INEV_RUNNING);
@@ -621,17 +634,18 @@
STM_PSEGMENT->modified_old_objects);
/* compare with _validate_and_attach: */
- STM_PSEGMENT->transaction_state = TS_NONE;
- STM_PSEGMENT->safe_point = SP_NO_TRANSACTION;
+ acquire_modification_lock_wr(STM_SEGMENT->segment_num);
list_clear(STM_PSEGMENT->modified_old_objects);
STM_PSEGMENT->last_commit_log_entry = new;
/* do it: */
bool yes = __sync_bool_compare_and_swap(&old->next, INEV_RUNNING, new);
OPT_ASSERT(yes);
+
+ release_modification_lock_wr(STM_SEGMENT->segment_num);
}
else {
- _validate_and_attach(new);
+ _validate_and_attach(new, /*can_sleep=*/true);
}
}
@@ -1123,7 +1137,7 @@
-static void _stm_start_transaction(stm_thread_local_t *tl)
+static void _do_start_transaction(stm_thread_local_t *tl)
{
assert(!_stm_in_transaction(tl));
@@ -1181,7 +1195,7 @@
stm_validate();
}
-long stm_start_transaction(stm_thread_local_t *tl)
+long _stm_start_transaction(stm_thread_local_t *tl)
{
s_mutex_lock();
#ifdef STM_NO_AUTOMATIC_SETJMP
@@ -1189,23 +1203,10 @@
#else
long repeat_count = stm_rewind_jmp_setjmp(tl);
#endif
- _stm_start_transaction(tl);
+ _do_start_transaction(tl);
return repeat_count;
}
-void stm_start_inevitable_transaction(stm_thread_local_t *tl)
-{
- /* used to be more efficient, starting directly an inevitable transaction,
- but there is no real point any more, I believe */
- rewind_jmp_buf rjbuf;
- stm_rewind_jmp_enterframe(tl, &rjbuf);
-
- stm_start_transaction(tl);
- stm_become_inevitable(tl, "start_inevitable_transaction");
-
- stm_rewind_jmp_leaveframe(tl, &rjbuf);
-}
-
#ifdef STM_NO_AUTOMATIC_SETJMP
void _test_run_abort(stm_thread_local_t *tl) __attribute__((noreturn));
int stm_is_inevitable(void)
@@ -1224,6 +1225,7 @@
{
stm_thread_local_t *tl = STM_SEGMENT->running_thread;
+ assert(_has_mutex());
STM_PSEGMENT->safe_point = SP_NO_TRANSACTION;
STM_PSEGMENT->transaction_state = TS_NONE;
@@ -1231,7 +1233,15 @@
list_clear(STM_PSEGMENT->objects_pointing_to_nursery);
list_clear(STM_PSEGMENT->old_objects_with_cards_set);
list_clear(STM_PSEGMENT->large_overflow_objects);
- timing_event(tl, event);
+ if (tl != NULL)
+ timing_event(tl, event);
+
+ /* If somebody is waiting for us to reach a safe point, we simply
+ signal it now and leave this transaction. This should be enough
+ for synchronize_all_threads() to retry and notice that we are
+ no longer SP_RUNNING. */
+ if (STM_SEGMENT->nursery_end != NURSERY_END)
+ cond_signal(C_AT_SAFE_POINT);
release_thread_segment(tl);
/* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */
@@ -1280,24 +1290,55 @@
}
-void stm_commit_transaction(void)
+void _stm_commit_transaction(void)
+{
+ assert(STM_PSEGMENT->running_pthread == pthread_self());
+ _core_commit_transaction(/*external=*/ false);
+}
+
+static void _core_commit_transaction(bool external)
{
exec_local_finalizers();
assert(!_has_mutex());
assert(STM_PSEGMENT->safe_point == SP_RUNNING);
- assert(STM_PSEGMENT->running_pthread == pthread_self());
+ assert(STM_PSEGMENT->transaction_state != TS_NONE);
+ if (globally_unique_transaction) {
+ stm_fatalerror("cannot commit between stm_stop_all_other_threads "
+ "and stm_resume_all_other_threads");
+ }
- dprintf(("> stm_commit_transaction()\n"));
- minor_collection(1);
+ dprintf(("> stm_commit_transaction(external=%d)\n", (int)external));
+ minor_collection(/*commit=*/ true, external);
+ if (!external && is_major_collection_requested()) {
+ s_mutex_lock();
+ if (is_major_collection_requested()) { /* if still true */
+ major_collection_with_mutex();
+ }
+ s_mutex_unlock();
+ }
push_large_overflow_objects_to_other_segments();
/* push before validate. otherwise they are reachable too early */
+ if (external) {
+ /* from this point on, unlink the original 'stm_thread_local_t *'
+ from its segment. Better do it as soon as possible, because
+ other threads might be spin-looping, waiting for the -1 to
+ disappear. */
+ STM_SEGMENT->running_thread = NULL;
+ write_fence();
+ assert(_stm_detached_inevitable_from_thread == -1);
+ _stm_detached_inevitable_from_thread = 0;
+ }
+
bool was_inev = STM_PSEGMENT->transaction_state == TS_INEVITABLE;
_validate_and_add_to_commit_log();
- stm_rewind_jmp_forget(STM_SEGMENT->running_thread);
+ if (!was_inev) {
+ assert(!external);
+ stm_rewind_jmp_forget(STM_SEGMENT->running_thread);
+ }
/* XXX do we still need a s_mutex_lock() section here? */
s_mutex_lock();
@@ -1314,23 +1355,9 @@
invoke_and_clear_user_callbacks(0); /* for commit */
- /* >>>>> there may be a FORK() happening in the safepoint below <<<<<*/
- enter_safe_point_if_requested();
- assert(STM_SEGMENT->nursery_end == NURSERY_END);
-
- /* if a major collection is required, do it here */
- if (is_major_collection_requested()) {
- major_collection_with_mutex();
- }
-
- _verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num));
-
- if (globally_unique_transaction && was_inev) {
- committed_globally_unique_transaction();
- }
-
/* done */
stm_thread_local_t *tl = STM_SEGMENT->running_thread;
+ assert(external == (tl == NULL));
_finish_transaction(STM_TRANSACTION_COMMIT);
/* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */
@@ -1338,7 +1365,8 @@
/* between transactions, call finalizers. this will execute
a transaction itself */
- invoke_general_finalizers(tl);
+ if (tl != NULL)
+ invoke_general_finalizers(tl);
}
static void reset_modified_from_backup_copies(int segment_num)
@@ -1502,32 +1530,36 @@
void _stm_become_inevitable(const char *msg)
{
- if (STM_PSEGMENT->transaction_state == TS_REGULAR) {
+ assert(STM_PSEGMENT->transaction_state == TS_REGULAR);
+ _stm_collectable_safe_point();
+
+ if (msg != MSG_INEV_DONT_SLEEP) {
dprintf(("become_inevitable: %s\n", msg));
- _stm_collectable_safe_point();
timing_become_inevitable();
-
- _validate_and_turn_inevitable();
- STM_PSEGMENT->transaction_state = TS_INEVITABLE;
-
- stm_rewind_jmp_forget(STM_SEGMENT->running_thread);
- invoke_and_clear_user_callbacks(0); /* for commit */
+ _validate_and_turn_inevitable(/*can_sleep=*/true);
}
else {
- assert(STM_PSEGMENT->transaction_state == TS_INEVITABLE);
+ if (!_validate_and_turn_inevitable(/*can_sleep=*/false))
+ return;
+ timing_become_inevitable();
}
+ STM_PSEGMENT->transaction_state = TS_INEVITABLE;
+
+ stm_rewind_jmp_forget(STM_SEGMENT->running_thread);
+ invoke_and_clear_user_callbacks(0); /* for commit */
}
+#if 0
void stm_become_globally_unique_transaction(stm_thread_local_t *tl,
const char *msg)
{
- stm_become_inevitable(tl, msg); /* may still abort */
+ stm_become_inevitable(tl, msg);
s_mutex_lock();
synchronize_all_threads(STOP_OTHERS_AND_BECOME_GLOBALLY_UNIQUE);
s_mutex_unlock();
}
-
+#endif
void stm_stop_all_other_threads(void)
{
diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h
--- a/rpython/translator/stm/src_stm/stm/core.h
+++ b/rpython/translator/stm/src_stm/stm/core.h
@@ -170,6 +170,12 @@
TS_INEVITABLE,
};
+#define MSG_INEV_DONT_SLEEP ((const char *)1)
+
+#define in_transaction(tl) \
+ (get_segment((tl)->last_associated_segment_num)->running_thread == (tl))
+
+
/* Commit Log things */
struct stm_undo_s {
union {
@@ -293,6 +299,7 @@
static void _signal_handler(int sig, siginfo_t *siginfo, void *context);
static bool _stm_validate(void);
+static void _core_commit_transaction(bool external);
static inline bool was_read_remote(char *base, object_t *obj)
{
diff --git a/rpython/translator/stm/src_stm/stm/detach.c b/rpython/translator/stm/src_stm/stm/detach.c
new file mode 100644
--- /dev/null
+++ b/rpython/translator/stm/src_stm/stm/detach.c
@@ -0,0 +1,175 @@
+/* Imported by rpython/translator/stm/import_stmgc.py */
+#ifndef _STM_CORE_H_
+# error "must be compiled via stmgc.c"
+#endif
+#include <errno.h>
+
+
+/* Idea: if stm_leave_transactional_zone() is quickly followed by
+ stm_enter_transactional_zone() in the same thread, then we should
+ simply try to have one inevitable transaction that does both sides.
+ This is useful if there are many such small interruptions.
+
+ stm_leave_transactional_zone() tries to make sure the transaction
+ is inevitable, and then sticks the current 'stm_thread_local_t *'
+ into _stm_detached_inevitable_from_thread.
+ stm_enter_transactional_zone() has a fast-path if the same
+ 'stm_thread_local_t *' is still there.
+
+ If a different thread grabs it, it atomically replaces the value in
+ _stm_detached_inevitable_from_thread with -1, commits it (this part
+ involves reading for example the shadowstack of the thread that
+ originally detached), and at the point where we know the original
+ stm_thread_local_t is no longer relevant, we reset
+ _stm_detached_inevitable_from_thread to 0.
+*/
+
+volatile intptr_t _stm_detached_inevitable_from_thread;
+
+
+static void setup_detach(void)
+{
+ _stm_detached_inevitable_from_thread = 0;
+}
+
+
+void _stm_leave_noninevitable_transactional_zone(void)
+{
+ int saved_errno = errno;
+ dprintf(("leave_noninevitable_transactional_zone\n"));
+ _stm_become_inevitable(MSG_INEV_DONT_SLEEP);
+
+ /* did it work? */
+ if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { /* yes */
+ dprintf(("leave_noninevitable_transactional_zone: now inevitable\n"));
+ stm_thread_local_t *tl = STM_SEGMENT->running_thread;
+ _stm_detach_inevitable_transaction(tl);
+ }
+ else { /* no */
+ dprintf(("leave_noninevitable_transactional_zone: commit\n"));
+ _stm_commit_transaction();
+ }
+ errno = saved_errno;
+}
+
+static void commit_external_inevitable_transaction(void)
+{
+ assert(STM_PSEGMENT->transaction_state == TS_INEVITABLE); /* can't abort */
+ _core_commit_transaction(/*external=*/ true);
+}
+
+void _stm_reattach_transaction(stm_thread_local_t *tl)
+{
+ intptr_t old;
+ int saved_errno = errno;
+ restart:
+ old = _stm_detached_inevitable_from_thread;
+ if (old != 0) {
+ if (old == -1) {
+ /* busy-loop: wait until _stm_detached_inevitable_from_thread
+ is reset to a value different from -1 */
+ dprintf(("reattach_transaction: busy wait...\n"));
+ while (_stm_detached_inevitable_from_thread == -1)
+ spin_loop();
+
+ /* then retry */
+ goto restart;
+ }
+
+ if (!__sync_bool_compare_and_swap(&_stm_detached_inevitable_from_thread,
+ old, -1))
+ goto restart;
+
+ stm_thread_local_t *old_tl = (stm_thread_local_t *)old;
+ int remote_seg_num = old_tl->last_associated_segment_num;
+ dprintf(("reattach_transaction: commit detached from seg %d\n",
+ remote_seg_num));
+
+ tl->last_associated_segment_num = remote_seg_num;
+ ensure_gs_register(remote_seg_num);
+ commit_external_inevitable_transaction();
+ }
+ dprintf(("reattach_transaction: start a new transaction\n"));
+ _stm_start_transaction(tl);
+ errno = saved_errno;
+}
+
+void stm_force_transaction_break(stm_thread_local_t *tl)
+{
+ dprintf(("> stm_force_transaction_break()\n"));
+ assert(STM_SEGMENT->running_thread == tl);
+ _stm_commit_transaction();
+ _stm_start_transaction(tl);
+}
+
+static intptr_t fetch_detached_transaction(void)
+{
+ intptr_t cur;
+ restart:
+ cur = _stm_detached_inevitable_from_thread;
+ if (cur == 0) { /* fast-path */
+ return 0; /* _stm_detached_inevitable_from_thread not changed */
+ }
+ if (cur == -1) {
+ /* busy-loop: wait until _stm_detached_inevitable_from_thread
+ is reset to a value different from -1 */
+ while (_stm_detached_inevitable_from_thread == -1)
+ spin_loop();
+ goto restart;
+ }
+ if (!__sync_bool_compare_and_swap(&_stm_detached_inevitable_from_thread,
+ cur, -1))
+ goto restart;
+
+ /* this is the only case where we grabbed a detached transaction.
+ _stm_detached_inevitable_from_thread is still -1, until
+ commit_fetched_detached_transaction() is called. */
+ assert(_stm_detached_inevitable_from_thread == -1);
+ return cur;
+}
+
+static void commit_fetched_detached_transaction(intptr_t old)
+{
+ /* Here, 'seg_num' is the segment that contains the detached
+ inevitable transaction from fetch_detached_transaction(),
+ probably belonging to an unrelated thread. We fetched it,
+ which means that nobody else can concurrently fetch it now, but
+ everybody will see that there is still a concurrent inevitable
+ transaction. This should guarantee there are no race
+ conditions.
+ */
+ int mysegnum = STM_SEGMENT->segment_num;
+ int segnum = ((stm_thread_local_t *)old)->last_associated_segment_num;
+ dprintf(("commit_fetched_detached_transaction from seg %d\n", segnum));
+ assert(segnum > 0);
+
+ if (segnum != mysegnum) {
+ set_gs_register(get_segment_base(segnum));
+ }
+ commit_external_inevitable_transaction();
+
+ if (segnum != mysegnum) {
+ set_gs_register(get_segment_base(mysegnum));
+ }
+}
+
+static void commit_detached_transaction_if_from(stm_thread_local_t *tl)
+{
+ intptr_t old;
+ restart:
+ old = _stm_detached_inevitable_from_thread;
+ if (old == (intptr_t)tl) {
+ if (!__sync_bool_compare_and_swap(&_stm_detached_inevitable_from_thread,
+ old, -1))
+ goto restart;
+ commit_fetched_detached_transaction(old);
+ return;
+ }
+ if (old == -1) {
+ /* busy-loop: wait until _stm_detached_inevitable_from_thread
+ is reset to a value different from -1 */
+ while (_stm_detached_inevitable_from_thread == -1)
+ spin_loop();
+ goto restart;
+ }
+}
diff --git a/rpython/translator/stm/src_stm/stm/detach.h b/rpython/translator/stm/src_stm/stm/detach.h
new file mode 100644
--- /dev/null
+++ b/rpython/translator/stm/src_stm/stm/detach.h
@@ -0,0 +1,5 @@
+/* Imported by rpython/translator/stm/import_stmgc.py */
+static void setup_detach(void);
+static intptr_t fetch_detached_transaction(void);
+static void commit_fetched_detached_transaction(intptr_t old);
+static void commit_detached_transaction_if_from(stm_thread_local_t *tl);
diff --git a/rpython/translator/stm/src_stm/stm/extra.c b/rpython/translator/stm/src_stm/stm/extra.c
--- a/rpython/translator/stm/src_stm/stm/extra.c
+++ b/rpython/translator/stm/src_stm/stm/extra.c
@@ -8,7 +8,7 @@
{
dprintf(("register_callbacks: tl=%p key=%p callback=%p index=%ld\n",
tl, key, callback, index));
- if (tl->associated_segment_num == -1) {
+ if (!in_transaction(tl)) {
/* check that the provided thread-local is really running a
transaction, and do nothing otherwise. */
dprintf((" NOT IN TRANSACTION\n"));
diff --git a/rpython/translator/stm/src_stm/stm/finalizer.c b/rpython/translator/stm/src_stm/stm/finalizer.c
--- a/rpython/translator/stm/src_stm/stm/finalizer.c
+++ b/rpython/translator/stm/src_stm/stm/finalizer.c
@@ -494,11 +494,11 @@
rewind_jmp_buf rjbuf;
stm_rewind_jmp_enterframe(tl, &rjbuf);
- stm_start_transaction(tl);
+ _stm_start_transaction(tl);
_execute_finalizers(&g_finalizers);
- stm_commit_transaction();
+ _stm_commit_transaction();
stm_rewind_jmp_leaveframe(tl, &rjbuf);
__sync_lock_release(&lock);
diff --git a/rpython/translator/stm/src_stm/stm/forksupport.c b/rpython/translator/stm/src_stm/stm/forksupport.c
--- a/rpython/translator/stm/src_stm/stm/forksupport.c
+++ b/rpython/translator/stm/src_stm/stm/forksupport.c
@@ -40,7 +40,8 @@
bool was_in_transaction = _stm_in_transaction(this_tl);
if (!was_in_transaction)
- stm_start_transaction(this_tl);
+ _stm_start_transaction(this_tl);
+ assert(in_transaction(this_tl));
stm_become_inevitable(this_tl, "fork");
/* Note that the line above can still fail and abort, which should
@@ -72,7 +73,7 @@
s_mutex_unlock();
if (!was_in_transaction) {
- stm_commit_transaction();
+ _stm_commit_transaction();
}
dprintf(("forksupport_parent: continuing to run\n"));
@@ -83,7 +84,8 @@
struct stm_priv_segment_info_s *pr = get_priv_segment(i);
stm_thread_local_t *tl = pr->pub.running_thread;
dprintf(("forksupport_child: abort in seg%ld\n", i));
- assert(tl->associated_segment_num == i);
+ assert(tl->last_associated_segment_num == i);
+ assert(in_transaction(tl));
assert(pr->transaction_state != TS_INEVITABLE);
set_gs_register(get_segment_base(i));
assert(STM_SEGMENT->segment_num == i);
@@ -150,14 +152,14 @@
/* Restore a few things: the new pthread_self(), and the %gs
register */
- int segnum = fork_this_tl->associated_segment_num;
+ int segnum = fork_this_tl->last_associated_segment_num;
assert(1 <= segnum && segnum < NB_SEGMENTS);
*_get_cpth(fork_this_tl) = pthread_self();
set_gs_register(get_segment_base(segnum));
assert(STM_SEGMENT->segment_num == segnum);
if (!fork_was_in_transaction) {
- stm_commit_transaction();
+ _stm_commit_transaction();
}
/* Done */
diff --git a/rpython/translator/stm/src_stm/stm/fprintcolor.h b/rpython/translator/stm/src_stm/stm/fprintcolor.h
--- a/rpython/translator/stm/src_stm/stm/fprintcolor.h
+++ b/rpython/translator/stm/src_stm/stm/fprintcolor.h
@@ -37,5 +37,6 @@
/* ------------------------------------------------------------ */
+__attribute__((unused))
static void stm_fatalerror(const char *format, ...)
__attribute__((format (printf, 1, 2), noreturn));
diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c
--- a/rpython/translator/stm/src_stm/stm/nursery.c
+++ b/rpython/translator/stm/src_stm/stm/nursery.c
@@ -309,6 +309,7 @@
else
assert(finalbase <= ssbase && ssbase <= current);
+ dprintf(("collect_roots_in_nursery:\n"));
while (current > ssbase) {
--current;
uintptr_t x = (uintptr_t)current->ss;
@@ -320,6 +321,7 @@
else {
/* it is an odd-valued marker, ignore */
}
+ dprintf((" %p: %p -> %p\n", current, (void *)x, current->ss));
}
minor_trace_if_young(&tl->thread_local_obj);
@@ -519,6 +521,7 @@
static void _do_minor_collection(bool commit)
{
dprintf(("minor_collection commit=%d\n", (int)commit));
+ assert(!STM_SEGMENT->no_safe_point_here);
STM_PSEGMENT->minor_collect_will_commit_now = commit;
@@ -561,11 +564,12 @@
assert(MINOR_NOTHING_TO_DO(STM_PSEGMENT));
}
-static void minor_collection(bool commit)
+static void minor_collection(bool commit, bool external)
{
assert(!_has_mutex());
- stm_safe_point();
+ if (!external)
+ stm_safe_point();
timing_event(STM_SEGMENT->running_thread, STM_GC_MINOR_START);
@@ -579,7 +583,7 @@
if (level > 0)
force_major_collection_request();
- minor_collection(/*commit=*/ false);
+ minor_collection(/*commit=*/ false, /*external=*/ false);
#ifdef STM_TESTS
/* tests don't want aborts in stm_allocate, thus
diff --git a/rpython/translator/stm/src_stm/stm/nursery.h b/rpython/translator/stm/src_stm/stm/nursery.h
--- a/rpython/translator/stm/src_stm/stm/nursery.h
+++ b/rpython/translator/stm/src_stm/stm/nursery.h
@@ -10,7 +10,7 @@
object_t *obj, uint8_t mark_value,
bool mark_all, bool really_clear);
-static void minor_collection(bool commit);
+static void minor_collection(bool commit, bool external);
static void check_nursery_at_transaction_start(void);
static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg);
static void major_do_validation_and_minor_collections(void);
diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c
--- a/rpython/translator/stm/src_stm/stm/setup.c
+++ b/rpython/translator/stm/src_stm/stm/setup.c
@@ -134,8 +134,12 @@
setup_pages();
setup_forksupport();
setup_finalizer();
+ setup_detach();
set_gs_register(get_segment_base(0));
+
+ dprintf(("nursery: %p -> %p\n", (void *)NURSERY_START,
+ (void *)NURSERY_END));
}
void stm_teardown(void)
@@ -244,7 +248,6 @@
/* assign numbers consecutively, but that's for tests; we could also
assign the same number to all of them and they would get their own
numbers automatically. */
- tl->associated_segment_num = -1;
tl->last_associated_segment_num = num + 1;
tl->thread_local_counter = ++thread_local_counters;
*_get_cpth(tl) = pthread_self();
@@ -264,6 +267,8 @@
void stm_unregister_thread_local(stm_thread_local_t *tl)
{
+ commit_detached_transaction_if_from(tl);
+
s_mutex_lock();
assert(tl->prev != NULL);
assert(tl->next != NULL);
diff --git a/rpython/translator/stm/src_stm/stm/sync.c b/rpython/translator/stm/src_stm/stm/sync.c
--- a/rpython/translator/stm/src_stm/stm/sync.c
+++ b/rpython/translator/stm/src_stm/stm/sync.c
@@ -2,6 +2,7 @@
#include <sys/syscall.h>
#include <sys/prctl.h>
#include <asm/prctl.h>
+#include <time.h>
#ifndef _STM_CORE_H_
# error "must be compiled via stmgc.c"
#endif
@@ -21,25 +22,29 @@
static void setup_sync(void)
{
- if (pthread_mutex_init(&sync_ctl.global_mutex, NULL) != 0)
- stm_fatalerror("mutex initialization: %m");
+ int err = pthread_mutex_init(&sync_ctl.global_mutex, NULL);
+ if (err != 0)
+ stm_fatalerror("mutex initialization: %d", err);
long i;
for (i = 0; i < _C_TOTAL; i++) {
- if (pthread_cond_init(&sync_ctl.cond[i], NULL) != 0)
- stm_fatalerror("cond initialization: %m");
+ err = pthread_cond_init(&sync_ctl.cond[i], NULL);
+ if (err != 0)
+ stm_fatalerror("cond initialization: %d", err);
}
}
static void teardown_sync(void)
{
- if (pthread_mutex_destroy(&sync_ctl.global_mutex) != 0)
- stm_fatalerror("mutex destroy: %m");
+ int err = pthread_mutex_destroy(&sync_ctl.global_mutex);
+ if (err != 0)
+ stm_fatalerror("mutex destroy: %d", err);
long i;
for (i = 0; i < _C_TOTAL; i++) {
- if (pthread_cond_destroy(&sync_ctl.cond[i]) != 0)
- stm_fatalerror("cond destroy: %m");
+ err = pthread_cond_destroy(&sync_ctl.cond[i]);
+ if (err != 0)
+ stm_fatalerror("cond destroy: %d", err);
}
memset(&sync_ctl, 0, sizeof(sync_ctl));
@@ -59,19 +64,30 @@
stm_fatalerror("syscall(arch_prctl, ARCH_SET_GS): %m");
}
+static void ensure_gs_register(long segnum)
+{
+ /* XXX use this instead of set_gs_register() in many places */
+ if (STM_SEGMENT->segment_num != segnum) {
+ set_gs_register(get_segment_base(segnum));
+ assert(STM_SEGMENT->segment_num == segnum);
+ }
+}
+
static inline void s_mutex_lock(void)
{
assert(!_has_mutex_here);
- if (UNLIKELY(pthread_mutex_lock(&sync_ctl.global_mutex) != 0))
- stm_fatalerror("pthread_mutex_lock: %m");
+ int err = pthread_mutex_lock(&sync_ctl.global_mutex);
+ if (UNLIKELY(err != 0))
+ stm_fatalerror("pthread_mutex_lock: %d", err);
assert((_has_mutex_here = true, 1));
}
static inline void s_mutex_unlock(void)
{
assert(_has_mutex_here);
- if (UNLIKELY(pthread_mutex_unlock(&sync_ctl.global_mutex) != 0))
- stm_fatalerror("pthread_mutex_unlock: %m");
+ int err = pthread_mutex_unlock(&sync_ctl.global_mutex);
+ if (UNLIKELY(err != 0))
+ stm_fatalerror("pthread_mutex_unlock: %d", err);
assert((_has_mutex_here = false, 1));
}
@@ -83,26 +99,70 @@
#endif
assert(_has_mutex_here);
- if (UNLIKELY(pthread_cond_wait(&sync_ctl.cond[ctype],
- &sync_ctl.global_mutex) != 0))
- stm_fatalerror("pthread_cond_wait/%d: %m", (int)ctype);
+ int err = pthread_cond_wait(&sync_ctl.cond[ctype],
+ &sync_ctl.global_mutex);
+ if (UNLIKELY(err != 0))
+ stm_fatalerror("pthread_cond_wait/%d: %d", (int)ctype, err);
+}
+
+static inline void timespec_delay(struct timespec *t, double incr)
+{
+#ifdef CLOCK_REALTIME
+ clock_gettime(CLOCK_REALTIME, t);
+#else
+ struct timeval tv;
+ RPY_GETTIMEOFDAY(&tv);
+ t->tv_sec = tv.tv_sec;
+ t->tv_nsec = tv.tv_usec * 1000 + 999;
+#endif
+ /* assumes that "incr" is not too large, less than 1 second */
+ long nsec = t->tv_nsec + (long)(incr * 1000000000.0);
+ if (nsec >= 1000000000) {
+ t->tv_sec += 1;
+ nsec -= 1000000000;
+ assert(nsec < 1000000000);
+ }
+ t->tv_nsec = nsec;
+}
+
+static inline bool cond_wait_timeout(enum cond_type_e ctype, double delay)
+{
+#ifdef STM_NO_COND_WAIT
+ stm_fatalerror("*** cond_wait/%d called!", (int)ctype);
+#endif
+
+ assert(_has_mutex_here);
+
+ struct timespec t;
+ timespec_delay(&t, delay);
+
+ int err = pthread_cond_timedwait(&sync_ctl.cond[ctype],
+ &sync_ctl.global_mutex, &t);
+ if (err == 0)
+ return true; /* success */
+ if (LIKELY(err == ETIMEDOUT))
+ return false; /* timeout */
+ stm_fatalerror("pthread_cond_timedwait/%d: %d", (int)ctype, err);
}
static inline void cond_signal(enum cond_type_e ctype)
{
- if (UNLIKELY(pthread_cond_signal(&sync_ctl.cond[ctype]) != 0))
- stm_fatalerror("pthread_cond_signal/%d: %m", (int)ctype);
+ int err = pthread_cond_signal(&sync_ctl.cond[ctype]);
+ if (UNLIKELY(err != 0))
+ stm_fatalerror("pthread_cond_signal/%d: %d", (int)ctype, err);
}
static inline void cond_broadcast(enum cond_type_e ctype)
{
- if (UNLIKELY(pthread_cond_broadcast(&sync_ctl.cond[ctype]) != 0))
- stm_fatalerror("pthread_cond_broadcast/%d: %m", (int)ctype);
+ int err = pthread_cond_broadcast(&sync_ctl.cond[ctype]);
+ if (UNLIKELY(err != 0))
+ stm_fatalerror("pthread_cond_broadcast/%d: %d", (int)ctype, err);
}
/************************************************************/
+#if 0
void stm_wait_for_current_inevitable_transaction(void)
{
restart:
@@ -125,7 +185,7 @@
}
s_mutex_unlock();
}
-
+#endif
static bool acquire_thread_segment(stm_thread_local_t *tl)
@@ -155,10 +215,12 @@
num = (num+1) % (NB_SEGMENTS-1);
if (sync_ctl.in_use1[num+1] == 0) {
/* we're getting 'num', a different number. */
- dprintf(("acquired different segment: %d->%d\n",
- tl->last_associated_segment_num, num+1));
+ int old_num = tl->last_associated_segment_num;
+ dprintf(("acquired different segment: %d->%d\n", old_num, num+1));
tl->last_associated_segment_num = num+1;
set_gs_register(get_segment_base(num+1));
+ dprintf((" %d->%d\n", old_num, num+1));
+ (void)old_num;
goto got_num;
}
}
@@ -176,24 +238,31 @@
sync_ctl.in_use1[num+1] = 1;
assert(STM_SEGMENT->segment_num == num+1);
assert(STM_SEGMENT->running_thread == NULL);
- tl->associated_segment_num = tl->last_associated_segment_num;
+ assert(tl->last_associated_segment_num == STM_SEGMENT->segment_num);
+ assert(!in_transaction(tl));
STM_SEGMENT->running_thread = tl;
+ assert(in_transaction(tl));
return true;
}
static void release_thread_segment(stm_thread_local_t *tl)
{
+ int segnum;
assert(_has_mutex());
cond_signal(C_SEGMENT_FREE);
assert(STM_SEGMENT->running_thread == tl);
- assert(tl->associated_segment_num == tl->last_associated_segment_num);
- tl->associated_segment_num = -1;
- STM_SEGMENT->running_thread = NULL;
+ segnum = STM_SEGMENT->segment_num;
+ if (tl != NULL) {
+ assert(tl->last_associated_segment_num == segnum);
+ assert(in_transaction(tl));
+ STM_SEGMENT->running_thread = NULL;
+ assert(!in_transaction(tl));
+ }
- assert(sync_ctl.in_use1[tl->last_associated_segment_num] == 1);
- sync_ctl.in_use1[tl->last_associated_segment_num] = 0;
+ assert(sync_ctl.in_use1[segnum] == 1);
+ sync_ctl.in_use1[segnum] = 0;
}
__attribute__((unused))
@@ -204,22 +273,15 @@
bool _stm_in_transaction(stm_thread_local_t *tl)
{
- if (tl->associated_segment_num == -1) {
- return false;
- }
- else {
- int num = tl->associated_segment_num;
- OPT_ASSERT(1 <= num && num < NB_SEGMENTS);
- OPT_ASSERT(num == tl->last_associated_segment_num);
- OPT_ASSERT(get_segment(num)->running_thread == tl);
- return true;
- }
+ int num = tl->last_associated_segment_num;
+ OPT_ASSERT(1 <= num && num < NB_SEGMENTS);
+ return in_transaction(tl);
}
void _stm_test_switch(stm_thread_local_t *tl)
{
assert(_stm_in_transaction(tl));
- set_gs_register(get_segment_base(tl->associated_segment_num));
+ set_gs_register(get_segment_base(tl->last_associated_segment_num));
assert(STM_SEGMENT->running_thread == tl);
exec_local_finalizers();
}
@@ -267,16 +329,19 @@
}
assert(!pause_signalled);
pause_signalled = true;
+ dprintf(("request to pause\n"));
}
static inline long count_other_threads_sp_running(void)
{
/* Return the number of other threads in SP_RUNNING.
- Asserts that SP_RUNNING threads still have the NSE_SIGxxx. */
+ Asserts that SP_RUNNING threads still have the NSE_SIGxxx.
+ (A detached inevitable transaction is still SP_RUNNING.) */
long i;
long result = 0;
- int my_num = STM_SEGMENT->segment_num;
+ int my_num;
+ my_num = STM_SEGMENT->segment_num;
for (i = 1; i < NB_SEGMENTS; i++) {
if (i != my_num && get_priv_segment(i)->safe_point == SP_RUNNING) {
assert(get_segment(i)->nursery_end <= _STM_NSE_SIGNAL_MAX);
@@ -299,6 +364,7 @@
if (get_segment(i)->nursery_end == NSE_SIGPAUSE)
get_segment(i)->nursery_end = NURSERY_END;
}
+ dprintf(("request removed\n"));
cond_broadcast(C_REQUEST_REMOVED);
}
@@ -316,6 +382,8 @@
if (STM_SEGMENT->nursery_end == NURSERY_END)
break; /* no safe point requested */
+ dprintf(("enter safe point\n"));
+ assert(!STM_SEGMENT->no_safe_point_here);
assert(STM_SEGMENT->nursery_end == NSE_SIGPAUSE);
assert(pause_signalled);
@@ -330,11 +398,15 @@
cond_wait(C_REQUEST_REMOVED);
STM_PSEGMENT->safe_point = SP_RUNNING;
timing_event(STM_SEGMENT->running_thread, STM_WAIT_DONE);
+ assert(!STM_SEGMENT->no_safe_point_here);
+ dprintf(("left safe point\n"));
}
}
static void synchronize_all_threads(enum sync_type_e sync_type)
{
+ restart:
+ assert(_has_mutex());
enter_safe_point_if_requested();
/* Only one thread should reach this point concurrently. This is
@@ -353,8 +425,19 @@
/* If some other threads are SP_RUNNING, we cannot proceed now.
Wait until all other threads are suspended. */
while (count_other_threads_sp_running() > 0) {
+
+ intptr_t detached = fetch_detached_transaction();
+ if (detached != 0) {
+ remove_requests_for_safe_point(); /* => C_REQUEST_REMOVED */
+ s_mutex_unlock();
+ commit_fetched_detached_transaction(detached);
+ s_mutex_lock();
+ goto restart;
+ }
+
STM_PSEGMENT->safe_point = SP_WAIT_FOR_C_AT_SAFE_POINT;
- cond_wait(C_AT_SAFE_POINT);
+ cond_wait_timeout(C_AT_SAFE_POINT, 0.00001);
+        /* every 10 microseconds, retry fetch_detached_transaction() */
STM_PSEGMENT->safe_point = SP_RUNNING;
if (must_abort()) {
diff --git a/rpython/translator/stm/src_stm/stm/sync.h b/rpython/translator/stm/src_stm/stm/sync.h
--- a/rpython/translator/stm/src_stm/stm/sync.h
+++ b/rpython/translator/stm/src_stm/stm/sync.h
@@ -17,6 +17,7 @@
static bool _has_mutex(void);
#endif
static void set_gs_register(char *value);
+static void ensure_gs_register(long segnum);
/* acquire and release one of the segments for running the given thread
diff --git a/rpython/translator/stm/src_stm/stmgc.c b/rpython/translator/stm/src_stm/stmgc.c
--- a/rpython/translator/stm/src_stm/stmgc.c
+++ b/rpython/translator/stm/src_stm/stmgc.c
@@ -19,6 +19,7 @@
#include "stm/rewind_setjmp.h"
#include "stm/finalizer.h"
#include "stm/locks.h"
+#include "stm/detach.h"
#include "stm/misc.c"
#include "stm/list.c"
#include "stm/smallmalloc.c"
@@ -41,3 +42,4 @@
#include "stm/rewind_setjmp.c"
#include "stm/finalizer.c"
#include "stm/hashtable.c"
+#include "stm/detach.c"
diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h
--- a/rpython/translator/stm/src_stm/stmgc.h
+++ b/rpython/translator/stm/src_stm/stmgc.h
@@ -13,6 +13,7 @@
#include <limits.h>
#include <unistd.h>
+#include "stm/atomic.h"
#include "stm/rewind_setjmp.h"
#if LONG_MAX == 2147483647
@@ -39,6 +40,7 @@
struct stm_segment_info_s {
uint8_t transaction_read_version;
+ uint8_t no_safe_point_here; /* set from outside, triggers an assert */
int segment_num;
char *segment_base;
stm_char *nursery_current;
@@ -69,8 +71,7 @@
(this field is not modified on a successful commit) */
long last_abort__bytes_in_nursery;
/* the next fields are handled internally by the library */
- int associated_segment_num;
- int last_associated_segment_num;
+ int last_associated_segment_num; /* always a valid seg num */
int thread_local_counter;
struct stm_thread_local_s *prev, *next;
void *creating_pthread[2];
@@ -83,6 +84,17 @@
void _stm_write_slowpath_card(object_t *, uintptr_t);
object_t *_stm_allocate_slowpath(ssize_t);
object_t *_stm_allocate_external(ssize_t);
+
+extern volatile intptr_t _stm_detached_inevitable_from_thread;
+long _stm_start_transaction(stm_thread_local_t *tl);
+void _stm_commit_transaction(void);
+void _stm_leave_noninevitable_transactional_zone(void);
+#define _stm_detach_inevitable_transaction(tl) do { \
+ write_fence(); \
+ assert(_stm_detached_inevitable_from_thread == 0); \
+ _stm_detached_inevitable_from_thread = (intptr_t)(tl); \
+} while (0)
+void _stm_reattach_transaction(stm_thread_local_t *tl);
void _stm_become_inevitable(const char*);
void _stm_collectable_safe_point(void);
@@ -380,23 +392,6 @@
rewind_jmp_enum_shadowstack(&(tl)->rjthread, callback)
-/* Starting and ending transactions. stm_read(), stm_write() and
- stm_allocate() should only be called from within a transaction.
- The stm_start_transaction() call returns the number of times it
- returned, starting at 0. If it is > 0, then the transaction was
- aborted and restarted this number of times. */
-long stm_start_transaction(stm_thread_local_t *tl);
-void stm_start_inevitable_transaction(stm_thread_local_t *tl);
-void stm_commit_transaction(void);
-
-/* Temporary fix? Call this outside a transaction. If there is an
- inevitable transaction running somewhere else, wait until it finishes. */
-void stm_wait_for_current_inevitable_transaction(void);
-
-/* Abort the currently running transaction. This function never
- returns: it jumps back to the stm_start_transaction(). */
-void stm_abort_transaction(void) __attribute__((noreturn));
-
#ifdef STM_NO_AUTOMATIC_SETJMP
int stm_is_inevitable(void);
#else
@@ -405,6 +400,73 @@
}
#endif
+
+/* Entering and leaving a "transactional code zone": a (typically very
+ large) section in the code where we are running a transaction.
+ This is the STM equivalent to "acquire the GIL" and "release the
+ GIL", respectively. stm_read(), stm_write(), stm_allocate(), and
+ other functions should only be called from within a transaction.
+
+ Note that transactions, in the STM sense, cover _at least_ one
+ transactional code zone. They may be longer; for example, if one
+ thread does a lot of stm_enter_transactional_zone() +
+ stm_become_inevitable() + stm_leave_transactional_zone(), as is
+ typical in a thread that does a lot of C function calls, then we
+ get only a few bigger inevitable transactions that cover the many
+ short transactional zones. This is done by having
+ stm_leave_transactional_zone() turn the current transaction
+ inevitable and detach it from the running thread (if there is no
+ other inevitable transaction running so far). Then
+ stm_enter_transactional_zone() will try to reattach to it. This is
+ far more efficient than constantly starting and committing
+ transactions.
+
+ stm_enter_transactional_zone() and stm_leave_transactional_zone()
+ preserve the value of errno.
+*/
+#ifdef STM_DEBUGPRINT
+#include <stdio.h>
+#endif
+static inline void stm_enter_transactional_zone(stm_thread_local_t *tl) {
+ if (__sync_bool_compare_and_swap(&_stm_detached_inevitable_from_thread,
+ (intptr_t)tl, 0)) {
+#ifdef STM_DEBUGPRINT
+ fprintf(stderr, "stm_enter_transactional_zone fast path\n");
+#endif
+ }
+ else {
+ _stm_reattach_transaction(tl);
+        /* _stm_detached_inevitable_from_thread should be 0 here, but
+           it may already have been changed by a parallel thread
+           (assuming we're not inevitable ourselves) */
+ }
+}
+static inline void stm_leave_transactional_zone(stm_thread_local_t *tl) {
+ assert(STM_SEGMENT->running_thread == tl);
+ if (stm_is_inevitable()) {
+#ifdef STM_DEBUGPRINT
+ fprintf(stderr, "stm_leave_transactional_zone fast path\n");
+#endif
+ _stm_detach_inevitable_transaction(tl);
+ }
+ else {
+ _stm_leave_noninevitable_transactional_zone();
+ }
+}
+
+/* stm_force_transaction_break() is in theory equivalent to
+ stm_leave_transactional_zone() immediately followed by
+ stm_enter_transactional_zone(); however, it is supposed to be
+ called in CPU-heavy threads that had a transaction run for a while,
+ and so it *always* forces a commit and starts the next transaction.
+ The new transaction is never inevitable. */
+void stm_force_transaction_break(stm_thread_local_t *tl);
+
+/* Abort the currently running transaction. This function never
+ returns: it jumps back to the start of the transaction (which must
+ not be inevitable). */
+void stm_abort_transaction(void) __attribute__((noreturn));
+
/* Turn the current transaction inevitable.
stm_become_inevitable() itself may still abort the transaction instead
of returning. */
@@ -413,6 +475,8 @@
assert(STM_SEGMENT->running_thread == tl);
if (!stm_is_inevitable())
_stm_become_inevitable(msg);
+ /* now, we're running the inevitable transaction, so this var should be 0 */
+ assert(_stm_detached_inevitable_from_thread == 0);
}
/* Forces a safe-point if needed. Normally not needed: this is
@@ -467,8 +531,8 @@
other threads. A very heavy-handed way to make sure that no other
transaction is running concurrently. Avoid as much as possible.
Other transactions will continue running only after this transaction
- commits. (xxx deprecated and may be removed) */
-void stm_become_globally_unique_transaction(stm_thread_local_t *tl, const char *msg);
+ commits. (deprecated, not working any more according to demo_random2) */
+//void stm_become_globally_unique_transaction(stm_thread_local_t *tl, const char *msg);
/* Moves the transaction forward in time by validating the read and
write set with all commits that happened since the last validation
More information about the pypy-commit
mailing list