[pypy-commit] stmgc c7: WIP: allow for arbitrary number of pthreads using fixed number of thread segments
Remi Meier
noreply at buildbot.pypy.org
Fri Jan 31 13:28:51 CET 2014
Author: Remi Meier
Branch: c7
Changeset: r695:59a2204a4c1b
Date: 2014-01-31 13:29 +0100
http://bitbucket.org/pypy/stmgc/changeset/59a2204a4c1b/
Log: WIP: allow for arbitrary number of pthreads using fixed number of
thread segments. First test with duhton works.
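
The core idea, in a nutshell: instead of binding each pthread to its own
memory segment, a fixed pool of segments is multiplexed over any number of
pthreads, guarded by a counting semaphore plus per-segment busy flags. A
minimal self-contained sketch of that mechanism (names like NB_SEGMENTS,
segments_init, grab_segment and yield_segment are illustrative, not the
patch's actual identifiers):

    #include <semaphore.h>
    #include <stdint.h>

    #define NB_SEGMENTS 2

    static sem_t segment_semaphore;            /* counts free segments */
    static uint8_t segment_busy[NB_SEGMENTS];  /* 1 while a pthread owns it */

    static void segments_init(void)
    {
        sem_init(&segment_semaphore, 0, NB_SEGMENTS);
    }

    static int grab_segment(void)
    {
        sem_wait(&segment_semaphore);   /* block until a segment is free */
        int i = 0;
        /* the semaphore guarantees at least one flag is 0, so this
           atomic scan terminates */
        while (__sync_lock_test_and_set(&segment_busy[i], 1))
            i = (i + 1) % NB_SEGMENTS;
        return i;
    }

    static void yield_segment(int i)
    {
        __sync_lock_release(&segment_busy[i]);
        sem_post(&segment_semaphore);
    }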
diff --git a/c7/core.c b/c7/core.c
--- a/c7/core.c
+++ b/c7/core.c
@@ -5,9 +5,7 @@
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
-#include <sys/syscall.h>
-#include <asm/prctl.h>
-#include <sys/prctl.h>
+
#include <pthread.h>
#include "core.h"
@@ -165,8 +163,27 @@
}
}
+void _stm_setup_static_thread(void)
+{
+ int thread_num = __sync_fetch_and_add(&num_threads_started, 1);
+ assert(thread_num < 2); /* only 2 threads for now */
+ _stm_restore_local_state(thread_num);
+ _STM_TL->nursery_current = (localchar_t*)(FIRST_NURSERY_PAGE * 4096);
+ memset((void*)real_address((object_t*)_STM_TL->nursery_current), 0x0,
+ (FIRST_AFTER_NURSERY_PAGE - FIRST_NURSERY_PAGE) * 4096); /* clear nursery */
+
+ _STM_TL->shadow_stack = NULL;
+ _STM_TL->shadow_stack_base = NULL;
+
+ _STM_TL->old_objects_to_trace = stm_list_create();
+
+ _STM_TL->modified_objects = stm_list_create();
+ _STM_TL->uncommitted_objects = stm_list_create();
+ assert(!_STM_TL->active);
+ _stm_assert_clean_tl();
+}
void stm_setup(void)
{
@@ -244,44 +261,19 @@
char *heap = REAL_ADDRESS(get_thread_base(0), first_heap * 4096UL);
assert(memset(heap, 0xcd, HEAP_PAGES * 4096)); // testing
stm_largemalloc_init(heap, HEAP_PAGES * 4096UL);
+
+ for (i = 0; i < NB_THREADS; i++) {
+ _stm_setup_static_thread();
+ }
}
-#define INVALID_GS_VALUE 0x6D6D6D6D
-static void set_gs_register(uint64_t value)
+
+void _stm_teardown_static_thread(int thread_num)
{
- int result = syscall(SYS_arch_prctl, ARCH_SET_GS, value);
- assert(result == 0);
-}
-
-void stm_setup_thread(void)
-{
- int thread_num = __sync_fetch_and_add(&num_threads_started, 1);
- assert(thread_num < 2); /* only 2 threads for now */
-
_stm_restore_local_state(thread_num);
-
- _STM_TL->nursery_current = (localchar_t*)(FIRST_NURSERY_PAGE * 4096);
- memset((void*)real_address((object_t*)_STM_TL->nursery_current), 0x0,
- (FIRST_AFTER_NURSERY_PAGE - FIRST_NURSERY_PAGE) * 4096); /* clear nursery */
- _STM_TL->shadow_stack = (object_t**)malloc(LENGTH_SHADOW_STACK * sizeof(void*));
- _STM_TL->shadow_stack_base = _STM_TL->shadow_stack;
-
- _STM_TL->old_objects_to_trace = stm_list_create();
-
- _STM_TL->modified_objects = stm_list_create();
- _STM_TL->uncommitted_objects = stm_list_create();
- assert(!_STM_TL->active);
-}
-
-bool _stm_is_in_transaction(void)
-{
- return _STM_TL->active;
-}
-
-void _stm_teardown_thread(void)
-{
+ _stm_assert_clean_tl();
_stm_reset_shared_lock();
stm_list_free(_STM_TL->modified_objects);
@@ -295,12 +287,16 @@
assert(_STM_TL->old_objects_to_trace->count == 0);
stm_list_free(_STM_TL->old_objects_to_trace);
-
- set_gs_register(INVALID_GS_VALUE);
+
+ _stm_restore_local_state(-1); // invalid
}
-void _stm_teardown(void)
+void stm_teardown(void)
{
+ for (; num_threads_started > 0; num_threads_started--) {
+ _stm_teardown_static_thread(num_threads_started - 1);
+ }
+
assert(inevitable_lock == 0);
munmap(object_pages, TOTAL_MEMORY);
_stm_reset_pages();
@@ -308,14 +304,7 @@
object_pages = NULL;
}
-void _stm_restore_local_state(int thread_num)
-{
- char *thread_base = get_thread_base(thread_num);
- set_gs_register((uintptr_t)thread_base);
- assert(_STM_TL->thread_num == thread_num);
- assert(_STM_TL->thread_base == thread_base);
-}
static void reset_transaction_read_version(void)
{
@@ -378,10 +367,11 @@
void stm_start_transaction(jmpbufptr_t *jmpbufptr)
{
+ /* GS invalid before this point! */
+ _stm_stop_safe_point(LOCK_COLLECT|THREAD_YIELD);
+
assert(!_STM_TL->active);
- _stm_stop_safe_point(LOCK_COLLECT);
-
uint8_t old_rv = _STM_TL->transaction_read_version;
_STM_TL->transaction_read_version = old_rv + 1;
if (UNLIKELY(old_rv == 0xff))
@@ -442,8 +432,11 @@
_STM_TL->active = 0;
- _stm_start_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT);
+
fprintf(stderr, "%c", 'C'+_STM_TL->thread_num*32);
+
+ _stm_start_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT|THREAD_YIELD);
+ /* GS invalid after this point! */
}
@@ -495,15 +488,17 @@
assert(_STM_TL->jmpbufptr != NULL);
assert(_STM_TL->jmpbufptr != (jmpbufptr_t *)-1); /* for tests only */
_STM_TL->active = 0;
- /* _STM_TL->need_abort = 0; */
-
- _stm_start_safe_point(LOCK_COLLECT);
-
- fprintf(stderr, "%c", 'A'+_STM_TL->thread_num*32);
+ _STM_TL->need_abort = 0;
/* reset all the modified objects (incl. re-adding GCFLAG_WRITE_BARRIER) */
reset_modified_from_other_threads();
stm_list_clear(_STM_TL->modified_objects);
- __builtin_longjmp(*_STM_TL->jmpbufptr, 1);
+ jmpbufptr_t *buf = _STM_TL->jmpbufptr; /* _STM_TL not valid during safe-point */
+ fprintf(stderr, "%c", 'A'+_STM_TL->thread_num*32);
+
+ _stm_start_safe_point(LOCK_COLLECT|THREAD_YIELD);
+ /* GS invalid after this point! */
+
+ __builtin_longjmp(*buf, 1);
}
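
For the embedding program, the lifecycle after this change reduces to a
setup/teardown pair plus one registration call per pthread. A hedged usage
sketch (main() and the comments are illustrative; only the stm_* calls come
from this patch):

    #include "core.h"

    int main(void)
    {
        stm_setup();      /* also runs _stm_setup_static_thread() NB_THREADS times */
        /* ... spawn pthreads; each calls stm_setup_pthread() exactly once,
           then runs transactions, which grab/yield segments on their own ... */
        stm_teardown();   /* unwinds the static threads in reverse order */
        return 0;
    }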
diff --git a/c7/core.h b/c7/core.h
--- a/c7/core.h
+++ b/c7/core.h
@@ -93,21 +93,25 @@
struct _thread_local1_s {
jmpbufptr_t *jmpbufptr;
uint8_t transaction_read_version;
+
+ /* static threads, not pthreads */
+ int thread_num;
+ char *thread_base;
- int thread_num;
uint8_t active; /* 1 normal, 2 inevitable, 0 no trans. */
bool need_abort;
- char *thread_base;
- struct stm_list_s *modified_objects;
-
+
object_t **old_shadow_stack;
object_t **shadow_stack;
object_t **shadow_stack_base;
+ localchar_t *nursery_current;
+
+ struct stm_list_s *modified_objects;
+
struct alloc_for_size_s alloc[LARGE_OBJECT_WORDS];
struct stm_list_s *uncommitted_objects;
- localchar_t *nursery_current;
struct stm_list_s *old_objects_to_trace;
};
#define _STM_TL ((_thread_local1_t *)4352)
@@ -126,6 +130,7 @@
#define LIKELY(x) __builtin_expect(x, true)
#define UNLIKELY(x) __builtin_expect(x, false)
+#define IMPLY(a, b) (!(a) || (b))
#define REAL_ADDRESS(object_pages, src) ((object_pages) + (uintptr_t)(src))
@@ -177,6 +182,7 @@
}
+
/* ==================== API ==================== */
static inline void stm_read(object_t *obj)
@@ -206,16 +212,17 @@
extern void stmcb_trace(struct object_s *, void (object_t **));
void _stm_restore_local_state(int thread_num);
-void _stm_teardown(void);
-void _stm_teardown_thread(void);
+void stm_teardown(void);
bool _stm_is_in_transaction(void);
+void _stm_assert_clean_tl(void);
bool _stm_was_read(object_t *obj);
bool _stm_was_written(object_t *obj);
object_t *stm_allocate(size_t size);
void stm_setup(void);
-void stm_setup_thread(void);
+void stm_setup_pthread(void);
+
void stm_start_transaction(jmpbufptr_t *jmpbufptr);
void stm_stop_transaction(void);
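
The field reordering in _thread_local1_s mirrors the new ownership split:
thread_num/thread_base stay with the static segment, while the shadow-stack
pointers travel with the pthread (they are copied in and out by
_stm_acquire_tl_segment/_stm_release_tl_segment in stmsync.c). A hedged
summary of that split (struct name and typedef are illustrative):

    typedef struct object_s object_t;   /* opaque, as in core.h */

    struct thread_local_split_sketch {
        /* bound to the static segment: */
        int thread_num;
        char *thread_base;
        /* travels with the pthread, copied on every segment hand-off: */
        object_t **shadow_stack;
        object_t **shadow_stack_base;
        object_t **old_shadow_stack;
    };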
diff --git a/c7/list.h b/c7/list.h
--- a/c7/list.h
+++ b/c7/list.h
@@ -2,7 +2,7 @@
#define _STM_LIST_H
#include "core.h"
-
+#include <stdlib.h>
struct stm_list_s {
uintptr_t count;
diff --git a/c7/nursery.c b/c7/nursery.c
--- a/c7/nursery.c
+++ b/c7/nursery.c
@@ -41,7 +41,8 @@
object_t *stm_allocate_prebuilt(size_t size)
{
- return _stm_allocate_old(size); /* XXX */
+ object_t* res = _stm_allocate_old(size); /* XXX */
+ return res;
}
localchar_t *_stm_alloc_next_page(size_t size_class)
diff --git a/c7/stmsync.c b/c7/stmsync.c
--- a/c7/stmsync.c
+++ b/c7/stmsync.c
@@ -1,12 +1,18 @@
#include <assert.h>
#include <string.h>
#include <unistd.h>
-
+#include <stdio.h>
+#include <sys/syscall.h>
+#include <sys/prctl.h>
+#include <asm/prctl.h>
+#include <semaphore.h>
#include "stmsync.h"
#include "core.h"
#include "reader_writer_lock.h"
+#include "list.h"
+#define INVALID_GS_VALUE 0x6D6D6D6D
/* a multi-reader, single-writer lock: transactions normally take a reader
lock, so don't conflict with each other; when we need to do a global GC,
@@ -15,6 +21,141 @@
rwticket rw_shared_lock; /* the "GIL" */
rwticket rw_collection_lock; /* for major collections */
+sem_t static_thread_semaphore;
+uint8_t static_threads[NB_THREADS]; /* 1 if running a pthread */
+__thread struct _thread_local1_s *pthread_tl = NULL;
+
+
+
+
+void _stm_acquire_tl_segment();
+void _stm_release_tl_segment();
+
+static void set_gs_register(uint64_t value)
+{
+ int result = syscall(SYS_arch_prctl, ARCH_SET_GS, value);
+ assert(result == 0);
+}
+
+bool _stm_is_in_transaction(void)
+{
+ return pthread_tl->active;
+}
+
+
+void _stm_restore_local_state(int thread_num)
+{
+ if (thread_num == -1) { /* mostly for debugging */
+ set_gs_register(INVALID_GS_VALUE);
+ return;
+ }
+
+ char *thread_base = get_thread_base(thread_num);
+ set_gs_register((uintptr_t)thread_base);
+
+ assert(_STM_TL->thread_num == thread_num);
+ assert(_STM_TL->thread_base == thread_base);
+}
+
+
+void _stm_yield_thread_segment()
+{
+ _stm_release_tl_segment();
+
+ /* release our static thread: */
+ static_threads[_STM_TL->thread_num] = 0;
+ sem_post(&static_thread_semaphore);
+
+ _stm_restore_local_state(-1); /* invalid */
+}
+
+void _stm_grab_thread_segment()
+{
+ /* acquire a static thread: */
+ sem_wait(&static_thread_semaphore);
+ int thread_num = 0;
+ while (1) {
+ if (!__sync_lock_test_and_set(&static_threads[thread_num], 1))
+ break;
+ thread_num = (thread_num + 1) % NB_THREADS;
+ }
+
+ _stm_restore_local_state(thread_num);
+ _stm_acquire_tl_segment();
+}
+
+
+void _stm_assert_clean_tl()
+{
+    /* across a pthread switch, these are the invariants
+       that must hold */
+
+ /* already set are
+ thread_num, thread_base: to the current static thread
+ nursery_current: nursery should be cleared
+ active, need_abort: no transaction running
+ modified_objects: empty
+ alloc: re-usable by this thread
+ uncommitted_objects: empty
+ old_objects_to_trace: empty
+ !!shadow_stack...: still belongs to previous thread
+ */
+ assert(stm_list_is_empty(_STM_TL->modified_objects));
+ assert(stm_list_is_empty(_STM_TL->uncommitted_objects));
+ assert(stm_list_is_empty(_STM_TL->old_objects_to_trace));
+
+ assert(!_STM_TL->active);
+ /* assert(!_STM_TL->need_abort); may happen, but will be cleared by
+ start_transaction() */
+ assert(_STM_TL->nursery_current == (localchar_t*)(FIRST_NURSERY_PAGE * 4096));
+}
+
+void _stm_acquire_tl_segment()
+{
+ /* makes tl-segment ours! */
+ _stm_assert_clean_tl();
+
+ _STM_TL->shadow_stack = pthread_tl->shadow_stack;
+ _STM_TL->shadow_stack_base = pthread_tl->shadow_stack_base;
+ _STM_TL->old_shadow_stack = pthread_tl->old_shadow_stack;
+}
+
+void _stm_release_tl_segment()
+{
+    /* gives the tl-segment back: copies its state out to pthread_tl */
+ _stm_assert_clean_tl();
+
+ pthread_tl->shadow_stack = _STM_TL->shadow_stack;
+ pthread_tl->shadow_stack_base = _STM_TL->shadow_stack_base;
+ pthread_tl->old_shadow_stack = _STM_TL->old_shadow_stack;
+}
+
+void stm_setup_pthread(void)
+{
+ struct _thread_local1_s* tl = malloc(sizeof(struct _thread_local1_s));
+ assert(!pthread_tl);
+ pthread_tl = tl;
+
+ /* get us a clean thread segment */
+ _stm_grab_thread_segment();
+ _stm_assert_clean_tl();
+
+ /* allocate shadow stack for this thread */
+ _STM_TL->shadow_stack = (object_t**)malloc(LENGTH_SHADOW_STACK * sizeof(void*));
+ _STM_TL->shadow_stack_base = _STM_TL->shadow_stack;
+
+ /* copy everything from _STM_TL */
+ memcpy(tl, REAL_ADDRESS(get_thread_base(_STM_TL->thread_num), _STM_TL),
+ sizeof(struct _thread_local1_s));
+
+ /* go into safe-point again: */
+ _stm_yield_thread_segment();
+}
+
+
+
+
+
void _stm_reset_shared_lock()
{
@@ -27,32 +168,40 @@
assert(!rwticket_wrunlock(&rw_collection_lock));
memset(&rw_collection_lock, 0, sizeof(rwticket));
+
+ int i;
+ for (i = 0; i < NB_THREADS; i++)
+ assert(static_threads[i] == 0);
+ memset(static_threads, 0, sizeof(static_threads));
+ sem_init(&static_thread_semaphore, 0, NB_THREADS);
+ sem_getvalue(&static_thread_semaphore, &i);
+ assert(i == NB_THREADS);
}
-void stm_acquire_collection_lock()
-{
- /* we must have the exclusive lock here and
- not the collection lock!! */
- /* XXX: for more than 2 threads, need a way
- to signal other threads with need_major_collect
- so that they don't leave COLLECT-safe-points
- when this flag is set. Otherwise we simply
- wait arbitrarily long until all threads reach
- COLLECT-safe-points by chance at the same time. */
- while (1) {
- if (!rwticket_wrtrylock(&rw_collection_lock))
- break; /* acquired! */
+/* void stm_acquire_collection_lock() */
+/* { */
+/* /\* we must have the exclusive lock here and */
+/* not the collection lock!! *\/ */
+/* /\* XXX: for more than 2 threads, need a way */
+/* to signal other threads with need_major_collect */
+/* so that they don't leave COLLECT-safe-points */
+/* when this flag is set. Otherwise we simply */
+/* wait arbitrarily long until all threads reach */
+/* COLLECT-safe-points by chance at the same time. *\/ */
+/* while (1) { */
+/* if (!rwticket_wrtrylock(&rw_collection_lock)) */
+/* break; /\* acquired! *\/ */
- stm_stop_exclusive_lock();
- usleep(1);
- stm_start_exclusive_lock();
- if (_STM_TL->need_abort) {
- stm_stop_exclusive_lock();
- stm_start_shared_lock();
- stm_abort_transaction();
- }
- }
-}
+/* stm_stop_exclusive_lock(); */
+/* usleep(1); */
+/* stm_start_exclusive_lock(); */
+/* if (_STM_TL->need_abort) { */
+/* stm_stop_exclusive_lock(); */
+/* stm_start_shared_lock(); */
+/* stm_abort_transaction(); */
+/* } */
+/* } */
+/* } */
void stm_start_shared_lock(void)
{
@@ -75,30 +224,51 @@
}
/* _stm_start_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT)
- -> release the exclusive lock and also the collect-read-lock */
+ -> release the exclusive lock and also the collect-read-lock
+
+ THREAD_YIELD: gives up the current thread's GS segment
+ so that other threads can grab it and run. This will
+ make _STM_TL and all thread-local addresses unusable
+ for the current thread. (requires LOCK_COLLECT)
+*/
void _stm_start_safe_point(uint8_t flags)
{
+ assert(IMPLY(flags & THREAD_YIELD, flags & LOCK_COLLECT));
+
if (flags & LOCK_EXCLUSIVE)
stm_stop_exclusive_lock();
else
stm_stop_shared_lock();
- if (flags & LOCK_COLLECT)
+ if (flags & LOCK_COLLECT) {
rwticket_rdunlock(&rw_collection_lock);
+
+ if (flags & THREAD_YIELD) {
+ _stm_yield_thread_segment();
+ }
+ }
}
/*
_stm_stop_safe_point(LOCK_COLLECT|LOCK_EXCLUSIVE);
-> reacquire the collect-read-lock and the exclusive lock
+
+ THREAD_YIELD: wait until we get a GS segment assigned
+ and then continue (requires LOCK_COLLECT)
*/
void _stm_stop_safe_point(uint8_t flags)
{
+ assert(IMPLY(flags & THREAD_YIELD, flags & LOCK_COLLECT));
+ if (flags & THREAD_YIELD) {
+ _stm_grab_thread_segment();
+ }
+
if (flags & LOCK_EXCLUSIVE)
stm_start_exclusive_lock();
else
stm_start_shared_lock();
- if (!(flags & LOCK_COLLECT)) { /* if we released the collection lock */
+ if (flags & LOCK_COLLECT) { /* if we released the collection lock */
/* acquire read-collection. always succeeds because
if there was a write-collection holder we would
also not have gotten the shared_lock */
@@ -110,12 +280,8 @@
/* restore to shared-mode with the collection lock */
stm_stop_exclusive_lock();
stm_start_shared_lock();
- if (flags & LOCK_COLLECT)
- rwticket_rdlock(&rw_collection_lock);
stm_abort_transaction();
} else {
- if (flags & LOCK_COLLECT)
- rwticket_rdlock(&rw_collection_lock);
stm_abort_transaction();
}
}
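
Taken together, every COLLECT safe-point that carries THREAD_YIELD is now a
segment hand-off point. A hedged sketch of the resulting discipline at a
transaction boundary (simplified; lock details elided):

    void transaction_boundary_sketch(void)
    {
        /* transaction just ended; _STM_TL still valid here */
        _stm_start_safe_point(LOCK_COLLECT | THREAD_YIELD);
        /* GS invalid in this window: no _STM_TL or other thread-local
           address may be touched */
        _stm_stop_safe_point(LOCK_COLLECT | THREAD_YIELD);
        /* _STM_TL valid again, but possibly a *different* segment */
    }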
diff --git a/c7/stmsync.h b/c7/stmsync.h
--- a/c7/stmsync.h
+++ b/c7/stmsync.h
@@ -8,9 +8,12 @@
void _stm_start_safe_point(uint8_t flags);
void _stm_stop_safe_point(uint8_t flags);
void _stm_reset_shared_lock(void);
+void _stm_grab_thread_segment(void);
+void _stm_yield_thread_segment(void);
enum {
LOCK_COLLECT = (1 << 0),
LOCK_EXCLUSIVE = (1 << 1),
+ THREAD_YIELD = (1 << 2),
};
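
Note that THREAD_YIELD is only meaningful together with LOCK_COLLECT, which
is what the IMPLY() assertions in stmsync.c enforce. A self-contained
restatement (check_flags is illustrative; the macro and enum values match
the patch):

    #include <assert.h>
    #include <stdint.h>

    #define IMPLY(a, b) (!(a) || (b))

    enum {
        LOCK_COLLECT   = (1 << 0),
        LOCK_EXCLUSIVE = (1 << 1),
        THREAD_YIELD   = (1 << 2),
    };

    static void check_flags(uint8_t flags)
    {
        /* yielding the segment requires holding the collection read-lock */
        assert(IMPLY(flags & THREAD_YIELD, flags & LOCK_COLLECT));
    }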
diff --git a/c7/test/support.py b/c7/test/support.py
--- a/c7/test/support.py
+++ b/c7/test/support.py
@@ -151,6 +151,7 @@
bool _checked_stm_become_inevitable() {
jmpbufptr_t here;
+ int tn = _STM_TL->thread_num;
if (__builtin_setjmp(here) == 0) { // returned directly
assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1);
_STM_TL->jmpbufptr = &here;
@@ -158,12 +159,13 @@
_STM_TL->jmpbufptr = (jmpbufptr_t*)-1;
return 0;
}
- _STM_TL->jmpbufptr = (jmpbufptr_t*)-1;
+ _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1;
return 1;
}
bool _checked_stm_write(object_t *object) {
jmpbufptr_t here;
+ int tn = _STM_TL->thread_num;
if (__builtin_setjmp(here) == 0) { // returned directly
assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1);
_STM_TL->jmpbufptr = &here;
@@ -171,25 +173,27 @@
_STM_TL->jmpbufptr = (jmpbufptr_t*)-1;
return 0;
}
- _STM_TL->jmpbufptr = (jmpbufptr_t*)-1;
+ _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1;
return 1;
}
bool _stm_stop_transaction(void) {
jmpbufptr_t here;
+ int tn = _STM_TL->thread_num;
if (__builtin_setjmp(here) == 0) { // returned directly
assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1);
_STM_TL->jmpbufptr = &here;
stm_stop_transaction();
- _STM_TL->jmpbufptr = (jmpbufptr_t*)-1;
+ _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1;
return 0;
}
- _STM_TL->jmpbufptr = (jmpbufptr_t*)-1;
+ _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1;
return 1;
}
bool _stm_check_stop_safe_point(void) {
jmpbufptr_t here;
+ int tn = _STM_TL->thread_num;
if (__builtin_setjmp(here) == 0) { // returned directly
assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1);
_STM_TL->jmpbufptr = &here;
@@ -197,20 +201,21 @@
_STM_TL->jmpbufptr = (jmpbufptr_t*)-1;
return 0;
}
- _STM_TL->jmpbufptr = (jmpbufptr_t*)-1;
+ _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1;
return 1;
}
bool _stm_check_abort_transaction(void) {
jmpbufptr_t here;
+ int tn = _STM_TL->thread_num;
if (__builtin_setjmp(here) == 0) { // returned directly
assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1);
_STM_TL->jmpbufptr = &here;
stm_abort_transaction();
- _STM_TL->jmpbufptr = (jmpbufptr_t*)-1;
+ _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1;
return 0;
}
- _STM_TL->jmpbufptr = (jmpbufptr_t*)-1;
+ _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1;
return 1;
}
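
All the support.py changes follow one pattern: any helper whose wrapped call
may cross a THREAD_YIELD safe-point caches thread_num up front, because on
the longjmp'ed abort path %GS no longer points at a valid segment and
_STM_TL must not be dereferenced. A hedged sketch of the pattern (assumes
core.h for jmpbufptr_t and _STM_TL; the _stm_dbg_get_tl(tn) accessor is
used exactly as in the tests above):

    #include <stdbool.h>

    bool checked_call_sketch(void)
    {
        jmpbufptr_t here;
        int tn = _STM_TL->thread_num;        /* cache before any yield */
        if (__builtin_setjmp(here) == 0) {
            _STM_TL->jmpbufptr = &here;
            /* ... wrapped call that may yield the segment and abort ... */
            _STM_TL->jmpbufptr = (jmpbufptr_t *)-1;
            return 0;
        }
        /* abort path: GS is invalid, so go through the cached number */
        _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t *)-1;
        return 1;
    }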
diff --git a/duhton/duhton.c b/duhton/duhton.c
--- a/duhton/duhton.c
+++ b/duhton/duhton.c
@@ -43,23 +43,22 @@
}
stm_start_inevitable_transaction();
DuObject *code = Du_Compile(filename, interactive);
- _du_save1(code);
- stm_stop_transaction();
- _du_restore1(code);
+
if (code == NULL) {
printf("\n");
break;
}
- /*Du_Print(code, 1);
- printf("\n");*/
- stm_start_inevitable_transaction();
+
DuObject *res = Du_Eval(code, Du_Globals);
if (interactive) {
Du_Print(res, 1);
}
+
_du_save1(stm_thread_local_obj);
+ _stm_minor_collect(); /* hack... */
+ _du_restore1(stm_thread_local_obj);
+
stm_stop_transaction();
- _du_restore1(stm_thread_local_obj);
Du_TransactionRun();
if (!interactive)
diff --git a/duhton/glob.c b/duhton/glob.c
--- a/duhton/glob.c
+++ b/duhton/glob.c
@@ -686,8 +686,10 @@
Du_FatalError("run-transactions: expected no argument");
_du_save1(stm_thread_local_obj);
+ _stm_minor_collect(); /* hack... */
+ _du_restore1(stm_thread_local_obj);
+
stm_stop_transaction();
- _du_restore1(stm_thread_local_obj);
Du_TransactionRun();
@@ -771,9 +773,9 @@
assert(num_threads == 2);
stm_setup();
- stm_setup_thread();
- stm_setup_thread();
- _stm_restore_local_state(0);
+ stm_setup_pthread();
+
+ stm_start_inevitable_transaction();
init_prebuilt_object_objects();
init_prebuilt_symbol_objects();
@@ -784,7 +786,6 @@
all_threads_count = num_threads;
all_threads = (pthread_t*)malloc(sizeof(pthread_t) * num_threads);
- stm_start_inevitable_transaction();
DuFrame_SetBuiltinMacro(Du_Globals, "progn", Du_Progn);
DuFrame_SetBuiltinMacro(Du_Globals, "setq", du_setq);
DuFrame_SetBuiltinMacro(Du_Globals, "print", du_print);
@@ -833,11 +834,5 @@
void Du_Finalize(void)
{
- _stm_restore_local_state(1);
- _stm_teardown_thread();
-
- _stm_restore_local_state(0);
- _stm_teardown_thread();
-
- _stm_teardown();
+ stm_teardown();
}
diff --git a/duhton/transaction.c b/duhton/transaction.c
--- a/duhton/transaction.c
+++ b/duhton/transaction.c
@@ -62,9 +62,11 @@
return;
stm_start_inevitable_transaction();
+
DuConsObject *root = du_pending_transactions;
_du_write1(root);
root->cdr = stm_thread_local_obj;
+
stm_stop_transaction();
stm_thread_local_obj = NULL;
@@ -173,8 +175,8 @@
void *run_thread(void *thread_id)
{
jmpbufptr_t here;
- int thread_num = (uintptr_t)thread_id;
- _stm_restore_local_state(thread_num);
+ stm_setup_pthread();
+
stm_thread_local_obj = NULL;
while (1) {
@@ -185,10 +187,14 @@
while (__builtin_setjmp(here) == 1) { }
stm_start_transaction(&here);
+
run_transaction(cell);
+
_du_save1(stm_thread_local_obj);
+ _stm_minor_collect(); /* hack.. */
+ _du_restore1(stm_thread_local_obj);
+
stm_stop_transaction();
- _du_restore1(stm_thread_local_obj);
}
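
For reference, duhton's worker loop boils down to the following shape after
this patch (condensed, hedged sketch of run_thread above; assumes core.h
for the stm_* declarations):

    void *run_thread_sketch(void *arg)
    {
        jmpbufptr_t here;
        stm_setup_pthread();            /* replaces _stm_restore_local_state(n) */
        while (1) {
            while (__builtin_setjmp(here) == 1) { }   /* re-entered on abort */
            stm_start_transaction(&here);   /* grabs a GS segment (THREAD_YIELD) */
            /* ... transactional work, minor-collect hack, etc. ... */
            stm_stop_transaction();         /* yields the segment again */
        }
        return NULL;
    }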