[pypy-commit] pypy stmgc-c4: import weakref branch
Raemi
noreply at buildbot.pypy.org
Mon Jul 22 17:56:40 CEST 2013
Author: Remi Meier <remi.meier at gmail.com>
Branch: stmgc-c4
Changeset: r65525:a4260ea734e1
Date: 2013-07-22 16:42 +0200
http://bitbucket.org/pypy/pypy/changeset/a4260ea734e1/
Log: import weakref branch
diff --git a/rpython/translator/stm/src_stm/et.c b/rpython/translator/stm/src_stm/et.c
--- a/rpython/translator/stm/src_stm/et.c
+++ b/rpython/translator/stm/src_stm/et.c
@@ -7,6 +7,29 @@
*/
#include "stmimpl.h"
+#ifdef _GC_DEBUG
+char tmp_buf[128];
+char* stm_dbg_get_hdr_str(gcptr obj)
+{
+ char *cur;
+ char *flags[] = GC_FLAG_NAMES;
+ int i;
+
+ i = 0;
+ cur = tmp_buf;
+ cur += sprintf(cur, "%p:", obj);
+ while (flags[i]) {
+ if (obj->h_tid & (STM_FIRST_GCFLAG << i)) {
+ cur += sprintf(cur, "%s|", flags[i]);
+ }
+ i++;
+ }
+ cur += sprintf(cur, "tid=%ld", stm_get_tid(obj));
+ return tmp_buf;
+}
+#endif
+
+
__thread struct tx_descriptor *thread_descriptor = NULL;
@@ -546,6 +569,7 @@
gcptr stm_WriteBarrier(gcptr P)
{
+ assert(!(P->h_tid & GCFLAG_IMMUTABLE));
if (is_private(P))
{
/* If we have GCFLAG_WRITE_BARRIER in P, then list it into
@@ -1092,7 +1116,7 @@
#endif
L->h_revision = new_revision;
- gcptr stub = stm_stub_malloc(d->public_descriptor);
+ gcptr stub = stm_stub_malloc(d->public_descriptor, 0);
stub->h_tid = (L->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC
| GCFLAG_STUB
| GCFLAG_OLD;
diff --git a/rpython/translator/stm/src_stm/et.h b/rpython/translator/stm/src_stm/et.h
--- a/rpython/translator/stm/src_stm/et.h
+++ b/rpython/translator/stm/src_stm/et.h
@@ -73,6 +73,8 @@
static const revision_t GCFLAG_STUB /*debug*/ = STM_FIRST_GCFLAG << 8;
static const revision_t GCFLAG_PRIVATE_FROM_PROTECTED = STM_FIRST_GCFLAG << 9;
static const revision_t GCFLAG_HAS_ID = STM_FIRST_GCFLAG << 10;
+static const revision_t GCFLAG_IMMUTABLE = STM_FIRST_GCFLAG << 11;
+
/* this value must be reflected in PREBUILT_FLAGS in stmgc.h */
#define GCFLAG_PREBUILT (GCFLAG_VISITED | \
@@ -90,6 +92,8 @@
"BACKUP_COPY", \
"STUB", \
"PRIVATE_FROM_PROTECTED", \
+ "HAS_ID", \
+ "IMMUTABLE", \
NULL }
#define IS_POINTER(v) (!((v) & 1)) /* even-valued number */
@@ -197,4 +201,7 @@
void DescriptorInit(void);
void DescriptorDone(void);
+#ifdef _GC_DEBUG
+char* stm_dbg_get_hdr_str(gcptr obj);
+#endif
#endif /* _ET_H */
diff --git a/rpython/translator/stm/src_stm/extra.c b/rpython/translator/stm/src_stm/extra.c
--- a/rpython/translator/stm/src_stm/extra.c
+++ b/rpython/translator/stm/src_stm/extra.c
@@ -4,7 +4,7 @@
void stm_copy_to_old_id_copy(gcptr obj, gcptr id)
{
- //assert(!is_in_nursery(thread_descriptor, id));
+ //assert(!stmgc_is_in_nursery(thread_descriptor, id));
assert(id->h_tid & GCFLAG_OLD);
size_t size = stmgc_size(obj);
@@ -108,10 +108,12 @@
else {
/* must create shadow original object XXX: or use
backup, if exists */
-
- /* XXX use stmgcpage_malloc() directly, we don't need to copy
- * the contents yet */
- gcptr O = stmgc_duplicate_old(p);
+ gcptr O = (gcptr)stmgcpage_malloc(stmgc_size(p));
+ memcpy(O, p, stmgc_size(p)); /* at least major collections
+ depend on some content of id_copy.
+ remove after fixing that XXX */
+ O->h_tid |= GCFLAG_OLD;
+
p->h_original = (revision_t)O;
p->h_tid |= GCFLAG_HAS_ID;
diff --git a/rpython/translator/stm/src_stm/gcpage.c b/rpython/translator/stm/src_stm/gcpage.c
--- a/rpython/translator/stm/src_stm/gcpage.c
+++ b/rpython/translator/stm/src_stm/gcpage.c
@@ -223,11 +223,13 @@
if (!(id_copy->h_tid & GCFLAG_PREBUILT_ORIGINAL)) {
id_copy->h_tid &= ~GCFLAG_PUBLIC_TO_PRIVATE;
/* see fix_outdated() */
- id_copy->h_tid |= GCFLAG_VISITED;
+ if (!(id_copy->h_tid & GCFLAG_VISITED)) {
+ id_copy->h_tid |= GCFLAG_VISITED;
- /* XXX: may not always need tracing? */
- //if (!(id_copy->h_tid & GCFLAG_STUB))
- // gcptrlist_insert(&objects_to_trace, id_copy);
+ /* XXX: may not always need tracing? */
+ if (!(id_copy->h_tid & GCFLAG_STUB))
+ gcptrlist_insert(&objects_to_trace, id_copy);
+ }
}
else {
/* prebuilt originals won't get collected anyway
@@ -237,6 +239,14 @@
}
}
+static void visit(gcptr *pobj);
+
+gcptr stmgcpage_visit(gcptr obj)
+{
+ visit(&obj);
+ return obj;
+}
+
static void visit(gcptr *pobj)
{
gcptr obj = *pobj;
@@ -276,10 +286,10 @@
keep_original_alive(prev_obj);
assert(*pobj == prev_obj);
- gcptr obj1 = obj;
- visit(&obj1); /* recursion, but should be only once */
+ /* recursion, but should be only once */
+ obj = stmgcpage_visit(obj);
assert(prev_obj->h_tid & GCFLAG_STUB);
- prev_obj->h_revision = ((revision_t)obj1) | 2;
+ prev_obj->h_revision = ((revision_t)obj) | 2;
return;
}
}
@@ -452,11 +462,11 @@
assert(gcptrlist_size(&d->public_with_young_copy) == 0);
assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0);
assert(gcptrlist_size(&d->public_descriptor->stolen_young_stubs) == 0);
+ assert(gcptrlist_size(&d->old_objects_to_trace) == 0);
/* NOT NECESSARILY EMPTY:
- list_of_read_objects
- private_from_protected
- public_to_private
- - old_objects_to_trace
*/
assert(gcptrlist_size(&d->list_of_read_objects) ==
d->num_read_objects_known_old);
@@ -488,8 +498,15 @@
/* If we're aborting this transaction anyway, we don't need to do
* more here.
*/
- if (d->active < 0)
- return; /* already "aborted" during forced minor collection */
+ if (d->active < 0) {
+        /* already "aborted" during forced minor collection.
+           Clear the list of read objects so that a possible minor
+           collection before the abort doesn't trip;
+           fix_list_of_read_objects should not run */
+ gcptrlist_clear(&d->list_of_read_objects);
+ d->num_read_objects_known_old = 0;
+ return;
+ }
if (d->active == 2) {
/* inevitable transaction: clear the list of read objects */
@@ -518,6 +535,9 @@
dprintf(("ABRT_COLLECT_MAJOR %p: "
"%p was read but modified already\n", d, obj));
AbortTransactionAfterCollect(d, ABRT_COLLECT_MAJOR);
+ /* fix_list_of_read_objects should not run */
+ gcptrlist_clear(&d->list_of_read_objects);
+ d->num_read_objects_known_old = 0;
return;
}
@@ -776,9 +796,13 @@
assert(gcptrlist_size(&objects_to_trace) == 0);
mark_prebuilt_roots();
mark_all_stack_roots();
- visit_all_objects();
+ do {
+ visit_all_objects();
+ stm_visit_old_weakrefs();
+ } while (gcptrlist_size(&objects_to_trace) != 0);
gcptrlist_delete(&objects_to_trace);
clean_up_lists_of_read_objects_and_fix_outdated_flags();
+ stm_clean_old_weakrefs();
mc_total_in_use = mc_total_reserved = 0;
free_all_unused_local_pages();
diff --git a/rpython/translator/stm/src_stm/gcpage.h b/rpython/translator/stm/src_stm/gcpage.h
--- a/rpython/translator/stm/src_stm/gcpage.h
+++ b/rpython/translator/stm/src_stm/gcpage.h
@@ -46,7 +46,8 @@
/* These fields are in tx_public_descriptor rather than tx_descriptor.
The indirection allows us to keep around the lists of pages even
- after the thread finishes, until the next major collection.
+ after the thread finishes. Such a "zombie" tx_public_descriptor
+ is reused by the next thread that starts.
*/
#define GCPAGE_FIELDS_DECL \
/* The array 'pages_for_size' contains GC_SMALL_REQUESTS \
@@ -66,7 +67,10 @@
/* A set of all non-small objects (outside the nursery). \
We could also have a single global set, but this avoids \
locking in stmgcpage_malloc/free. */ \
- struct G2L nonsmall_objects;
+ struct G2L nonsmall_objects; \
+ \
+ /* Weakref support */ \
+ struct GcPtrList old_weakrefs;
#define LOCAL_GCPAGES() (thread_descriptor->public_descriptor)
@@ -81,6 +85,7 @@
void stmgcpage_add_prebuilt_root(gcptr obj);
void stmgcpage_possibly_major_collect(int force);
long stmgcpage_count(int quantity);
+gcptr stmgcpage_visit(gcptr);
extern struct GcPtrList stm_prebuilt_gcroots;
diff --git a/rpython/translator/stm/src_stm/nursery.c b/rpython/translator/stm/src_stm/nursery.c
--- a/rpython/translator/stm/src_stm/nursery.c
+++ b/rpython/translator/stm/src_stm/nursery.c
@@ -1,8 +1,7 @@
/* Imported by rpython/translator/stm/import_stmgc.py */
#include "stmimpl.h"
-
-static int is_in_nursery(struct tx_descriptor *d, gcptr obj)
+int stmgc_is_in_nursery(struct tx_descriptor *d, gcptr obj)
{
return (d->nursery_base <= (char*)obj && ((char*)obj) < d->nursery_end);
}
@@ -55,6 +54,7 @@
gcptrlist_delete(&d->old_objects_to_trace);
gcptrlist_delete(&d->public_with_young_copy);
+ gcptrlist_delete(&d->young_weakrefs);
}
void stmgc_minor_collect_soon(void)
@@ -101,6 +101,13 @@
return P;
}
+gcptr stm_allocate_immutable(size_t size, unsigned long tid)
+{
+ gcptr P = stm_allocate(size, tid);
+ P->h_tid |= GCFLAG_IMMUTABLE;
+ return P;
+}
+
gcptr stmgc_duplicate(gcptr P)
{
size_t size = stmgc_size(P);
@@ -148,7 +155,7 @@
gcptr fresh_old_copy;
struct tx_descriptor *d = thread_descriptor;
- if (!is_in_nursery(d, obj)) {
+ if (!stmgc_is_in_nursery(d, obj)) {
/* not a nursery object */
}
else {
@@ -375,7 +382,7 @@
for (i = d->list_of_read_objects.size - 1; i >= limit; --i) {
gcptr obj = items[i];
- if (!is_in_nursery(d, obj)) {
+ if (!stmgc_is_in_nursery(d, obj)) {
/* non-young or visited young objects are kept */
continue;
}
@@ -409,6 +416,7 @@
{
assert(gcptrlist_size(&d->old_objects_to_trace) == 0);
assert(gcptrlist_size(&d->public_with_young_copy) == 0);
+ assert(gcptrlist_size(&d->young_weakrefs) == 0);
assert(gcptrlist_size(&d->public_descriptor->stolen_objects) == 0);
spinlock_release(d->public_descriptor->collection_lock);
@@ -444,6 +452,8 @@
surviving young-but-outside-the-nursery objects have been flagged
with GCFLAG_OLD
*/
+ stm_move_young_weakrefs(d);
+
teardown_minor_collect(d);
assert(!stm_has_got_any_lock(d));
@@ -510,6 +520,7 @@
!g2l_any_entry(&d->young_objects_outside_nursery)*/ ) {
/* there is no young object */
assert(gcptrlist_size(&d->public_with_young_copy) == 0);
+ assert(gcptrlist_size(&d->young_weakrefs) == 0);
assert(gcptrlist_size(&d->list_of_read_objects) >=
d->num_read_objects_known_old);
assert(gcptrlist_size(&d->private_from_protected) >=
diff --git a/rpython/translator/stm/src_stm/nursery.h b/rpython/translator/stm/src_stm/nursery.h
--- a/rpython/translator/stm/src_stm/nursery.h
+++ b/rpython/translator/stm/src_stm/nursery.h
@@ -51,7 +51,10 @@
still in the same transaction, to know that the initial \
part of the lists cannot contain young objects any more. */ \
long num_private_from_protected_known_old; \
- long num_read_objects_known_old;
+ long num_read_objects_known_old; \
+ \
+ /* Weakref support */ \
+ struct GcPtrList young_weakrefs;
struct tx_descriptor; /* from et.h */
@@ -65,5 +68,6 @@
size_t stmgc_size(gcptr);
void stmgc_trace(gcptr, void visit(gcptr *));
void stmgc_minor_collect_soon(void);
+int stmgc_is_in_nursery(struct tx_descriptor *d, gcptr obj);
#endif
diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision
--- a/rpython/translator/stm/src_stm/revision
+++ b/rpython/translator/stm/src_stm/revision
@@ -1,1 +1,1 @@
-dd0aff1663a1
+4cad3aa5a20b
diff --git a/rpython/translator/stm/src_stm/steal.c b/rpython/translator/stm/src_stm/steal.c
--- a/rpython/translator/stm/src_stm/steal.c
+++ b/rpython/translator/stm/src_stm/steal.c
@@ -2,11 +2,13 @@
#include "stmimpl.h"
-gcptr stm_stub_malloc(struct tx_public_descriptor *pd)
+gcptr stm_stub_malloc(struct tx_public_descriptor *pd, size_t minsize)
{
assert(pd->collection_lock != 0);
+ if (minsize < sizeof(struct stm_stub_s))
+ minsize = sizeof(struct stm_stub_s);
- gcptr p = stmgcpage_malloc(sizeof(struct stm_stub_s));
+ gcptr p = stmgcpage_malloc(minsize);
STUB_THREAD(p) = pd;
return p;
}
@@ -22,9 +24,56 @@
{
gcptr stub, obj = *pobj;
if (obj == NULL || (obj->h_tid & (GCFLAG_PUBLIC | GCFLAG_OLD)) ==
- (GCFLAG_PUBLIC | GCFLAG_OLD))
+ (GCFLAG_PUBLIC | GCFLAG_OLD))
return;
+ if (obj->h_tid & GCFLAG_IMMUTABLE) {
+ assert(!(obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED));
+ if (obj->h_tid & GCFLAG_PUBLIC) {
+ /* young public, replace with stolen old copy */
+ assert(obj->h_tid & GCFLAG_NURSERY_MOVED);
+ assert(IS_POINTER(obj->h_revision));
+ stub = (gcptr)obj->h_revision;
+ assert(!IS_POINTER(stub->h_revision)); /* not outdated */
+ goto done;
+ }
+
+ /* old or young protected! mark as PUBLIC */
+ if (!(obj->h_tid & GCFLAG_OLD)) {
+ /* young protected */
+ gcptr O;
+
+ if (obj->h_tid & GCFLAG_HAS_ID) {
+ /* use id-copy for us */
+ O = (gcptr)obj->h_original;
+ obj->h_tid &= ~GCFLAG_HAS_ID;
+ stm_copy_to_old_id_copy(obj, O);
+ O->h_original = 0;
+ } else {
+ O = stmgc_duplicate_old(obj);
+
+ /* young and without original? */
+ if (!(obj->h_original))
+ obj->h_original = (revision_t)O;
+ }
+ obj->h_tid |= (GCFLAG_NURSERY_MOVED | GCFLAG_PUBLIC);
+ obj->h_revision = (revision_t)O;
+
+ O->h_tid |= GCFLAG_PUBLIC;
+ /* here it is fine if it stays in read caches because
+ the object is immutable anyway and there are no
+ write_barriers allowed. */
+ dprintf(("steal prot immutable -> public: %p -> %p\n", obj, O));
+ stub = O;
+ goto done;
+ }
+ /* old protected: */
+ dprintf(("prot immutable -> public: %p\n", obj));
+ obj->h_tid |= GCFLAG_PUBLIC;
+
+ return;
+ }
+
/* we use 'all_stubs', a dictionary, in order to try to avoid
duplicate stubs for the same object. XXX maybe it would be
better to use a fast approximative cache that stays around for
@@ -39,8 +88,20 @@
assert(stub->h_revision == (((revision_t)obj) | 2));
goto done;
- not_found:
- stub = stm_stub_malloc(sd->foreign_pd);
+ not_found:;
+ size_t size = 0;
+ if (!obj->h_original && !(obj->h_tid & GCFLAG_OLD)) {
+ /* There shouldn't be a public, young object without
+ a h_original. But there can be priv/protected ones.
+ We have a young protected copy without an h_original
+ The stub we allocate will be the h_original, but
+ it must be big enough to be copied over by a major
+ collection later. */
+ assert(!(obj->h_tid & GCFLAG_PUBLIC));
+
+ size = stmgc_size(obj);
+ }
+ stub = stm_stub_malloc(sd->foreign_pd, size);
stub->h_tid = (obj->h_tid & STM_USER_TID_MASK) | GCFLAG_PUBLIC
| GCFLAG_STUB
| GCFLAG_OLD;
@@ -52,10 +113,9 @@
stub->h_original = (revision_t)obj;
}
else {
- /* There shouldn't be a public, young object without
- a h_original. But there can be protected ones. */
- assert(!(obj->h_tid & GCFLAG_PUBLIC));
- obj->h_original = (revision_t)stub;
+ /* this is the big-stub case described above */
+ obj->h_original = (revision_t)stub;
+ stub->h_original = 0; /* stub_malloc does not set to 0... */
if (obj->h_tid & GCFLAG_PRIVATE_FROM_PROTECTED) {
((gcptr)obj->h_revision)->h_original = (revision_t)stub;
}
diff --git a/rpython/translator/stm/src_stm/steal.h b/rpython/translator/stm/src_stm/steal.h
--- a/rpython/translator/stm/src_stm/steal.h
+++ b/rpython/translator/stm/src_stm/steal.h
@@ -10,7 +10,7 @@
#define STUB_THREAD(h) (((struct stm_stub_s *)(h))->s_thread)
-gcptr stm_stub_malloc(struct tx_public_descriptor *);
+gcptr stm_stub_malloc(struct tx_public_descriptor *, size_t minsize);
void stm_steal_stub(gcptr);
gcptr stm_get_stolen_obj(long index); /* debugging */
void stm_normalize_stolen_objects(struct tx_descriptor *);
diff --git a/rpython/translator/stm/src_stm/stmgc.c b/rpython/translator/stm/src_stm/stmgc.c
--- a/rpython/translator/stm/src_stm/stmgc.c
+++ b/rpython/translator/stm/src_stm/stmgc.c
@@ -11,5 +11,6 @@
#include "gcpage.c"
#include "stmsync.c"
#include "extra.c"
+#include "weakref.c"
#include "dbgmem.c"
#include "fprintcolor.c"
diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h
--- a/rpython/translator/stm/src_stm/stmgc.h
+++ b/rpython/translator/stm/src_stm/stmgc.h
@@ -30,6 +30,9 @@
/* allocate an object out of the local nursery */
gcptr stm_allocate(size_t size, unsigned long tid);
+/* allocate an object that is immutable. It cannot be changed with
+   a stm_write_barrier() or after the next commit */
+gcptr stm_allocate_immutable(size_t size, unsigned long tid);
/* returns a never changing hash for the object */
revision_t stm_hash(gcptr);
@@ -55,11 +58,19 @@
int stm_enter_callback_call(void);
void stm_leave_callback_call(int);
-/* read/write barriers (the most general versions only for now) */
-#if 0 // (optimized version below)
-gcptr stm_read_barrier(gcptr);
-gcptr stm_write_barrier(gcptr);
-#endif
+/* read/write barriers (the most general versions only for now).
+
+ - the read barrier must be applied before reading from an object.
+ the result is valid as long as we're in the same transaction,
+ and stm_write_barrier() is not called on the same object.
+
+ - the write barrier must be applied before writing to an object.
+ the result is valid for a shorter period of time: we have to
+ do stm_write_barrier() again if we ended the transaction, or
+ if we did a potential collection (e.g. stm_allocate()).
+*/
+static inline gcptr stm_read_barrier(gcptr);
+static inline gcptr stm_write_barrier(gcptr);
/* start a new transaction, calls callback(), and when it returns
finish that transaction. callback() is called with the 'arg'
@@ -115,6 +126,14 @@
void stm_minor_collect(void);
void stm_major_collect(void);
+/* weakref support: allocate a weakref object, and set it to point
+ weakly to 'obj'. The weak pointer offset is hard-coded to be at
+ 'size - WORD'. Important: stmcb_trace() must NOT trace it.
+ Weakrefs are *immutable*! Don't attempt to use stm_write_barrier()
+ on them. */
+gcptr stm_weakref_allocate(size_t size, unsigned long tid, gcptr obj);
+
+
/**************** END OF PUBLIC INTERFACE *****************/
/************************************************************/
diff --git a/rpython/translator/stm/src_stm/stmimpl.h b/rpython/translator/stm/src_stm/stmimpl.h
--- a/rpython/translator/stm/src_stm/stmimpl.h
+++ b/rpython/translator/stm/src_stm/stmimpl.h
@@ -37,5 +37,6 @@
#include "steal.h"
#include "stmsync.h"
#include "extra.h"
+#include "weakref.h"
#endif
More information about the pypy-commit
mailing list