[pypy-commit] stmgc c8-card-marking: Merge with c8-overflow-objs (not compiling)
Raemi
noreply at buildbot.pypy.org
Fri Feb 27 22:44:04 CET 2015
Author: Remi Meier <remi.meier at gmail.com>
Branch: c8-card-marking
Changeset: r1678:96f5911028e7
Date: 2015-02-27 14:30 +0100
http://bitbucket.org/pypy/stmgc/changeset/96f5911028e7/
Log: Merge with c8-overflow-objs (not compiling)
diff --git a/c8/stm/core.c b/c8/stm/core.c
--- a/c8/stm/core.c
+++ b/c8/stm/core.c
@@ -636,7 +636,8 @@
assert(!_is_in_nursery(obj));
assert(obj->stm_flags & GCFLAG_WRITE_BARRIER);
- if (obj->stm_flags & GCFLAG_WB_EXECUTED) {
+ if (obj->stm_flags & GCFLAG_WB_EXECUTED
+     || isoverflow) {
/* already executed WB once in this transaction. do GC
part again: */
write_gc_only_path(obj, mark_card);
@@ -922,7 +923,7 @@
}
assert(list_is_empty(STM_PSEGMENT->modified_old_objects));
- assert(list_is_empty(STM_PSEGMENT->new_objects));
+ assert(list_is_empty(STM_PSEGMENT->large_overflow_objects));
assert(list_is_empty(STM_PSEGMENT->objects_pointing_to_nursery));
assert(list_is_empty(STM_PSEGMENT->young_weakrefs));
assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery));
@@ -986,7 +987,7 @@
_verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num));
list_clear(STM_PSEGMENT->objects_pointing_to_nursery);
list_clear(STM_PSEGMENT->old_objects_with_cards_set);
- list_clear(STM_PSEGMENT->new_objects);
+ list_clear(STM_PSEGMENT->large_overflow_objects);
release_thread_segment(tl);
/* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */
@@ -1006,15 +1007,17 @@
#endif
}
-static void push_new_objects_to_other_segments(void)
+static void push_large_overflow_objects_to_other_segments(void)
{
+ if (list_is_empty(STM_PSEGMENT->large_overflow_objects))
+ return;
+
+ /* XXX: also pushes small ones right now */
struct stm_priv_segment_info_s *pseg = get_priv_segment(STM_SEGMENT->segment_num);
acquire_privatization_lock(STM_SEGMENT->segment_num);
- LIST_FOREACH_R(STM_PSEGMENT->new_objects, object_t *,
+ LIST_FOREACH_R(STM_PSEGMENT->large_overflow_objects, object_t *,
({
- assert(item->stm_flags & GCFLAG_WB_EXECUTED);
- item->stm_flags &= ~GCFLAG_WB_EXECUTED;
- if (obj_should_use_cards(pseg->pub.segment_base, item))
+ assert(!(item->stm_flags & GCFLAG_WB_EXECUTED));
+ if (obj_should_use_cards(pseg->pub.segment_base, item))
_reset_object_cards(pseg, item, CARD_CLEAR, false);
synchronize_object_enqueue(item, true);
}));
@@ -1029,7 +1032,7 @@
in handle_segfault_in_page() that also copies
unknown-to-the-segment/uncommitted things.
*/
- list_clear(STM_PSEGMENT->new_objects);
+ list_clear(STM_PSEGMENT->large_overflow_objects);
}
@@ -1044,19 +1047,32 @@
dprintf(("> stm_commit_transaction()\n"));
minor_collection(1);
- push_new_objects_to_other_segments();
+ push_large_overflow_objects_to_other_segments();
/* push before validate. otherwise they are reachable too early */
bool was_inev = STM_PSEGMENT->transaction_state == TS_INEVITABLE;
_validate_and_add_to_commit_log();
+ stm_rewind_jmp_forget(STM_SEGMENT->running_thread);
+
/* XXX do we still need a s_mutex_lock() section here? */
s_mutex_lock();
+ commit_finalizers();
+ /* update 'overflow_number' if needed */
+ if (STM_PSEGMENT->overflow_number_has_been_used) {
+ highest_overflow_number += GCFLAG_OVERFLOW_NUMBER_bit0;
+ assert(highest_overflow_number != /* XXX else, overflow! */
+ (uint32_t)-GCFLAG_OVERFLOW_NUMBER_bit0);
+ STM_PSEGMENT->overflow_number = highest_overflow_number;
+ STM_PSEGMENT->overflow_number_has_been_used = false;
+ }
+
+ invoke_and_clear_user_callbacks(0); /* for commit */
+
+ /* >>>>> there may be a FORK() happening in the safepoint below <<<<<*/
enter_safe_point_if_requested();
assert(STM_SEGMENT->nursery_end == NURSERY_END);
- stm_rewind_jmp_forget(STM_SEGMENT->running_thread);
-
/* if a major collection is required, do it here */
if (is_major_collection_requested()) {
synchronize_all_threads(STOP_OTHERS_UNTIL_MUTEX_UNLOCK);
@@ -1068,10 +1084,6 @@
_verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num));
- commit_finalizers();
-
- invoke_and_clear_user_callbacks(0); /* for commit */
-
if (globally_unique_transaction && was_inev) {
committed_globally_unique_transaction();
}
@@ -1184,7 +1196,7 @@
list_clear(pseg->objects_pointing_to_nursery);
list_clear(pseg->old_objects_with_cards_set);
- list_clear(pseg->new_objects);
+ list_clear(pseg->large_overflow_objects);
list_clear(pseg->young_weakrefs);
#pragma pop_macro("STM_SEGMENT")
#pragma pop_macro("STM_PSEGMENT")
diff --git a/c8/stm/core.h b/c8/stm/core.h
--- a/c8/stm/core.h
+++ b/c8/stm/core.h
@@ -43,6 +43,14 @@
GCFLAG_CARDS_SET = _STM_GCFLAG_CARDS_SET,
GCFLAG_VISITED = 0x10,
GCFLAG_FINALIZATION_ORDERING = 0x20,
+ /* All remaining bits of the 32-bit 'stm_flags' field are taken by
+ the "overflow number". This is a number that identifies the
+ "overflow objects" from the current transaction among all old
+ objects. More precisely, overflow objects are objects from the
+ current transaction that have been flushed out of the nursery,
+ which occurs if the same transaction allocates too many objects.
+ */
+ GCFLAG_OVERFLOW_NUMBER_bit0 = 0x40 /* must be last */
};
#define SYNC_QUEUE_SIZE 31
@@ -98,8 +106,9 @@
/* list of objects created in the current transaction and
that survived at least one minor collection. They need
to be synchronized to other segments on commit, but they
- do not need to be in the commit log entry. */
- struct list_s *new_objects;
+ do not need to be in the commit log entry.
+ XXX: for now it also contains small overflow objs */
+ struct list_s *large_overflow_objects;
uint8_t privatization_lock; // XXX KILL
@@ -111,6 +120,14 @@
struct tree_s *callbacks_on_commit_and_abort[2];
+ /* This is the number stored in the overflowed objects (a multiple of
+ GCFLAG_OVERFLOW_NUMBER_bit0). It is incremented when the
+ transaction is done, but only if we actually overflowed any
+ object; otherwise, no object has got this number. */
+ uint32_t overflow_number;
+ bool overflow_number_has_been_used;
+
+
struct stm_commit_log_entry_s *last_commit_log_entry;
struct stm_shadowentry_s *shadowstack_at_start_of_transaction;
@@ -203,6 +220,9 @@
#define REAL_ADDRESS(segment_base, src) ((segment_base) + (uintptr_t)(src))
+#define IS_OVERFLOW_OBJ(pseg, obj) (((obj)->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) \
+ == (pseg)->overflow_number)
+
static inline uintptr_t get_index_to_card_index(uintptr_t index) {
return (index / CARD_SIZE) + 1;
}
diff --git a/c8/stm/finalizer.c b/c8/stm/finalizer.c
--- a/c8/stm/finalizer.c
+++ b/c8/stm/finalizer.c
@@ -98,14 +98,14 @@
list_clear(lst);
}
- /* also deals with newly created objects: they are at the tail of
+ /* also deals with overflow objects: they are at the tail of
old_objects_with_light_finalizers (this list is kept in order
and we cannot add any already-committed object) */
lst = pseg->old_objects_with_light_finalizers;
count = list_count(lst);
while (count > 0) {
object_t *obj = (object_t *)list_item(lst, --count);
- if (!(obj->stm_flags & GCFLAG_WB_EXECUTED))
+ if (!IS_OVERFLOW_OBJ(pseg, obj))
break;
lst->count = count;
if (must_fix_gs) {
@@ -264,11 +264,14 @@
LIST_APPEND(_finalizer_tmpstack, obj);
}
-static inline struct list_s *finalizer_trace(char *base, object_t *obj,
- struct list_s *lst)
+static inline struct list_s *finalizer_trace(
+ struct stm_priv_segment_info_s *pseg, object_t *obj, struct list_s *lst)
{
- if (!is_new_object(obj))
+ char *base;
+ if (!is_overflow_obj_safe(pseg, obj))
base = stm_object_pages;
+ else
+ base = pseg->pub.segment_base;
struct object_s *realobj = (struct object_s *)REAL_ADDRESS(base, obj);
_finalizer_tmpstack = lst;
@@ -277,7 +280,8 @@
}
-static void _recursively_bump_finalization_state_from_2_to_3(char *base, object_t *obj)
+static void _recursively_bump_finalization_state_from_2_to_3(
+ struct stm_priv_segment_info_s *pseg, object_t *obj)
{
assert(_finalization_state(obj) == 2);
struct list_s *tmpstack = _finalizer_emptystack;
@@ -289,7 +293,7 @@
realobj->stm_flags &= ~GCFLAG_FINALIZATION_ORDERING;
/* trace */
- tmpstack = finalizer_trace(base, obj, tmpstack);
+ tmpstack = finalizer_trace(pseg, obj, tmpstack);
}
if (list_is_empty(tmpstack))
@@ -300,14 +304,16 @@
_finalizer_emptystack = tmpstack;
}
-static void _recursively_bump_finalization_state_from_1_to_2(char *base, object_t *obj)
+static void _recursively_bump_finalization_state_from_1_to_2(
+ struct stm_priv_segment_info_s *pseg, object_t *obj)
{
assert(_finalization_state(obj) == 1);
/* The call will add GCFLAG_VISITED recursively, thus bump state 1->2 */
- mark_visit_possibly_new_object(base, obj);
+ mark_visit_possibly_new_object(obj, pseg);
}
-static struct list_s *mark_finalize_step1(char *base, struct finalizers_s *f)
+static struct list_s *mark_finalize_step1(
+ struct stm_priv_segment_info_s *pseg, struct finalizers_s *f)
{
if (f == NULL)
return NULL;
@@ -336,21 +342,22 @@
int state = _finalization_state(y);
if (state <= 0) {
_bump_finalization_state_from_0_to_1(y);
- pending = finalizer_trace(base, y, pending);
+ pending = finalizer_trace(pseg, y, pending);
}
else if (state == 2) {
- _recursively_bump_finalization_state_from_2_to_3(base, y);
+ _recursively_bump_finalization_state_from_2_to_3(pseg, y);
}
}
_finalizer_pending = pending;
assert(_finalization_state(x) == 1);
- _recursively_bump_finalization_state_from_1_to_2(base, x);
+ _recursively_bump_finalization_state_from_1_to_2(pseg, x);
}
return marked;
}
-static void mark_finalize_step2(char *base, struct finalizers_s *f,
- struct list_s *marked)
+static void mark_finalize_step2(
+ struct stm_priv_segment_info_s *pseg, struct finalizers_s *f,
+ struct list_s *marked)
{
if (f == NULL)
return;
@@ -367,7 +374,7 @@
if (run_finalizers == NULL)
run_finalizers = list_create();
LIST_APPEND(run_finalizers, x);
- _recursively_bump_finalization_state_from_2_to_3(base, x);
+ _recursively_bump_finalization_state_from_2_to_3(pseg, x);
}
else {
struct list_s *lst = f->objects_with_finalizers;
@@ -403,29 +410,28 @@
long j;
for (j = 1; j < NB_SEGMENTS; j++) {
struct stm_priv_segment_info_s *pseg = get_priv_segment(j);
- marked_seg[j] = mark_finalize_step1(pseg->pub.segment_base,
- pseg->finalizers);
+ marked_seg[j] = mark_finalize_step1(pseg, pseg->finalizers);
}
- marked_seg[0] = mark_finalize_step1(stm_object_pages, &g_finalizers);
+ marked_seg[0] = mark_finalize_step1(get_priv_segment(0), &g_finalizers);
LIST_FREE(_finalizer_pending);
for (j = 1; j < NB_SEGMENTS; j++) {
struct stm_priv_segment_info_s *pseg = get_priv_segment(j);
- mark_finalize_step2(pseg->pub.segment_base, pseg->finalizers,
- marked_seg[j]);
+ mark_finalize_step2(pseg, pseg->finalizers, marked_seg[j]);
}
- mark_finalize_step2(stm_object_pages, &g_finalizers, marked_seg[0]);
+ mark_finalize_step2(get_priv_segment(0), &g_finalizers, marked_seg[0]);
LIST_FREE(_finalizer_emptystack);
}
-static void mark_visit_from_finalizer1(char *base, struct finalizers_s *f)
+static void mark_visit_from_finalizer1(
+ struct stm_priv_segment_info_s *pseg, struct finalizers_s *f)
{
if (f != NULL && f->run_finalizers != NULL) {
LIST_FOREACH_R(f->run_finalizers, object_t * /*item*/,
({
- mark_visit_possibly_new_object(base, item);
+ mark_visit_possibly_new_object(item, pseg);
}));
}
}
@@ -435,9 +441,9 @@
long j;
for (j = 1; j < NB_SEGMENTS; j++) {
struct stm_priv_segment_info_s *pseg = get_priv_segment(j);
- mark_visit_from_finalizer1(pseg->pub.segment_base, pseg->finalizers);
+ mark_visit_from_finalizer1(pseg, pseg->finalizers);
}
- mark_visit_from_finalizer1(stm_object_pages, &g_finalizers);
+ mark_visit_from_finalizer1(get_priv_segment(0), &g_finalizers);
}
static void _execute_finalizers(struct finalizers_s *f)
diff --git a/c8/stm/forksupport.c b/c8/stm/forksupport.c
--- a/c8/stm/forksupport.c
+++ b/c8/stm/forksupport.c
@@ -84,11 +84,23 @@
stm_thread_local_t *tl = pr->pub.running_thread;
dprintf(("forksupport_child: abort in seg%ld\n", i));
assert(tl->associated_segment_num == i);
- assert(pr->transaction_state == TS_REGULAR);
+ assert(pr->transaction_state != TS_INEVITABLE);
set_gs_register(get_segment_base(i));
assert(STM_SEGMENT->segment_num == i);
s_mutex_lock();
+ if (pr->transaction_state == TS_NONE) {
+ /* just committed, TS_NONE but still has running_thread */
+
+ /* do _finish_transaction() */
+ STM_PSEGMENT->safe_point = SP_NO_TRANSACTION;
+ list_clear(STM_PSEGMENT->objects_pointing_to_nursery);
+ list_clear(STM_PSEGMENT->large_overflow_objects);
+
+ s_mutex_unlock();
+ return;
+ }
+
#ifndef NDEBUG
pr->running_pthread = pthread_self();
#endif
diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c
--- a/c8/stm/gcpage.c
+++ b/c8/stm/gcpage.c
@@ -200,11 +200,16 @@
/************************************************************/
+static bool is_overflow_obj_safe(struct stm_priv_segment_info_s *pseg, object_t *obj)
+{
+ /* this function first also checks if the page is accessible in order
+ to not cause segfaults during major gc (it does exactly the same
+ as IS_OVERFLOW_OBJ otherwise) */
+ if (get_page_status_in(pseg->pub.segment_num, (uintptr_t)obj / 4096UL) == PAGE_NO_ACCESS)
+ return false;
-static bool is_new_object(object_t *obj)
-{
- struct object_s *realobj = (struct object_s*)REAL_ADDRESS(stm_object_pages, obj); /* seg0 */
- return realobj->stm_flags & GCFLAG_WB_EXECUTED;
+ struct object_s *realobj = (struct object_s*)REAL_ADDRESS(pseg->pub.segment_base, obj);
+ return IS_OVERFLOW_OBJ(pseg, realobj);
}
@@ -230,7 +235,10 @@
}
-static void mark_and_trace(object_t *obj, char *segment_base)
+static void mark_and_trace(
+ object_t *obj,
+ char *segment_base, /* to trace obj in */
+ struct stm_priv_segment_info_s *pseg) /* to trace children in */
{
/* mark the obj and trace all reachable objs from it */
@@ -242,36 +250,40 @@
stmcb_trace(realobj, &mark_record_trace);
/* trace all references found in sharing seg0 (should always be
- up-to-date and not cause segfaults, except for new objs) */
+ up-to-date and not cause segfaults, except for overflow objs) */
+ segment_base = pseg->pub.segment_base;
while (!list_is_empty(marked_objects_to_trace)) {
obj = (object_t *)list_pop_item(marked_objects_to_trace);
- char *base = is_new_object(obj) ? segment_base : stm_object_pages;
+ char *base = is_overflow_obj_safe(pseg, obj) ? segment_base : stm_object_pages;
realobj = (struct object_s *)REAL_ADDRESS(base, obj);
stmcb_trace(realobj, &mark_record_trace);
}
}
-static inline void mark_visit_object(object_t *obj, char *segment_base)
+static inline void mark_visit_object(
+ object_t *obj,
char *segment_base, /* to trace obj in */
+ struct stm_priv_segment_info_s *pseg) /* to trace children in */
{
/* if already visited, don't trace */
if (obj == NULL || mark_visited_test_and_set(obj))
return;
- mark_and_trace(obj, segment_base);
+ mark_and_trace(obj, segment_base, pseg);
}
-static void mark_visit_possibly_new_object(char *segment_base, object_t *obj)
+static void mark_visit_possibly_new_object(object_t *obj, struct stm_priv_segment_info_s *pseg)
{
/* if newly allocated object, we trace in segment_base, otherwise in
the sharing seg0 */
if (obj == NULL)
return;
- if (is_new_object(obj)) {
- mark_visit_object(obj, segment_base);
+ if (is_overflow_obj_safe(pseg, obj)) {
+ mark_visit_object(obj, pseg->pub.segment_base, pseg);
} else {
- mark_visit_object(obj, stm_object_pages);
+ mark_visit_object(obj, stm_object_pages, pseg);
}
}
@@ -282,8 +294,10 @@
end = (const struct stm_shadowentry_s *)(slice + size);
for (; p < end; p++)
if ((((uintptr_t)p->ss) & 3) == 0) {
- assert(!is_new_object(p->ss));
- mark_visit_object(p->ss, stm_object_pages); // seg0
+ mark_visit_object(p->ss, stm_object_pages, // seg0
+ /* there should be no overflow objs not already
+ visited, so any pseg is fine really: */
+ get_priv_segment(STM_SEGMENT->segment_num));
}
return NULL;
}
@@ -350,7 +364,7 @@
and thus make all pages accessible. */
assert_obj_accessible_in(i, item);
- assert(!is_new_object(item)); /* should never be in that list */
+ assert(!is_overflow_obj_safe(get_priv_segment(i), item)); /* should never be in that list */
if (!mark_visited_test_and_set(item)) {
/* trace shared, committed version: only do this if we didn't
@@ -358,9 +372,9 @@
objs before mark_visit_from_modified_objects AND if we
do mark_and_trace on an obj that is modified in >1 segment,
the tracing always happens in seg0 (see mark_and_trace). */
- mark_and_trace(item, stm_object_pages);
+ mark_and_trace(item, stm_object_pages, get_priv_segment(i));
}
- mark_and_trace(item, base); /* private, modified version */
+ mark_and_trace(item, base, get_priv_segment(i)); /* private, modified version */
}));
list_clear(uniques);
@@ -372,7 +386,11 @@
{
if (testing_prebuilt_objs != NULL) {
LIST_FOREACH_R(testing_prebuilt_objs, object_t * /*item*/,
- mark_visit_object(item, stm_object_pages)); // seg0
+ mark_visit_object(item, stm_object_pages, // seg0
+ /* any pseg is fine, as we already traced modified
+ objs and thus covered all overflow objs reachable
+ from here */
+ get_priv_segment(STM_SEGMENT->segment_num)));
}
stm_thread_local_t *tl = stm_all_thread_locals;
@@ -380,7 +398,7 @@
/* look at all objs on the shadow stack (they are old but may
be uncommitted so far, so only exist in the associated_segment_num).
- IF they are uncommitted new objs, trace in the actual segment,
+ IF they are uncommitted overflow objs, trace in the actual segment,
otherwise, since we just executed a minor collection, they were
all synced to the sharing seg0. Thus we can trace them there.
@@ -392,17 +410,17 @@
If 'tl' is currently running, its 'last_associated_segment_num'
field is the segment number that contains the correct
version of its overflowed objects. */
- char *segment_base = get_segment_base(tl->last_associated_segment_num);
+ struct stm_priv_segment_info_s *pseg = get_priv_segment(tl->last_associated_segment_num);
struct stm_shadowentry_s *current = tl->shadowstack;
struct stm_shadowentry_s *base = tl->shadowstack_base;
while (current-- != base) {
if ((((uintptr_t)current->ss) & 3) == 0) {
- mark_visit_possibly_new_object(segment_base, current->ss);
+ mark_visit_possibly_new_object(current->ss, pseg);
}
}
- mark_visit_possibly_new_object(segment_base, tl->thread_local_obj);
+ mark_visit_possibly_new_object(tl->thread_local_obj, pseg);
tl = tl->next;
} while (tl != stm_all_thread_locals);
@@ -413,8 +431,8 @@
for (i = 1; i < NB_SEGMENTS; i++) {
if (get_priv_segment(i)->transaction_state != TS_NONE) {
mark_visit_possibly_new_object(
- get_segment_base(i),
- get_priv_segment(i)->threadlocal_at_start_of_transaction);
+ get_priv_segment(i)->threadlocal_at_start_of_transaction,
+ get_priv_segment(i));
stm_rewind_jmp_enum_shadowstack(
get_segment(i)->running_thread,
@@ -423,49 +441,6 @@
}
}
-static void ready_new_objects(void)
-{
-#pragma push_macro("STM_PSEGMENT")
-#pragma push_macro("STM_SEGMENT")
-#undef STM_PSEGMENT
-#undef STM_SEGMENT
- /* objs in new_objects only have garbage in the sharing seg0,
- since it is used to mark objs as visited, we must make
- sure the flag is cleared at the start of a major collection.
- (XXX: ^^^ may be optional if we have the part below)
-
- Also, we need to be able to recognize these objects in order
- to only trace them in the segment they are valid in. So we
- also make sure to set WB_EXECUTED in the sharing seg0. No
- other objs than new_objects have WB_EXECUTED in seg0 (since
- there can only be committed versions there).
- */
-
- long i;
- for (i = 1; i < NB_SEGMENTS; i++) {
- struct stm_priv_segment_info_s *pseg = get_priv_segment(i);
- struct list_s *lst = pseg->new_objects;
-
- LIST_FOREACH_R(lst, object_t* /*item*/,
- ({
- struct object_s *realobj;
- /* WB_EXECUTED always set in this segment */
- assert(realobj = (struct object_s*)REAL_ADDRESS(pseg->pub.segment_base, item));
- assert(realobj->stm_flags & GCFLAG_WB_EXECUTED);
-
- /* clear VISITED (garbage) and ensure WB_EXECUTED in seg0 */
- mark_visited_test_and_clear(item);
- realobj = (struct object_s*)REAL_ADDRESS(stm_object_pages, item);
- realobj->stm_flags |= GCFLAG_WB_EXECUTED;
-
- /* make sure this flag is cleared as well */
- realobj->stm_flags &= ~GCFLAG_FINALIZATION_ORDERING;
- }));
- }
-#pragma pop_macro("STM_SEGMENT")
-#pragma pop_macro("STM_PSEGMENT")
-}
-
static void clean_up_segment_lists(void)
{
@@ -494,10 +469,7 @@
({
struct object_s *realobj = (struct object_s *)
REAL_ADDRESS(pseg->pub.segment_base, (uintptr_t)item);
-
- assert(realobj->stm_flags & GCFLAG_WB_EXECUTED);
assert(!(realobj->stm_flags & GCFLAG_WRITE_BARRIER));
-
realobj->stm_flags |= GCFLAG_WRITE_BARRIER;
OPT_ASSERT(!(realobj->stm_flags & GCFLAG_CARDS_SET));
@@ -525,8 +497,8 @@
list_clear(lst);
- /* remove from new_objects all objects that die */
- lst = pseg->new_objects;
+ /* remove from large_overflow_objects all objects that die */
+ lst = pseg->large_overflow_objects;
uintptr_t n = list_count(lst);
while (n-- > 0) {
object_t *obj = (object_t *)list_item(lst, n);
@@ -703,8 +675,6 @@
DEBUG_EXPECT_SEGFAULT(false);
- ready_new_objects();
-
/* marking */
LIST_CREATE(marked_objects_to_trace);
mark_visit_from_modified_objects();
diff --git a/c8/stm/nursery.c b/c8/stm/nursery.c
--- a/c8/stm/nursery.c
+++ b/c8/stm/nursery.c
@@ -38,7 +38,7 @@
}
static inline bool _is_from_same_transaction(object_t *obj) {
- return _is_young(obj) || (obj->stm_flags & GCFLAG_WB_EXECUTED);
+ return _is_young(obj) || IS_OVERFLOW_OBJ(STM_PSEGMENT, obj);
}
long stm_can_move(object_t *obj)
@@ -132,12 +132,12 @@
nobj_sync_now = ((uintptr_t)nobj) | FLAG_SYNC_LARGE;
}
- /* if this is not during commit, we will add them to the new_objects
- list and push them to other segments on commit. Thus we can add
- the WB_EXECUTED flag so that they don't end up in modified_old_objects */
+ /* if this is not during commit, we make them overflow objects
+ and push them to other segments on commit. */
assert(!(nobj->stm_flags & GCFLAG_WB_EXECUTED));
+ assert((nobj->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) == 0);
if (!STM_PSEGMENT->minor_collect_will_commit_now) {
- nobj->stm_flags |= GCFLAG_WB_EXECUTED;
+ nobj->stm_flags |= STM_PSEGMENT->overflow_number;
}
/* Must trace the object later */
@@ -339,16 +339,17 @@
assert(!(obj->stm_flags & GCFLAG_CARDS_SET));
if (obj_sync_now & FLAG_SYNC_LARGE) {
+ /* XXX: SYNC_LARGE even set for small objs right now */
/* this is a newly allocated obj in this transaction. We must
either synchronize the object to other segments now, or
- add the object to new_objects list */
+ add the object to large_overflow_objects list */
struct stm_priv_segment_info_s *pseg = get_priv_segment(STM_SEGMENT->segment_num);
if (pseg->minor_collect_will_commit_now) {
acquire_privatization_lock(pseg->pub.segment_num);
synchronize_object_enqueue(obj);
release_privatization_lock(pseg->pub.segment_num);
} else {
- LIST_APPEND(pseg->new_objects, obj);
+ LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj);
}
_cards_cleared_in_object(pseg, obj);
}
@@ -357,7 +358,7 @@
lst = STM_PSEGMENT->objects_pointing_to_nursery;
}
- /* flush all new objects to other segments now */
+ /* flush all overflow objects to other segments now */
if (STM_PSEGMENT->minor_collect_will_commit_now) {
acquire_privatization_lock(STM_SEGMENT->segment_num);
synchronize_objects_flush();
@@ -475,6 +476,11 @@
dprintf(("minor_collection commit=%d\n", (int)commit));
STM_PSEGMENT->minor_collect_will_commit_now = commit;
+ if (!commit) {
+ /* 'STM_PSEGMENT->overflow_number' is used now by this collection,
+ in the sense that it's copied to the overflow objects */
+ STM_PSEGMENT->overflow_number_has_been_used = true;
+ }
collect_cardrefs_to_nursery();
diff --git a/c8/stm/nursery.h b/c8/stm/nursery.h
--- a/c8/stm/nursery.h
+++ b/c8/stm/nursery.h
@@ -2,6 +2,8 @@
#define NSE_SIGPAUSE _STM_NSE_SIGNAL_MAX
#define NSE_SIGABORT _STM_NSE_SIGNAL_ABORT
+static uint32_t highest_overflow_number;
+
static void _cards_cleared_in_object(struct stm_priv_segment_info_s *pseg, object_t *obj);
static void _reset_object_cards(struct stm_priv_segment_info_s *pseg,
object_t *obj, uint8_t mark_value,
diff --git a/c8/stm/setup.c b/c8/stm/setup.c
--- a/c8/stm/setup.c
+++ b/c8/stm/setup.c
@@ -100,7 +100,7 @@
pr->pub.segment_num = i;
pr->pub.segment_base = segment_base;
pr->modified_old_objects = list_create();
- pr->new_objects = list_create();
+ pr->large_overflow_objects = list_create();
pr->young_weakrefs = list_create();
pr->old_weakrefs = list_create();
pr->objects_pointing_to_nursery = list_create();
@@ -113,6 +113,8 @@
pr->old_objects_with_light_finalizers = list_create();
pr->last_commit_log_entry = &commit_log_root;
+ pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i;
+ highest_overflow_number = pr->overflow_number;
pr->pub.transaction_read_version = 0xff;
}
@@ -150,8 +152,8 @@
list_free(pr->objects_pointing_to_nursery);
list_free(pr->old_objects_with_cards_set);
list_free(pr->modified_old_objects);
- assert(list_is_empty(pr->new_objects));
- list_free(pr->new_objects);
+ assert(list_is_empty(pr->large_overflow_objects));
+ list_free(pr->large_overflow_objects);
list_free(pr->young_weakrefs);
list_free(pr->old_weakrefs);
tree_free(pr->young_outside_nursery);
diff --git a/c8/test/test_gcpage.py b/c8/test/test_gcpage.py
--- a/c8/test/test_gcpage.py
+++ b/c8/test/test_gcpage.py
@@ -473,3 +473,27 @@
stm_major_collect()
assert stm_get_char(s) == '\0'
self.commit_transaction()
+
+
+ def test_overflow_on_ss_in_major_gc(self):
+ self.start_transaction()
+ o = stm_allocate_refs(100)
+ p = stm_allocate(16)
+ stm_set_ref(o, 0, p)
+ self.push_root(o)
+ stm_minor_collect()
+ o = self.pop_root()
+ p = stm_get_ref(o, 0)
+ assert stm_get_char(p) == '\0'
+ self.push_root(o)
+
+ self.switch(1)
+
+ self.start_transaction()
+ stm_major_collect()
+ self.commit_transaction()
+
+ self.switch(0)
+ # p not freed
+ assert stm_get_char(p) == '\0'
+ self.commit_transaction()
More information about the pypy-commit
mailing list