[pypy-commit] stmgc default: Starting on allocating big objects (larger than 36 words)
arigo
noreply at buildbot.pypy.org
Wed Jun 26 18:54:57 CEST 2013
Author: Armin Rigo <arigo at tunes.org>
Branch:
Changeset: r290:a49bf0eadb2d
Date: 2013-06-26 18:54 +0200
http://bitbucket.org/pypy/stmgc/changeset/a49bf0eadb2d/
Log: Starting on allocating big objects (larger than 36 words)
diff --git a/c4/dbgmem.c b/c4/dbgmem.c
--- a/c4/dbgmem.c
+++ b/c4/dbgmem.c
@@ -60,7 +60,8 @@
for (i = 0; i < nb_pages; i++)
accessible_pages[base + i] = 42;
- dprintf(("stm_malloc(%ld): %p\n", (long)sz, result));
+ dprintf(("stm_malloc(%zu): %p\n", sz, result));
+ assert(((intptr_t)(result + sz) & (PAGE_SIZE-1)) == 0);
return result;
}
diff --git a/c4/gcpage.c b/c4/gcpage.c
--- a/c4/gcpage.c
+++ b/c4/gcpage.c
@@ -88,25 +88,39 @@
revision_t next, target;
restart:
next = ACCESS_ONCE(countdown_next_major_coll);
- if (next >= size)
+ if (next >= size) {
target = next - size;
- else
+ }
+ else {
+ /* we cannot do a major collection right now, but we can move up
the time of the next minor collection (which will be followed
by a major collection) */
target = 0;
+ stmgc_minor_collect_soon();
+ }
if (!bool_cas(&countdown_next_major_coll, next, target))
goto restart;
}
-static gcptr allocate_new_page(int size_class)
+static char *alloc_tracked_memory(size_t size)
{
/* Adjust the threshold; the caller is responsible for detecting the
condition that the threshold reached 0. */
- stmgcpage_reduce_threshold(GC_PAGE_SIZE);
+ stmgcpage_reduce_threshold(size);
+ char *result = stm_malloc(size);
+ if (!result) {
+ stm_fatalerror("alloc_tracked_memory: out of memory "
+ "allocating %zu bytes\n", size);
+ }
+ return result;
+}
+
+static gcptr allocate_new_page(int size_class)
+{
/* Allocate and return a new page for the given size_class. */
- page_header_t *page = (page_header_t *)stm_malloc(GC_PAGE_SIZE);
- if (!page) {
- stm_fatalerror("allocate_new_page: out of memory!\n");
- }
+ page_header_t *page = (page_header_t *)alloc_tracked_memory(GC_PAGE_SIZE);
+
struct tx_public_descriptor *gcp = LOCAL_GCPAGES();
gcp->count_pages++;
count_global_pages++;
@@ -155,11 +169,13 @@
}
gcp->free_loc_for_size[size_class] = (gcptr)result->h_revision;
//stm_dbgmem_used_again(result, size_class * WORD, 0);
- dprintf(("stmgcpage_malloc(%ld): %p\n", (long)size, result));
+ dprintf(("stmgcpage_malloc(%zu): %p\n", size, result));
return result;
}
else {
- stm_fatalerror("XXX stmgcpage_malloc: too big!\n");
+ gcptr result = (gcptr)alloc_tracked_memory(size);
+ dprintf(("stmgcpage_malloc(BIG %zu): %p\n", size, result));
+ return result;
}
}
@@ -183,7 +199,7 @@
//stm_dbgmem_not_used(obj, size_class * WORD, 0);
}
else {
- stm_fatalerror("XXX stmgcpage_free: too big!\n");
+ stm_free(obj, stmgc_size(obj));
}
}
diff --git a/c4/nursery.c b/c4/nursery.c
--- a/c4/nursery.c
+++ b/c4/nursery.c
@@ -44,6 +44,12 @@
gcptrlist_delete(&d->public_with_young_copy);
}
+void stmgc_minor_collect_soon(void)
+{
+ struct tx_descriptor *d = thread_descriptor;
+ d->nursery_current = d->nursery_end;
+}
+
static char *collect_and_allocate_size(size_t size); /* forward */
inline static char *allocate_nursery(size_t size, int can_collect)
diff --git a/c4/nursery.h b/c4/nursery.h
--- a/c4/nursery.h
+++ b/c4/nursery.h
@@ -47,5 +47,6 @@
gcptr stmgc_duplicate_old(gcptr);
size_t stmgc_size(gcptr);
void stmgc_trace(gcptr, void visit(gcptr *));
+void stmgc_minor_collect_soon(void);
#endif
diff --git a/c4/test/test_gcpage.py b/c4/test/test_gcpage.py
--- a/c4/test/test_gcpage.py
+++ b/c4/test/test_gcpage.py
@@ -413,3 +413,14 @@
lib.stm_pop_root()
p1b = lib.stm_read_barrier(p1)
check_not_free(p1b)
+
+def test_big_old_object():
+ p1 = oalloc(HDR + 50 * WORD)
+ # assert did not crash
+
+def test_big_old_object_free():
+ p1 = oalloc(HDR + 50 * WORD)
+ p1b = lib.stm_write_barrier(p1)
+ assert p1b == p1
+ lib.stm_commit_transaction()
+ lib.stm_begin_inevitable_transaction()
More information about the pypy-commit
mailing list