[pypy-commit] pypy quad-color-gc: Remove object layout hack, update qcgc codebase

ntruessel pypy.commits at gmail.com
Fri Sep 2 03:11:51 EDT 2016


Author: Nicolas Truessel <ntruessel at njsm.de>
Branch: quad-color-gc
Changeset: r86837:d98166b96e01
Date: 2016-09-02 09:11 +0200
http://bitbucket.org/pypy/pypy/changeset/d98166b96e01/

Log:	Remove object layout hack, update qcgc codebase

diff --git a/rpython/memory/gc/qcgc.py b/rpython/memory/gc/qcgc.py
--- a/rpython/memory/gc/qcgc.py
+++ b/rpython/memory/gc/qcgc.py
@@ -1,5 +1,5 @@
 from rpython.memory.gc.base import GCBase
-#from rpython.memory.support import mangle_hash
+from rpython.memory.support import mangle_hash
 from rpython.rtyper.lltypesystem import rffi, lltype, llgroup, llmemory, llarena
 from rpython.rtyper.lltypesystem.lloperation import llop
 from rpython.rlib.debug import ll_assert
@@ -19,22 +19,19 @@
     gcflag_extra = 0   # or a real GC flag that is always 0 when not collecting
 
     typeid_is_in_field = 'tid'
-    withhash_flag_is_in_field = 'flags', QCGC_HAS_HASH
 
     TRANSLATION_PARAMS = {}
     HDR = lltype.Struct(
             'header',
-            #('hdr', rffi.COpaque('object_t', hints={"is_qcgc_header": True})),
-            ('flags', lltype.Signed),   # XXX: exploits knowledge about object_t
+            ('hdr', rffi.COpaque('object_t', hints={"is_qcgc_header": True})),
             ('tid', lltype.Signed),
             ('hash', lltype.Signed))
     #HDR = rffi.COpaque('object_t')
 
-    def init_gc_object(self, addr, typeid, flags=0):
+    def init_gc_object(self, addr, typeid):
         hdr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR))
-        hdr.flags = rffi.cast(lltype.Signed, flags)
         hdr.tid = rffi.cast(lltype.Signed, typeid)
-        hdr.hash = rffi.cast(lltype.Signed, addr)
+        hdr.hash = rffi.cast(lltype.Signed, 0)
 
     def malloc_fixedsize_clear(self, typeid, size,
                                needs_finalizer=False,
@@ -63,15 +60,10 @@
         (obj + offset_to_length).signed[0] = length
         return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
 
-    def init_gc_object_immortal(self, addr, typeid, flags=0): # XXX: Prebuilt Objects?
+    def init_gc_object_immortal(self, addr, typeid, flags=0):
         assert flags == 0
-        ptr = self.gcheaderbuilder.object_from_header(addr.ptr)
-        prebuilt_hash = lltype.identityhash_nocache(ptr)
-        assert prebuilt_hash != 0
-        flags |= QCGC_PREBUILT_OBJECT
         #
-        self.init_gc_object(addr, typeid.index, flags)
-        llmemory.cast_adr_to_ptr(addr, lltype.Ptr(self.HDR)).hash = prebuilt_hash
+        self.init_gc_object(addr, typeid.index)
 
     def collect(self, gen=1):
         """Do a minor (gen=0) or major (gen>0) collection."""
@@ -84,11 +76,7 @@
         # this. Unfortunately I don't fully understand what this is supposed to
         # do, so I can't optimize it ATM.
         return False
-        # Possible implementation?
-        #llop.gc_writebarrier(dest_addr)
-        #return True
 
-    # XXX: WRITE BARRIER
     def write_barrier(self, addr_struct):
         llop.qcgc_write_barrier(lltype.Void, addr_struct)
 
@@ -97,14 +85,14 @@
         pass
 
     def id_or_identityhash(self, gcobj, is_hash):
-        hdr = self.header(llmemory.cast_ptr_to_adr(gcobj))
-        has_hash = (hdr.flags & QCGC_HAS_HASH)
+        obj = llmemory.cast_ptr_to_adr(gcobj)
+        hdr = self.header(obj)
         i = hdr.hash
         #
-        if is_hash:
-            if has_hash:
-                return i # Do not mangle for objects with built in hash
-            i = i ^ (i >> 5)
+        if i == 0:
+            i = llmemory.cast_adr_to_int(obj)
+            if is_hash:
+                i = mangle_hash(i)
         return i
 
     def id(self, gcobje):
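
The new id_or_identityhash() treats hash == 0 as "no stored hash": ordinary
objects fall back to their (mangled) address, while prebuilt objects keep the
nonzero hash that gc_header_for() now stores at translation time. Below is a
minimal C sketch of that protocol, assuming a mangle_hash() mixing function
analogous to the RPython helper (the exact mixing is an assumption, not qcgc
code):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
        intptr_t tid;
        intptr_t hash;              /* 0 means "no hash stored yet" */
    } header_t;

    /* assumption: some cheap bit mixing, as in rpython.memory.support */
    static intptr_t mangle_hash(intptr_t i) {
        return i ^ (i >> 4);
    }

    static intptr_t id_or_identityhash(header_t *hdr, bool is_hash) {
        intptr_t i = hdr->hash;
        if (i == 0) {               /* not prebuilt: no precomputed hash */
            i = (intptr_t)hdr;      /* derive from the object's address */
            if (is_hash)
                i = mangle_hash(i); /* ids stay raw, hashes get mixed */
        }
        return i;                   /* a stored prebuilt hash wins as-is */
    }
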
diff --git a/rpython/memory/gctransform/qcgcframework.py b/rpython/memory/gctransform/qcgcframework.py
--- a/rpython/memory/gctransform/qcgcframework.py
+++ b/rpython/memory/gctransform/qcgcframework.py
@@ -36,16 +36,10 @@
 
     def gc_header_for(self, obj, needs_hash=False):
         hdr = self.gcdata.gc.gcheaderbuilder.header_of_object(obj)
-        withhash, flag = self.gcdata.gc.withhash_flag_is_in_field
-        x = getattr(hdr, withhash)
-        TYPE = lltype.typeOf(x)
-        x = lltype.cast_primitive(lltype.Signed, x)
         if needs_hash:
-            x |= flag       # set the flag in the header
+            hdr.hash = lltype.identityhash_nocache(obj._as_ptr())
         else:
-            x &= ~flag      # clear the flag in the header
-        x = lltype.cast_primitive(TYPE, x)
-        setattr(hdr, withhash, x)
+            assert hdr.hash == 0
         return hdr
 
     def push_roots(self, hop, keep_current_args=False):
diff --git a/rpython/translator/c/src/qcgc/allocator.c b/rpython/translator/c/src/qcgc/allocator.c
--- a/rpython/translator/c/src/qcgc/allocator.c
+++ b/rpython/translator/c/src/qcgc/allocator.c
@@ -11,6 +11,7 @@
 
 QCGC_STATIC void bump_allocator_assign(cell_t *ptr, size_t cells);
 QCGC_STATIC void bump_allocator_advance(size_t cells);
+QCGC_STATIC void bump_allocator_renew_block(void);
 
 QCGC_STATIC bool is_small(size_t cells);
 QCGC_STATIC size_t small_index(size_t cells);
@@ -26,6 +27,7 @@
 void qcgc_allocator_initialize(void) {
 	qcgc_allocator_state.arenas =
 		qcgc_arena_bag_create(QCGC_ARENA_BAG_INIT_SIZE);
+	qcgc_allocator_state.free_arenas = qcgc_arena_bag_create(4); // XXX: arbitrary initial size
 
 	// Bump Allocator
 	qcgc_allocator_state.bump_state.bump_ptr = NULL;
@@ -59,10 +61,24 @@
 		qcgc_arena_destroy(qcgc_allocator_state.arenas->items[i]);
 	}
 
+	arena_count = qcgc_allocator_state.free_arenas->count;
+	for (size_t i = 0; i < arena_count; i++) {
+		qcgc_arena_destroy(qcgc_allocator_state.free_arenas->items[i]);
+	}
+
 	free(qcgc_allocator_state.arenas);
+	free(qcgc_allocator_state.free_arenas);
 }
 
 void qcgc_fit_allocator_add(cell_t *ptr, size_t cells) {
+#if CHECKED
+	if (cells > 0) {
+		assert((((object_t *)ptr)->flags & QCGC_PREBUILT_OBJECT) == 0);
+		assert((cell_t *) qcgc_arena_addr(ptr) != ptr);
+		assert(qcgc_arena_get_blocktype(ptr) == BLOCK_FREE ||
+				qcgc_arena_get_blocktype(ptr) == BLOCK_EXTENT);
+	}
+#endif
 	if (cells > 0) {
 		if (is_small(cells)) {
 			size_t index = small_index(cells);
@@ -80,30 +96,17 @@
 	}
 }
 
-QCGC_STATIC void bump_allocator_assign(cell_t *ptr, size_t cells) {
-	qcgc_allocator_state.bump_state.bump_ptr = ptr;
-	qcgc_allocator_state.bump_state.remaining_cells = cells;
-}
-
-QCGC_STATIC void bump_allocator_advance(size_t cells) {
-	qcgc_allocator_state.bump_state.bump_ptr += cells;
-	qcgc_allocator_state.bump_state.remaining_cells -= cells;
-}
-
 /*******************************************************************************
- * Allocators                                                                  *
+ * Bump Allocator                                                              *
  ******************************************************************************/
 
 object_t *qcgc_bump_allocate(size_t bytes) {
+#if CHECKED
+	assert(bytes <= 1<<QCGC_LARGE_ALLOC_THRESHOLD_EXP);
+#endif
 	size_t cells = bytes_to_cells(bytes);
 	if (cells > qcgc_allocator_state.bump_state.remaining_cells) {
-		// Grab a new arena
-		// FIXME: Add remaining memory to fit allocator
-		arena_t *arena = qcgc_arena_create();
-		bump_allocator_assign(&(arena->cells[QCGC_ARENA_FIRST_CELL_INDEX]),
-				QCGC_ARENA_CELLS_COUNT - QCGC_ARENA_FIRST_CELL_INDEX);
-		qcgc_allocator_state.arenas =
-			qcgc_arena_bag_add(qcgc_allocator_state.arenas, arena);
+		bump_allocator_renew_block();
 	}
 	cell_t *mem = qcgc_allocator_state.bump_state.bump_ptr;
 	bump_allocator_advance(cells);
@@ -116,9 +119,92 @@
 #endif
 
 	result->flags |= QCGC_GRAY_FLAG;
+#if CHECKED
+	assert(qcgc_arena_is_coalesced(qcgc_arena_addr((cell_t *)result)));
+	if (qcgc_allocator_state.bump_state.remaining_cells > 0) {
+		assert(qcgc_arena_get_blocktype(
+					qcgc_allocator_state.bump_state.bump_ptr) == BLOCK_FREE);
+		for (size_t i = 1; i < qcgc_allocator_state.bump_state.remaining_cells;
+				i++) {
+			assert(qcgc_arena_get_blocktype(
+						qcgc_allocator_state.bump_state.bump_ptr + i)
+					== BLOCK_EXTENT);
+		}
+	}
+#endif
 	return result;
 }
 
+QCGC_STATIC void bump_allocator_renew_block(void) {
+#if CHECKED
+	if (qcgc_allocator_state.bump_state.remaining_cells > 0) {
+		assert(qcgc_arena_get_blocktype(
+					qcgc_allocator_state.bump_state.bump_ptr) == BLOCK_FREE);
+		for (size_t i = 1; i < qcgc_allocator_state.bump_state.remaining_cells;
+				i++) {
+			assert(qcgc_arena_get_blocktype(
+						qcgc_allocator_state.bump_state.bump_ptr + i)
+					== BLOCK_EXTENT);
+		}
+	}
+#endif
+	// Add remaining memory to fit allocator
+	qcgc_fit_allocator_add(qcgc_allocator_state.bump_state.bump_ptr,
+			qcgc_allocator_state.bump_state.remaining_cells);
+
+	// Try finding some huge block from fit allocator
+	exp_free_list_t *free_list = qcgc_allocator_state.fit_state.
+		large_free_list[QCGC_LARGE_FREE_LISTS - 1];
+	while (free_list->count > 0 && !valid_block(free_list->items[0].ptr,
+				free_list->items[0].size)) {
+		free_list = qcgc_exp_free_list_remove_index(free_list, 0);
+	}
+
+	if (free_list->count > 0) {
+		// Assign huge block to bump allocator
+		bump_allocator_assign(free_list->items[0].ptr,
+				free_list->items[0].size);
+		free_list = qcgc_exp_free_list_remove_index(free_list, 0);
+	} else {
+		// Grab a new arena
+		arena_t *arena = qcgc_arena_create();
+		bump_allocator_assign(&(arena->cells[QCGC_ARENA_FIRST_CELL_INDEX]),
+				QCGC_ARENA_CELLS_COUNT - QCGC_ARENA_FIRST_CELL_INDEX);
+		qcgc_allocator_state.arenas =
+			qcgc_arena_bag_add(qcgc_allocator_state.arenas, arena);
+	}
+
+	qcgc_allocator_state.fit_state.
+		large_free_list[QCGC_LARGE_FREE_LISTS - 1] = free_list;
+#if CHECKED
+	assert(qcgc_allocator_state.bump_state.bump_ptr != NULL);
+	assert(qcgc_arena_get_blocktype(qcgc_allocator_state.bump_state.bump_ptr) ==
+			BLOCK_FREE);
+	for (size_t i = 1; i < qcgc_allocator_state.bump_state.remaining_cells;
+			i++) {
+		assert(qcgc_arena_get_blocktype(
+					qcgc_allocator_state.bump_state.bump_ptr + i)
+				== BLOCK_EXTENT);
+	}
+#endif
+}
+
+QCGC_STATIC void bump_allocator_assign(cell_t *ptr, size_t cells) {
+#if CHECKED
+	assert(qcgc_arena_get_blocktype(ptr) == BLOCK_FREE);
+	for (size_t i = 1; i < cells; i++) {
+		assert(qcgc_arena_get_blocktype(ptr + i) == BLOCK_EXTENT);
+	}
+#endif
+	qcgc_allocator_state.bump_state.bump_ptr = ptr;
+	qcgc_allocator_state.bump_state.remaining_cells = cells;
+}
+
+QCGC_STATIC void bump_allocator_advance(size_t cells) {
+	qcgc_allocator_state.bump_state.bump_ptr += cells;
+	qcgc_allocator_state.bump_state.remaining_cells -= cells;
+}
+
 object_t *qcgc_fit_allocate(size_t bytes) {
 	size_t cells = bytes_to_cells(bytes);
 	cell_t *mem;
@@ -291,7 +377,7 @@
 	cells = cells >> QCGC_LARGE_FREE_LIST_FIRST_EXP;
 
 	// calculates floor(log2(cells))
-	return (8 * sizeof(unsigned long)) - __builtin_clzl(cells) - 1;
+	return MIN((8 * sizeof(unsigned long)) - __builtin_clzl(cells) - 1,
+			QCGC_LARGE_FREE_LISTS - 1);
 }
 
 QCGC_STATIC size_t small_index_to_cells(size_t index) {
@@ -306,6 +392,7 @@
 	assert(ptr != NULL);
 	assert(cells > 0);
 #endif
-	return (qcgc_arena_get_blocktype(ptr) == BLOCK_FREE &&
-			qcgc_arena_get_blocktype(ptr + cells) != BLOCK_EXTENT);
+	return (qcgc_arena_get_blocktype(ptr) == BLOCK_FREE && (
+				((qcgc_arena_addr(ptr + cells)) == (arena_t *) (ptr + cells)) ||
+				qcgc_arena_get_blocktype(ptr + cells) != BLOCK_EXTENT));
 }
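
The MIN clamp above is what lets huge recycled blocks be filed into the fit
allocator at all: every block at or beyond the large-allocation threshold now
lands in the last bin, which is exactly where bump_allocator_renew_block()
looks for a replacement block. A standalone sketch of the clamped index,
assuming the six-bin configuration implied by this commit (FIRST_EXP = 5,
threshold 2^10 cells); GCC/Clang only because of __builtin_clzl:

    #include <assert.h>
    #include <stddef.h>

    #define FIRST_EXP 5                   /* QCGC_LARGE_FREE_LIST_FIRST_EXP */
    #define NUM_LISTS 6                   /* QCGC_LARGE_FREE_LISTS */
    #define MIN(a,b) (((a)<(b))?(a):(b))

    static size_t large_index(size_t cells) {
        cells >>= FIRST_EXP;
        /* floor(log2(cells)), clamped into the last bin */
        return MIN((8 * sizeof(unsigned long)) - __builtin_clzl(cells) - 1,
                   NUM_LISTS - 1);
    }

    int main(void) {
        assert(large_index(1ul << 5) == 0);   /* smallest large block */
        assert(large_index(1ul << 10) == 5);  /* threshold-sized block */
        assert(large_index(1ul << 12) == 5);  /* oversized: clamped to last bin */
        return 0;
    }
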
diff --git a/rpython/translator/c/src/qcgc/allocator.h b/rpython/translator/c/src/qcgc/allocator.h
--- a/rpython/translator/c/src/qcgc/allocator.h
+++ b/rpython/translator/c/src/qcgc/allocator.h
@@ -17,6 +17,7 @@
  *                +---+---+-----+----+
  * size (cells):  | 1 | 2 | ... | 31 |
  *                +---+---+-----+----+
+ * (31 is 2^QCGC_LARGE_FREE_LIST_FIRST_EXP - 1)
  *
  * Large free lists:
  *                        +-----+-----+-----+---------+
@@ -24,18 +25,21 @@
  *                        +-----+-----+-----+---------+
  * minimal size (cells):  | 2^5 | 2^6 | ... | 2^(x+5) |
  *                        +-----+-----+-----+---------+
+ * (5 is QCGC_LARGE_FREE_LIST_FIRST_EXP)
  *
- * where x is chosen such that x + 5 + 1 = QCGC_ARENA_SIZE_EXP - 4 (i.e. the
- * next bin would hold chunks that have the size of at least one arena size,
- * which is impossible as an arena contains overhead)
+ * where x is chosen such that 2^(x + 5) = 2^QCGC_LARGE_ALLOC_THRESHOLD_EXP
+ * (i.e. such that the last bin contains all blocks that are larger than or
+ * equal to the threshold for huge blocks. These blocks can be returned to
+ * the bump allocator)
  */
-
-#define QCGC_LARGE_FREE_LISTS (QCGC_ARENA_SIZE_EXP - 4 - QCGC_LARGE_FREE_LIST_FIRST_EXP)
+#define QCGC_LARGE_FREE_LISTS (QCGC_LARGE_ALLOC_THRESHOLD_EXP - QCGC_LARGE_FREE_LIST_FIRST_EXP - 4 + 1)
+// -4 converts the byte exponent to a cell exponent (a cell is 2^4 = 16
+// bytes); +1 because bin indices start at 0
 
 #define QCGC_SMALL_FREE_LISTS ((1<<QCGC_LARGE_FREE_LIST_FIRST_EXP) - 1)
 
 struct qcgc_allocator_state {
 	arena_bag_t *arenas;
+	arena_bag_t *free_arenas;
 	struct bump_state {
 		cell_t *bump_ptr;
 		size_t remaining_cells;
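
For concreteness, the bin count works out as follows under this commit's
config.h (QCGC_LARGE_ALLOC_THRESHOLD_EXP = 14, QCGC_LARGE_FREE_LIST_FIRST_EXP
= 5) and the 2^4 = 16-byte cell implied by the "-4"; a compile-time sketch
(the literal values are read off this diff, not independent facts):

    #define QCGC_LARGE_ALLOC_THRESHOLD_EXP 14
    #define QCGC_LARGE_FREE_LIST_FIRST_EXP 5
    #define QCGC_LARGE_FREE_LISTS \
        (QCGC_LARGE_ALLOC_THRESHOLD_EXP - QCGC_LARGE_FREE_LIST_FIRST_EXP - 4 + 1)

    /* threshold in cells: 2^(14-4) = 2^10; the bins cover cell-count
     * exponents 5 through 10, hence 10 - 5 + 1 = 6 bins */
    _Static_assert(QCGC_LARGE_FREE_LISTS == 6, "bins for exponents 5..10");
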
diff --git a/rpython/translator/c/src/qcgc/arena.c b/rpython/translator/c/src/qcgc/arena.c
--- a/rpython/translator/c/src/qcgc/arena.c
+++ b/rpython/translator/c/src/qcgc/arena.c
@@ -5,6 +5,10 @@
 #include <sys/mman.h>
 #include <unistd.h>
 
+#if DEBUG_ZERO_ON_SWEEP
+#include <string.h>
+#endif
+
 #include "allocator.h"
 #include "event_logger.h"
 
@@ -146,12 +150,27 @@
 void qcgc_arena_mark_allocated(cell_t *ptr, size_t cells) {
 	size_t index = qcgc_arena_cell_index(ptr);
 	arena_t *arena = qcgc_arena_addr(ptr);
+#if CHECKED
+	assert(get_blocktype(arena, index) == BLOCK_FREE);
+	for (size_t i = 1; i < cells; i++) {
+		assert(get_blocktype(arena, index + i) == BLOCK_EXTENT);
+	}
+#endif
 	set_blocktype(arena, index, BLOCK_WHITE);
 	size_t index_of_next_block = index + cells;
 	if (index_of_next_block < QCGC_ARENA_CELLS_COUNT &&
 			get_blocktype(arena, index_of_next_block) == BLOCK_EXTENT) {
 		set_blocktype(arena, index_of_next_block, BLOCK_FREE);
 	}
+#if CHECKED
+	assert(get_blocktype(arena, index) == BLOCK_WHITE);
+	for (size_t i = 1; i < cells; i++) {
+		assert(get_blocktype(arena, index + i) == BLOCK_EXTENT);
+	}
+	if (index_of_next_block < QCGC_ARENA_CELLS_COUNT) {
+		assert(get_blocktype(arena, index + cells) != BLOCK_EXTENT);
+	}
+#endif
 }
 
 void qcgc_arena_mark_free(cell_t *ptr) {
@@ -164,15 +183,35 @@
 	assert(arena != NULL);
 	assert(qcgc_arena_is_coalesced(arena));
 #endif
+#if DEBUG_ZERO_ON_SWEEP
+	bool zero = true;
+#endif
 	bool free = true;
 	bool coalesce = false;
 	bool add_to_free_list = false;
 	size_t last_free_cell = QCGC_ARENA_FIRST_CELL_INDEX;
+
+	if (qcgc_arena_addr(qcgc_allocator_state.bump_state.bump_ptr) == arena) {
+		for (size_t cell = QCGC_ARENA_FIRST_CELL_INDEX;
+				cell < QCGC_ARENA_CELLS_COUNT;
+				cell++) {
+			if (get_blocktype(arena, cell) == BLOCK_BLACK) {
+				set_blocktype(arena, cell, BLOCK_WHITE);
+			}
+		}
+		return false;
+	}
+
 	for (size_t cell = QCGC_ARENA_FIRST_CELL_INDEX;
 			cell < QCGC_ARENA_CELLS_COUNT;
 			cell++) {
-		switch (qcgc_arena_get_blocktype(arena->cells + cell)) {
+		switch (get_blocktype(arena, cell)) {
 			case BLOCK_EXTENT:
+#if DEBUG_ZERO_ON_SWEEP
+				if (zero) {
+					memset(&arena->cells[cell], 0, sizeof(cell_t));
+				}
+#endif
 				break;
 			case BLOCK_FREE:
 				if (coalesce) {
@@ -181,6 +220,10 @@
 					last_free_cell = cell;
 				}
 				coalesce = true;
+#if DEBUG_ZERO_ON_SWEEP
+				zero = true;
+				memset(&arena->cells[cell], 0, sizeof(cell_t));
+#endif
 				break;
 			case BLOCK_WHITE:
 				if (coalesce) {
@@ -191,25 +234,33 @@
 				}
 				coalesce = true;
 				add_to_free_list = true;
+#if DEBUG_ZERO_ON_SWEEP
+				zero = true;
+				memset(&arena->cells[cell], 0, sizeof(cell_t));
+#endif
 				break;
 			case BLOCK_BLACK:
 				set_blocktype(arena, cell, BLOCK_WHITE);
 				if (add_to_free_list) {
-					qcgc_fit_allocator_add(&(arena->cells[last_free_cell]),
+					qcgc_fit_allocator_add(arena->cells + last_free_cell,
 							cell - last_free_cell);
 				}
 				free = false;
 				coalesce = false;
 				add_to_free_list = false;
+#if DEBUG_ZERO_ON_SWEEP
+				zero = false;
+#endif
 				break;
 		}
 	}
 	if (add_to_free_list && !free) {
-		qcgc_fit_allocator_add(&(arena->cells[last_free_cell]),
+		qcgc_fit_allocator_add(arena->cells + last_free_cell,
 							QCGC_ARENA_CELLS_COUNT - last_free_cell);
 	}
 #if CHECKED
 	assert(qcgc_arena_is_coalesced(arena));
+	assert(free == qcgc_arena_is_empty(arena));
 #endif
 	return free;
 }
diff --git a/rpython/translator/c/src/qcgc/config.h b/rpython/translator/c/src/qcgc/config.h
--- a/rpython/translator/c/src/qcgc/config.h
+++ b/rpython/translator/c/src/qcgc/config.h
@@ -1,13 +1,15 @@
 #pragma once
 
-#define CHECKED 1							// Enable runtime sanity checks
+#define CHECKED 0							// Enable runtime sanity checks
+											// warning: huge performance impact
+#define DEBUG_ZERO_ON_SWEEP 0				// Zero memory on sweep (debug only)
 
 #define QCGC_INIT_ZERO 1					// Init new objects with zero bytes
 
 /**
  * Event logger
  */
-#define EVENT_LOG 1							// Enable event log
+#define EVENT_LOG 0							// Enable event log
 #define LOGFILE "./qcgc_events.log"			// Default logfile
 #define LOG_ALLOCATION 0					// Enable allocation log (warning:
 											// significant performance impact)
@@ -16,7 +18,7 @@
 											// shadow stack
 #define QCGC_ARENA_BAG_INIT_SIZE 16			// Initial size of the arena bag
 #define QCGC_ARENA_SIZE_EXP 20				// Between 16 (64kB) and 20 (1MB)
-#define QCGC_LARGE_ALLOC_THRESHOLD 1<<14
+#define QCGC_LARGE_ALLOC_THRESHOLD_EXP 14	// Must be less than QCGC_ARENA_SIZE_EXP
 #define QCGC_MARK_LIST_SEGMENT_SIZE 64		// TODO: Tune for performance
 #define QCGC_GRAY_STACK_INIT_SIZE 128		// TODO: Tune for performance
 #define QCGC_INC_MARK_MIN 64				// TODO: Tune for performance
@@ -32,8 +34,16 @@
  * DO NOT MODIFY BELOW HERE
  */
 
+#if QCGC_LARGE_ALLOC_THRESHOLD_EXP >= QCGC_ARENA_SIZE_EXP
+#error	"Inconsistent configuration. Huge block threshold must be smaller " \
+		"than the arena size."
+#endif
+
 #ifdef TESTING
 #define QCGC_STATIC
 #else
 #define QCGC_STATIC static
 #endif
+
+#define MAX(a,b) (((a)>(b))?(a):(b))
+#define MIN(a,b) (((a)<(b))?(a):(b))
diff --git a/rpython/translator/c/src/qcgc/qcgc.c b/rpython/translator/c/src/qcgc/qcgc.c
--- a/rpython/translator/c/src/qcgc/qcgc.c
+++ b/rpython/translator/c/src/qcgc/qcgc.c
@@ -10,10 +10,6 @@
 #include "hugeblocktable.h"
 #include "event_logger.h"
 
-// TODO: Eventually move to own header?
-#define MAX(a,b) (((a)>(b))?(a):(b))
-#define MIN(a,b) (((a)<(b))?(a):(b))
-
 void qcgc_mark(bool incremental);
 void qcgc_pop_object(object_t *object);
 void qcgc_push_object(object_t *object);
@@ -117,7 +113,7 @@
 			(uint8_t *) &size);
 #endif
 	object_t *result;
-	if (size <= QCGC_LARGE_ALLOC_THRESHOLD) {
+	if (size <= 1<<QCGC_LARGE_ALLOC_THRESHOLD_EXP) {
 		// Use bump / fit allocator
 		if (true) { // FIXME: Implement reasonable switch
 			result = qcgc_bump_allocate(size);
@@ -138,9 +134,6 @@
 	qcgc_event_logger_log(EVENT_ALLOCATE_DONE, sizeof(object_t *),
 			(uint8_t *) &result);
 #endif
-#if CHECKED
-	assert(qcgc_state.phase != GC_COLLECT);
-#endif
 	return result;
 }
 
@@ -167,9 +160,7 @@
 			return MARK_COLOR_BLACK;
 		}
 	} else {
-#if CHECKED
-		assert(false);
-#endif
+		return MARK_COLOR_INVALID;
 	}
 }
 
@@ -210,8 +201,8 @@
 
 		while (to_process > 0) {
 			object_t *top = qcgc_gray_stack_top(qcgc_state.gp_gray_stack);
-			qcgc_state.gp_gray_stack =
-				qcgc_gray_stack_pop(qcgc_state.gp_gray_stack);
+			qcgc_state.gp_gray_stack = qcgc_gray_stack_pop(
+					qcgc_state.gp_gray_stack);
 			qcgc_pop_object(top);
 			to_process--;
 		}
@@ -225,10 +216,8 @@
 					(arena->gray_stack->index));
 
 			while (to_process > 0) {
-				object_t *top =
-					qcgc_gray_stack_top(arena->gray_stack);
-				arena->gray_stack =
-					qcgc_gray_stack_pop(arena->gray_stack);
+				object_t *top = qcgc_gray_stack_top(arena->gray_stack);
+				arena->gray_stack = qcgc_gray_stack_pop(arena->gray_stack);
 				qcgc_pop_object(top);
 				to_process--;
 			}
@@ -247,6 +236,7 @@
 	qcgc_event_logger_log(EVENT_MARK_DONE, 0, NULL);
 #if CHECKED
 	assert(incremental || (qcgc_state.phase == GC_COLLECT));
+	assert(qcgc_state.phase != GC_PAUSE);
 #endif
 }
 
@@ -319,8 +309,22 @@
 			(uint8_t *) &arena_count);
 
 	qcgc_hbtable_sweep();
-	for (size_t i = 0; i < qcgc_allocator_state.arenas->count; i++) {
-		qcgc_arena_sweep(qcgc_allocator_state.arenas->items[i]);
+	size_t i = 0;
+	while (i < qcgc_allocator_state.arenas->count) {
+		arena_t *arena = qcgc_allocator_state.arenas->items[i];
+		// The arena that contains the bump pointer is automatically skipped
+		if (qcgc_arena_sweep(arena)) {
+			// Free
+			qcgc_allocator_state.arenas = qcgc_arena_bag_remove_index(
+					qcgc_allocator_state.arenas, i);
+			qcgc_allocator_state.free_arenas = qcgc_arena_bag_add(
+					qcgc_allocator_state.free_arenas, arena);
+
+			// NO i++
+		} else {
+			// Not free
+			i++;
+		}
 	}
 	qcgc_state.phase = GC_PAUSE;
 
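The sweep loop above removes arenas from the bag while iterating, so it must
not advance the index after a removal: once remove_index has run, slot i
holds an arena that has not been examined yet. A minimal sketch of the idiom
with a toy bag (the struct and helpers are stand-ins, not qcgc's real bag
API):

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct { size_t count; int items[8]; } bag_t;

    static bool should_remove(int x) { return x % 2 == 0; }

    /* one plausible remove scheme: swap in the last element */
    static void bag_remove_index(bag_t *b, size_t i) {
        b->items[i] = b->items[--b->count];
    }

    static void sweep(bag_t *b) {
        size_t i = 0;
        while (i < b->count) {
            if (should_remove(b->items[i])) {
                bag_remove_index(b, i);  /* slot i is new and unseen: NO i++ */
            } else {
                i++;                     /* advance only past kept items */
            }
        }
    }
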
diff --git a/rpython/translator/c/src/qcgc/qcgc.h b/rpython/translator/c/src/qcgc/qcgc.h
--- a/rpython/translator/c/src/qcgc/qcgc.h
+++ b/rpython/translator/c/src/qcgc/qcgc.h
@@ -27,6 +27,7 @@
 	MARK_COLOR_LIGHT_GRAY,
 	MARK_COLOR_DARK_GRAY,
 	MARK_COLOR_BLACK,
+	MARK_COLOR_INVALID,
 } mark_color_t;
 
 /**

