[pypy-svn] r77543 - in pypy/branch/fast-forward: . lib_pypy/_ctypes pypy/jit/backend pypy/jit/backend/llgraph pypy/jit/backend/llsupport pypy/jit/backend/llsupport/test pypy/jit/backend/test pypy/jit/backend/x86 pypy/jit/metainterp pypy/jit/tl pypy/module/imp pypy/module/imp/test pypy/module/test_lib_pypy/ctypes_tests pypy/rpython/lltypesystem pypy/rpython/memory pypy/rpython/memory/gc pypy/rpython/memory/gc/test pypy/rpython/memory/gctransform pypy/rpython/memory/test pypy/translator pypy/translator/goal

afa at codespeak.net afa at codespeak.net
Fri Oct 1 22:53:06 CEST 2010


Author: afa
Date: Fri Oct  1 22:53:02 2010
New Revision: 77543

Added:
   pypy/branch/fast-forward/pypy/jit/backend/conftest.py
      - copied unchanged from r77542, pypy/trunk/pypy/jit/backend/conftest.py
   pypy/branch/fast-forward/pypy/jit/tl/jittest.py
      - copied unchanged from r77542, pypy/trunk/pypy/jit/tl/jittest.py
Removed:
   pypy/branch/fast-forward/pypy/jit/backend/test/conftest.py
Modified:
   pypy/branch/fast-forward/   (props changed)
   pypy/branch/fast-forward/lib_pypy/_ctypes/function.py
   pypy/branch/fast-forward/pypy/jit/backend/llgraph/llimpl.py
   pypy/branch/fast-forward/pypy/jit/backend/llsupport/gc.py
   pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_gc.py
   pypy/branch/fast-forward/pypy/jit/backend/test/runner_test.py
   pypy/branch/fast-forward/pypy/jit/backend/test/test_ll_random.py
   pypy/branch/fast-forward/pypy/jit/backend/test/test_random.py
   pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py
   pypy/branch/fast-forward/pypy/jit/backend/x86/regalloc.py
   pypy/branch/fast-forward/pypy/jit/backend/x86/runner.py
   pypy/branch/fast-forward/pypy/jit/metainterp/executor.py
   pypy/branch/fast-forward/pypy/jit/metainterp/resoperation.py
   pypy/branch/fast-forward/pypy/jit/metainterp/warmspot.py
   pypy/branch/fast-forward/pypy/module/imp/importing.py
   pypy/branch/fast-forward/pypy/module/imp/test/test_import.py
   pypy/branch/fast-forward/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py
   pypy/branch/fast-forward/pypy/rpython/lltypesystem/llarena.py
   pypy/branch/fast-forward/pypy/rpython/memory/gc/base.py
   pypy/branch/fast-forward/pypy/rpython/memory/gc/generation.py
   pypy/branch/fast-forward/pypy/rpython/memory/gc/markcompact.py
   pypy/branch/fast-forward/pypy/rpython/memory/gc/marksweep.py
   pypy/branch/fast-forward/pypy/rpython/memory/gc/minimark.py
   pypy/branch/fast-forward/pypy/rpython/memory/gc/minimarkpage.py
   pypy/branch/fast-forward/pypy/rpython/memory/gc/semispace.py
   pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_direct.py
   pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimarkpage.py
   pypy/branch/fast-forward/pypy/rpython/memory/gctransform/framework.py
   pypy/branch/fast-forward/pypy/rpython/memory/gcwrapper.py
   pypy/branch/fast-forward/pypy/rpython/memory/test/test_transformed_gc.py
   pypy/branch/fast-forward/pypy/translator/driver.py
   pypy/branch/fast-forward/pypy/translator/goal/translate.py
Log:
Merge from trunk


Modified: pypy/branch/fast-forward/lib_pypy/_ctypes/function.py
==============================================================================
--- pypy/branch/fast-forward/lib_pypy/_ctypes/function.py	(original)
+++ pypy/branch/fast-forward/lib_pypy/_ctypes/function.py	Fri Oct  1 22:53:02 2010
@@ -171,7 +171,7 @@
         return self._build_result(restype, resbuffer, argtypes, argsandobjs)
 
     def _getfuncptr(self, argtypes, restype, thisarg=None):
-        if self._ptr is not None:
+        if self._ptr is not None and argtypes is self._argtypes_:
             return self._ptr
         if restype is None or not isinstance(restype, _CDataMeta):
             import ctypes

Modified: pypy/branch/fast-forward/pypy/jit/backend/llgraph/llimpl.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/llgraph/llimpl.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/llgraph/llimpl.py	Fri Oct  1 22:53:02 2010
@@ -129,7 +129,7 @@
     'arraylen_gc'     : (('ref',), 'int'),
     'call'            : (('ref', 'varargs'), 'intorptr'),
     'call_assembler'  : (('varargs',), 'intorptr'),
-    'cond_call_gc_wb' : (('ptr',), None),
+    'cond_call_gc_wb' : (('ptr', 'ptr'), None),
     'oosend'          : (('varargs',), 'intorptr'),
     'oosend_pure'     : (('varargs',), 'intorptr'),
     'guard_true'      : (('bool',), None),
@@ -810,7 +810,7 @@
                  FLOAT: 0.0}
             return d[calldescr.typeinfo]
 
-    def op_cond_call_gc_wb(self, descr, a):
+    def op_cond_call_gc_wb(self, descr, a, b):
         py.test.skip("cond_call_gc_wb not supported")
 
     def op_oosend(self, descr, obj, *args):

Modified: pypy/branch/fast-forward/pypy/jit/backend/llsupport/gc.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/llsupport/gc.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/llsupport/gc.py	Fri Oct  1 22:53:02 2010
@@ -404,7 +404,7 @@
         self.GC_MALLOC_BASIC = lltype.Ptr(lltype.FuncType(
             [lltype.Signed, lltype.Signed], llmemory.GCREF))
         self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType(
-            [llmemory.Address], lltype.Void))
+            [llmemory.Address, llmemory.Address], lltype.Void))
         self.write_barrier_descr = WriteBarrierDescr(self)
         #
         def malloc_array(itemsize, tid, num_elem):
@@ -550,7 +550,8 @@
             # the GC, and call it immediately
             llop1 = self.llop1
             funcptr = llop1.get_write_barrier_failing_case(self.WB_FUNCPTR)
-            funcptr(llmemory.cast_ptr_to_adr(gcref_struct))
+            funcptr(llmemory.cast_ptr_to_adr(gcref_struct),
+                    llmemory.cast_ptr_to_adr(gcref_newptr))
 
     def rewrite_assembler(self, cpu, operations):
         # Perform two kinds of rewrites in parallel:
@@ -589,7 +590,7 @@
                 v = op.getarg(1)
                 if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and
                                              bool(v.value)): # store a non-NULL
-                    self._gen_write_barrier(newops, op.getarg(0))
+                    self._gen_write_barrier(newops, op.getarg(0), v)
                     op = op.copy_and_change(rop.SETFIELD_RAW)
             # ---------- write barrier for SETARRAYITEM_GC ----------
             if op.getopnum() == rop.SETARRAYITEM_GC:
@@ -598,15 +599,15 @@
                                              bool(v.value)): # store a non-NULL
                     # XXX detect when we should produce a
                     # write_barrier_from_array
-                    self._gen_write_barrier(newops, op.getarg(0))
+                    self._gen_write_barrier(newops, op.getarg(0), v)
                     op = op.copy_and_change(rop.SETARRAYITEM_RAW)
             # ----------
             newops.append(op)
         del operations[:]
         operations.extend(newops)
 
-    def _gen_write_barrier(self, newops, v_base):
-        args = [v_base]
+    def _gen_write_barrier(self, newops, v_base, v_value):
+        args = [v_base, v_value]
         newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None,
                                    descr=self.write_barrier_descr))
 

Modified: pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_gc.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_gc.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_gc.py	Fri Oct  1 22:53:02 2010
@@ -141,8 +141,8 @@
                             repr(offset_to_length), p))
         return p
 
-    def _write_barrier_failing_case(self, adr_struct):
-        self.record.append(('barrier', adr_struct))
+    def _write_barrier_failing_case(self, adr_struct, adr_newptr):
+        self.record.append(('barrier', adr_struct, adr_newptr))
 
     def get_write_barrier_failing_case(self, FPTRTYPE):
         return llhelper(FPTRTYPE, self._write_barrier_failing_case)
@@ -239,6 +239,7 @@
         s_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, s)
         r_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, r)
         s_adr = llmemory.cast_ptr_to_adr(s)
+        r_adr = llmemory.cast_ptr_to_adr(r)
         #
         s_hdr.tid &= ~gc_ll_descr.GCClass.JIT_WB_IF_FLAG
         gc_ll_descr.do_write_barrier(s_gcref, r_gcref)
@@ -246,7 +247,7 @@
         #
         s_hdr.tid |= gc_ll_descr.GCClass.JIT_WB_IF_FLAG
         gc_ll_descr.do_write_barrier(s_gcref, r_gcref)
-        assert self.llop1.record == [('barrier', s_adr)]
+        assert self.llop1.record == [('barrier', s_adr, r_adr)]
 
     def test_gen_write_barrier(self):
         gc_ll_descr = self.gc_ll_descr
@@ -254,11 +255,13 @@
         #
         newops = []
         v_base = BoxPtr()
-        gc_ll_descr._gen_write_barrier(newops, v_base)
+        v_value = BoxPtr()
+        gc_ll_descr._gen_write_barrier(newops, v_base, v_value)
         assert llop1.record == []
         assert len(newops) == 1
         assert newops[0].getopnum() == rop.COND_CALL_GC_WB
         assert newops[0].getarg(0) == v_base
+        assert newops[0].getarg(1) == v_value
         assert newops[0].result is None
         wbdescr = newops[0].getdescr()
         assert isinstance(wbdescr.jit_wb_if_flag, int)
@@ -358,6 +361,7 @@
         #
         assert operations[0].getopnum() == rop.COND_CALL_GC_WB
         assert operations[0].getarg(0) == v_base
+        assert operations[0].getarg(1) == v_value
         assert operations[0].result is None
         #
         assert operations[1].getopnum() == rop.SETFIELD_RAW
@@ -381,6 +385,7 @@
         #
         assert operations[0].getopnum() == rop.COND_CALL_GC_WB
         assert operations[0].getarg(0) == v_base
+        assert operations[0].getarg(1) == v_value
         assert operations[0].result is None
         #
         assert operations[1].getopnum() == rop.SETARRAYITEM_RAW

Modified: pypy/branch/fast-forward/pypy/jit/backend/test/runner_test.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/test/runner_test.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/test/runner_test.py	Fri Oct  1 22:53:02 2010
@@ -1427,12 +1427,12 @@
         assert not excvalue
 
     def test_cond_call_gc_wb(self):
-        def func_void(a):
-            record.append(a)
+        def func_void(a, b):
+            record.append((a, b))
         record = []
         #
         S = lltype.GcStruct('S', ('tid', lltype.Signed))
-        FUNC = self.FuncType([lltype.Ptr(S)], lltype.Void)
+        FUNC = self.FuncType([lltype.Ptr(S), lltype.Signed], lltype.Void)
         func_ptr = llhelper(lltype.Ptr(FUNC), func_void)
         funcbox = self.get_funcbox(self.cpu, func_ptr)
         class WriteBarrierDescr(AbstractDescr):
@@ -1453,10 +1453,10 @@
             sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s)
             del record[:]
             self.execute_operation(rop.COND_CALL_GC_WB,
-                                   [BoxPtr(sgcref)],
+                                   [BoxPtr(sgcref), ConstInt(-2121)],
                                    'void', descr=WriteBarrierDescr())
             if cond:
-                assert record == [s]
+                assert record == [(s, -2121)]
             else:
                 assert record == []
 

Modified: pypy/branch/fast-forward/pypy/jit/backend/test/test_ll_random.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/test/test_ll_random.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/test/test_ll_random.py	Fri Oct  1 22:53:02 2010
@@ -386,6 +386,20 @@
         v_string = self.get_string(builder, r)
         builder.do(self.opnum, [v_string])
 
+class AbstractCopyContentOperation(AbstractStringOperation):
+    def produce_into(self, builder, r):
+        v_srcstring = self.get_string(builder, r)
+        v_dststring = self.get_string(builder, r)
+        if v_srcstring.value == v_dststring.value:    # because it's not a
+            raise test_random.CannotProduceOperation  # memmove(), but memcpy()
+        srclen = len(v_srcstring.getref(self.ptr).chars)
+        dstlen = len(v_dststring.getref(self.ptr).chars)
+        v_length = builder.get_index(min(srclen, dstlen), r)
+        v_srcstart = builder.get_index(srclen - v_length.value + 1, r)
+        v_dststart = builder.get_index(dstlen - v_length.value + 1, r)
+        builder.do(self.opnum, [v_srcstring, v_dststring,
+                                v_srcstart, v_dststart, v_length])
+
 class StrGetItemOperation(AbstractGetItemOperation, _StrOperation):
     pass
 
@@ -404,6 +418,13 @@
 class UnicodeLenOperation(AbstractStringLenOperation, _UnicodeOperation):
     pass
 
+class CopyStrContentOperation(AbstractCopyContentOperation, _StrOperation):
+    pass
+
+class CopyUnicodeContentOperation(AbstractCopyContentOperation,
+                                  _UnicodeOperation):
+    pass
+
 
 # there are five options in total:
 # 1. non raising call and guard_no_exception
@@ -577,6 +598,8 @@
     OPERATIONS.append(UnicodeSetItemOperation(rop.UNICODESETITEM))
     OPERATIONS.append(StrLenOperation(rop.STRLEN))
     OPERATIONS.append(UnicodeLenOperation(rop.UNICODELEN))
+    OPERATIONS.append(CopyStrContentOperation(rop.COPYSTRCONTENT))
+    #OPERATIONS.append(CopyUnicodeContentOperation(rop.COPYUNICODECONTENT))
 
 for i in range(2):
     OPERATIONS.append(GuardClassOperation(rop.GUARD_CLASS))

Modified: pypy/branch/fast-forward/pypy/jit/backend/test/test_random.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/test/test_random.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/test/test_random.py	Fri Oct  1 22:53:02 2010
@@ -1,7 +1,7 @@
 import py, sys
 from pypy.rlib.rarithmetic import intmask, LONG_BIT
 from pypy.rpython.lltypesystem import llmemory
-from pypy.jit.backend.test import conftest as demo_conftest
+from pypy.jit.backend import conftest as demo_conftest
 from pypy.jit.metainterp.history import BasicFailDescr, TreeLoop
 from pypy.jit.metainterp.history import BoxInt, ConstInt, LoopToken
 from pypy.jit.metainterp.history import BoxPtr, ConstPtr
@@ -102,7 +102,7 @@
             elif isinstance(v, ConstFloat):
                 args.append('ConstFloat(%r)' % v.value)
             elif isinstance(v, ConstInt):
-                args.append('ConstInt(%d)' % v.value)
+                args.append('ConstInt(%s)' % v.value)
             else:
                 raise NotImplementedError(v)
         if op.getdescr() is None:
@@ -113,7 +113,7 @@
             except AttributeError:
                 descrstr = ', descr=...'
         print >>s, '        ResOperation(rop.%s, [%s], %s%s),' % (
-            opname[op.opnum], ', '.join(args), names[op.result], descrstr)
+            opname[op.getopnum()], ', '.join(args), names[op.result], descrstr)
         #if getattr(op, 'suboperations', None) is not None:
         #    subops.append(op)
 
@@ -189,7 +189,7 @@
                                                                        v.value)
         print >>s, '    op = cpu.execute_token(looptoken)'
         if self.should_fail_by is None:
-            fail_args = self.loop.operations[-1].args
+            fail_args = self.loop.operations[-1].getarglist()
         else:
             fail_args = self.should_fail_by.getfailargs()
         for i, v in enumerate(fail_args):

Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py	Fri Oct  1 22:53:02 2010
@@ -1780,11 +1780,12 @@
                     self.mc.PUSH_i32(loc.getint())
         
         if IS_X86_64:
-            # We clobber this register to pass the arguments, but that's
+            # We clobber these registers to pass the arguments, but that's
             # okay, because consider_cond_call_gc_wb makes sure that any
             # caller-save registers with values in them are present in arglocs,
             # so they are saved on the stack above and restored below 
             self.mc.MOV_rs(edi.value, 0)
+            self.mc.MOV_rs(esi.value, 8)
 
         # misaligned stack in the call, but it's ok because the write barrier
         # is not going to call anything more.  Also, this assumes that the

Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/regalloc.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/x86/regalloc.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/x86/regalloc.py	Fri Oct  1 22:53:02 2010
@@ -696,9 +696,13 @@
     def consider_cond_call_gc_wb(self, op):
         assert op.result is None
         args = op.getarglist()
+        loc_newvalue = self.rm.make_sure_var_in_reg(op.getarg(1), args)
+        # ^^^ we force loc_newvalue in a reg (unless it's a Const),
+        # because it will be needed anyway by the following setfield_gc.
+        # It avoids loading it twice from the memory.
         loc_base = self.rm.make_sure_var_in_reg(op.getarg(0), args,
                                                 imm_fine=False)
-        arglocs = [loc_base]
+        arglocs = [loc_base, loc_newvalue]
         # add eax, ecx and edx as extra "arguments" to ensure they are
         # saved and restored.  Fish in self.rm to know which of these
         # registers really need to be saved (a bit of a hack).  Moreover,
@@ -959,18 +963,23 @@
         args = op.getarglist()
         base_loc = self.rm.make_sure_var_in_reg(args[0], args)
         ofs_loc = self.rm.make_sure_var_in_reg(args[2], args)
+        assert args[0] is not args[1]    # forbidden case of aliasing
         self.rm.possibly_free_var(args[0])
-        self.rm.possibly_free_var(args[2])
+        if args[3] is not args[2] is not args[4]:  # MESS MESS MESS: don't free
+            self.rm.possibly_free_var(args[2])     # it if ==args[3] or args[4]
         srcaddr_box = TempBox()
-        srcaddr_loc = self.rm.force_allocate_reg(srcaddr_box)
+        forbidden_vars = [args[1], args[3], args[4], srcaddr_box]
+        srcaddr_loc = self.rm.force_allocate_reg(srcaddr_box, forbidden_vars)
         self._gen_address_inside_string(base_loc, ofs_loc, srcaddr_loc)
         # compute the destination address
-        base_loc = self.rm.make_sure_var_in_reg(args[1], args)
-        ofs_loc = self.rm.make_sure_var_in_reg(args[3], args)
+        base_loc = self.rm.make_sure_var_in_reg(args[1], forbidden_vars)
+        ofs_loc = self.rm.make_sure_var_in_reg(args[3], forbidden_vars)
         self.rm.possibly_free_var(args[1])
-        self.rm.possibly_free_var(args[3])
+        if args[3] is not args[4]:     # more of the MESS described above
+            self.rm.possibly_free_var(args[3])
+        forbidden_vars = [args[4], srcaddr_box]
         dstaddr_box = TempBox()
-        dstaddr_loc = self.rm.force_allocate_reg(dstaddr_box)
+        dstaddr_loc = self.rm.force_allocate_reg(dstaddr_box, forbidden_vars)
         self._gen_address_inside_string(base_loc, ofs_loc, dstaddr_loc)
         # call memcpy()
         length_loc = self.loc(args[4])

Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/runner.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/x86/runner.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/x86/runner.py	Fri Oct  1 22:53:02 2010
@@ -87,7 +87,9 @@
 
     def execute_token(self, executable_token):
         addr = executable_token._x86_bootstrap_code
+        #llop.debug_print(lltype.Void, ">>>> Entering", addr)
         func = rffi.cast(lltype.Ptr(self.BOOTSTRAP_TP), addr)
+        #llop.debug_print(lltype.Void, "<<<< Back")
         fail_index = self._execute_call(func)
         return self.get_fail_descr_from_number(fail_index)
 
@@ -99,10 +101,7 @@
             LLInterpreter.current_interpreter = self.debug_ll_interpreter
         res = 0
         try:
-            #llop.debug_print(lltype.Void, ">>>> Entering",
-            #                 rffi.cast(lltype.Signed, func))
             res = func()
-            #llop.debug_print(lltype.Void, "<<<< Back")
         finally:
             if not self.translate_support_code:
                 LLInterpreter.current_interpreter = prev_interpreter

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/executor.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/executor.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/executor.py	Fri Oct  1 22:53:02 2010
@@ -205,8 +205,8 @@
 
 def do_copystrcontent(cpu, _, srcbox, dstbox,
                       srcstartbox, dststartbox, lengthbox):
-    src = srcbox.getptr(lltype.Ptr(rstr.STR))
-    dst = dstbox.getptr(lltype.Ptr(rstr.STR))
+    src = srcbox.getref(lltype.Ptr(rstr.STR))
+    dst = dstbox.getref(lltype.Ptr(rstr.STR))
     srcstart = srcstartbox.getint()
     dststart = dststartbox.getint()
     length = lengthbox.getint()
@@ -214,8 +214,8 @@
 
 def do_copyunicodecontent(cpu, _, srcbox, dstbox,
                           srcstartbox, dststartbox, lengthbox):
-    src = srcbox.getptr(lltype.Ptr(rstr.UNICODE))
-    dst = dstbox.getptr(lltype.Ptr(rstr.UNICODE))
+    src = srcbox.getref(lltype.Ptr(rstr.UNICODE))
+    dst = dstbox.getref(lltype.Ptr(rstr.UNICODE))
     srcstart = srcstartbox.getint()
     dststart = dststartbox.getint()
     length = lengthbox.getint()
@@ -428,6 +428,10 @@
         if arity == 3:
             func = get_execute_funclist(3, False)[opnum]
             return func(cpu, metainterp, argboxes[0], argboxes[1], argboxes[2])
+        if arity == 5:    # copystrcontent, copyunicodecontent
+            func = get_execute_funclist(5, False)[opnum]
+            return func(cpu, metainterp, argboxes[0], argboxes[1],
+                        argboxes[2], argboxes[3], argboxes[4])
     raise NotImplementedError
 
 

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/resoperation.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/resoperation.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/resoperation.py	Fri Oct  1 22:53:02 2010
@@ -455,7 +455,7 @@
     'UNICODESETITEM/3',
     'NEWUNICODE/1',
     #'RUNTIMENEW/1',     # ootype operation    
-    'COND_CALL_GC_WB/1d',  # [objptr]   (for the write barrier)
+    'COND_CALL_GC_WB/2d', # [objptr, newvalue]   (for the write barrier)
     'DEBUG_MERGE_POINT/1',      # debugging only
     'VIRTUAL_REF_FINISH/2',   # removed before it's passed to the backend
     'COPYSTRCONTENT/5',       # src, dst, srcstart, dststart, length

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/warmspot.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/warmspot.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/warmspot.py	Fri Oct  1 22:53:02 2010
@@ -67,9 +67,16 @@
 def jittify_and_run(interp, graph, args, repeat=1,
                     backendopt=False, trace_limit=sys.maxint,
                     debug_level=DEBUG_STEPS, inline=False, **kwds):
+    from pypy.config.config import ConfigError
     translator = interp.typer.annotator.translator
-    translator.config.translation.gc = "boehm"
-    translator.config.translation.list_comprehension_operations = True
+    try:
+        translator.config.translation.gc = "boehm"
+    except ConfigError:
+        pass
+    try:
+        translator.config.translation.list_comprehension_operations = True
+    except ConfigError:
+        pass
     warmrunnerdesc = WarmRunnerDesc(translator, backendopt=backendopt, **kwds)
     for jd in warmrunnerdesc.jitdrivers_sd:
         jd.warmstate.set_param_threshold(3)          # for tests

Modified: pypy/branch/fast-forward/pypy/module/imp/importing.py
==============================================================================
--- pypy/branch/fast-forward/pypy/module/imp/importing.py	(original)
+++ pypy/branch/fast-forward/pypy/module/imp/importing.py	Fri Oct  1 22:53:02 2010
@@ -122,7 +122,7 @@
                     n = len(ctxt_name_prefix_parts)-level+1
                     assert n>=0
                     ctxt_name_prefix_parts = ctxt_name_prefix_parts[:n]
-                if ctxt_w_path is None: # plain module
+                if ctxt_name_prefix_parts and ctxt_w_path is None: # plain module
                     ctxt_name_prefix_parts.pop()
                 if ctxt_name_prefix_parts:
                     rel_modulename = '.'.join(ctxt_name_prefix_parts)

Modified: pypy/branch/fast-forward/pypy/module/imp/test/test_import.py
==============================================================================
--- pypy/branch/fast-forward/pypy/module/imp/test/test_import.py	(original)
+++ pypy/branch/fast-forward/pypy/module/imp/test/test_import.py	Fri Oct  1 22:53:02 2010
@@ -360,6 +360,12 @@
         """.rstrip()
         raises(ValueError, imp)
 
+    def test_future_relative_import_error_when_in_non_package2(self):
+        exec """def imp():
+                    from .. import inpackage
+        """.rstrip()
+        raises(ValueError, imp)
+
     def test_relative_import_with___name__(self):
         import sys
         mydict = {'__name__': 'sys.foo'}

Modified: pypy/branch/fast-forward/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py
==============================================================================
--- pypy/branch/fast-forward/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py	(original)
+++ pypy/branch/fast-forward/pypy/module/test_lib_pypy/ctypes_tests/test_functions.py	Fri Oct  1 22:53:02 2010
@@ -29,6 +29,7 @@
 class RECT(Structure):
     _fields_ = [("left", c_int), ("top", c_int),
                 ("right", c_int), ("bottom", c_int)]
+
 class TestFunctions(BaseCTypesTestChecker):
 
     def test_mro(self):
@@ -392,6 +393,18 @@
         result = f("abcd", ord("b"))
         assert result == "bcd"
 
+    def test_caching_bug_1(self):
+        # the same test as test_call_some_args, with two extra lines
+        # in the middle that trigger caching in f._ptr, which then
+        # makes the last two lines fail
+        f = dll.my_strchr
+        f.argtypes = [c_char_p, c_int]
+        f.restype = c_char_p
+        result = f("abcd", ord("b"))
+        assert result == "bcd"
+        result = f("abcd", ord("b"), 42)
+        assert result == "bcd"
+
     def test_sf1651235(self):
         py.test.skip("we are less strict in checking callback parameters")
         # see http://www.python.org/sf/1651235

Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/llarena.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/lltypesystem/llarena.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/llarena.py	Fri Oct  1 22:53:02 2010
@@ -124,6 +124,9 @@
             assert self.usagemap[i] == 'x'
             self.usagemap[i] = '#'
 
+    def mark_freed(self):
+        self.freed = True    # this method is a hook for tests
+
 class fakearenaaddress(llmemory.fakeaddress):
 
     def __init__(self, arena, offset):
@@ -314,7 +317,7 @@
     assert arena_addr.offset == 0
     arena_addr.arena.reset(False)
     assert not arena_addr.arena.objectptrs
-    arena_addr.arena.freed = True
+    arena_addr.arena.mark_freed()
 
 def arena_reset(arena_addr, size, zero):
     """Free all objects in the arena, which can then be reused.

Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/base.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/gc/base.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/gc/base.py	Fri Oct  1 22:53:02 2010
@@ -20,12 +20,15 @@
     prebuilt_gc_objects_are_static_roots = True
     object_minimal_size = 0
 
-    def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE):
+    def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE,
+                 translated_to_c=True):
         self.gcheaderbuilder = GCHeaderBuilder(self.HDR)
         self.AddressStack = get_address_stack(chunk_size)
         self.AddressDeque = get_address_deque(chunk_size)
         self.AddressDict = AddressDict
         self.config = config
+        assert isinstance(translated_to_c, bool)
+        self.translated_to_c = translated_to_c
 
     def setup(self):
         # all runtime mutable values' setup should happen here
@@ -79,7 +82,7 @@
     def set_root_walker(self, root_walker):
         self.root_walker = root_walker
 
-    def write_barrier(self, addr_struct):
+    def write_barrier(self, newvalue, addr_struct):
         pass
 
     def statistics(self, index):

Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/generation.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/gc/generation.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/gc/generation.py	Fri Oct  1 22:53:02 2010
@@ -5,7 +5,6 @@
 from pypy.rpython.memory.gc.base import read_from_env
 from pypy.rpython.lltypesystem.llmemory import NULL, raw_malloc_usage
 from pypy.rpython.lltypesystem import lltype, llmemory, llarena
-from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE
 from pypy.rlib.objectmodel import free_non_gc_object
 from pypy.rlib.debug import ll_assert
 from pypy.rlib.debug import debug_print, debug_start, debug_stop
@@ -49,15 +48,17 @@
 
     nursery_hash_base = -1
 
-    def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE,
+    def __init__(self, config,
                  nursery_size=32*WORD,
                  min_nursery_size=32*WORD,
                  auto_nursery_size=False,
                  space_size=1024*WORD,
-                 max_space_size=sys.maxint//2+1):
-        SemiSpaceGC.__init__(self, config, chunk_size = chunk_size,
+                 max_space_size=sys.maxint//2+1,
+                 **kwds):
+        SemiSpaceGC.__init__(self, config,
                              space_size = space_size,
-                             max_space_size = max_space_size)
+                             max_space_size = max_space_size,
+                             **kwds)
         assert min_nursery_size <= nursery_size <= space_size // 2
         self.initial_nursery_size = nursery_size
         self.auto_nursery_size = auto_nursery_size
@@ -157,6 +158,14 @@
                   "odd-valued (i.e. tagged) pointer unexpected here")
         return self.nursery <= addr < self.nursery_top
 
+    def appears_to_be_in_nursery(self, addr):
+        # same as is_in_nursery(), but may return True accidentally if
+        # 'addr' is a tagged pointer with just the wrong value.
+        if not self.translated_to_c:
+            if not self.is_valid_gc_object(addr):
+                return False
+        return self.nursery <= addr < self.nursery_top
+
     def malloc_fixedsize_clear(self, typeid, size, can_collect,
                                has_finalizer=False, contains_weakptr=False):
         if (has_finalizer or not can_collect or
@@ -326,7 +335,7 @@
         addr = pointer.address[0]
         newaddr = self.copy(addr)
         pointer.address[0] = newaddr
-        self.write_into_last_generation_obj(obj)
+        self.write_into_last_generation_obj(obj, newaddr)
 
     # ____________________________________________________________
     # Implementation of nursery-only collections
@@ -457,9 +466,9 @@
     #  "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()")
     JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS
 
-    def write_barrier(self, addr_struct):
-        if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS:
-            self.remember_young_pointer(addr_struct)
+    def write_barrier(self, newvalue, addr_struct):
+        if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS:
+            self.remember_young_pointer(addr_struct, newvalue)
 
     def _setup_wb(self):
         DEBUG = self.DEBUG
@@ -470,23 +479,33 @@
         # For x86, there is also an extra requirement: when the JIT calls
         # remember_young_pointer(), it assumes that it will not touch the SSE
         # registers, so it does not save and restore them (that's a *hack*!).
-        def remember_young_pointer(addr_struct):
+        def remember_young_pointer(addr_struct, addr):
             #llop.debug_print(lltype.Void, "\tremember_young_pointer",
             #                 addr_struct, "<-", addr)
             if DEBUG:
                 ll_assert(not self.is_in_nursery(addr_struct),
                           "nursery object with GCFLAG_NO_YOUNG_PTRS")
-            self.old_objects_pointing_to_young.append(addr_struct)
-            self.header(addr_struct).tid &= ~GCFLAG_NO_YOUNG_PTRS
-            self.write_into_last_generation_obj(addr_struct)
+            #
+            # What is important in this function is that it *must*
+            # clear the flag GCFLAG_NO_YOUNG_PTRS from 'addr_struct'
+            # if 'addr' is in the nursery.  It is ok if, accidentally,
+            # it also clears the flag in some more rare cases, like
+            # 'addr' being a tagged pointer whose value happens to be
+            # a large integer that fools is_in_nursery().
+            if self.appears_to_be_in_nursery(addr):
+                self.old_objects_pointing_to_young.append(addr_struct)
+                self.header(addr_struct).tid &= ~GCFLAG_NO_YOUNG_PTRS
+            self.write_into_last_generation_obj(addr_struct, addr)
         remember_young_pointer._dont_inline_ = True
         self.remember_young_pointer = remember_young_pointer
 
-    def write_into_last_generation_obj(self, addr_struct):
+    def write_into_last_generation_obj(self, addr_struct, addr):
         objhdr = self.header(addr_struct)
         if objhdr.tid & GCFLAG_NO_HEAP_PTRS:
-            objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS
-            self.last_generation_root_objects.append(addr_struct)
+            if (self.is_valid_gc_object(addr) and
+                    not self.is_last_generation(addr)):
+                objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS
+                self.last_generation_root_objects.append(addr_struct)
     write_into_last_generation_obj._always_inline_ = True
 
     def assume_young_pointers(self, addr_struct):

Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/markcompact.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/gc/markcompact.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/gc/markcompact.py	Fri Oct  1 22:53:02 2010
@@ -2,7 +2,6 @@
 from pypy.rpython.memory.gc.base import MovingGCBase, read_from_env
 from pypy.rlib.debug import ll_assert, have_debug_prints
 from pypy.rlib.debug import debug_print, debug_start, debug_stop
-from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE
 from pypy.rpython.memory.support import get_address_stack, get_address_deque
 from pypy.rpython.memory.support import AddressDict
 from pypy.rpython.lltypesystem.llmemory import NULL, raw_malloc_usage
@@ -86,9 +85,9 @@
     free = NULL
     next_collect_after = -1
 
-    def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE, space_size=4096,
-                 min_next_collect_after=128):
-        MovingGCBase.__init__(self, config, chunk_size)
+    def __init__(self, config, space_size=4096,
+                 min_next_collect_after=128, **kwds):
+        MovingGCBase.__init__(self, config, **kwds)
         self.space_size = space_size
         self.min_next_collect_after = min_next_collect_after
 

Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/marksweep.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/gc/marksweep.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/gc/marksweep.py	Fri Oct  1 22:53:02 2010
@@ -1,7 +1,6 @@
 from pypy.rpython.lltypesystem.llmemory import raw_malloc, raw_free
 from pypy.rpython.lltypesystem.llmemory import raw_memcopy, raw_memclear
 from pypy.rpython.lltypesystem.llmemory import NULL, raw_malloc_usage
-from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE
 from pypy.rpython.memory.support import get_address_stack
 from pypy.rpython.memory.gcheader import GCHeaderBuilder
 from pypy.rpython.lltypesystem import lltype, llmemory, rffi, llgroup
@@ -48,9 +47,9 @@
     # translating to a real backend.
     TRANSLATION_PARAMS = {'start_heap_size': 8*1024*1024} # XXX adjust
 
-    def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE, start_heap_size=4096):
+    def __init__(self, config, start_heap_size=4096, **kwds):
         self.param_start_heap_size = start_heap_size
-        GCBase.__init__(self, config, chunk_size)
+        GCBase.__init__(self, config, **kwds)
 
     def setup(self):
         GCBase.setup(self)
@@ -714,8 +713,8 @@
     _alloc_flavor_ = "raw"
     COLLECT_EVERY = 2000
 
-    def __init__(self, chunk_size=DEFAULT_CHUNK_SIZE, start_heap_size=4096):
-        MarkSweepGC.__init__(self, chunk_size, start_heap_size)
+    def __init__(self, config, **kwds):
+        MarkSweepGC.__init__(self, config, **kwds)
         self.count_mallocs = 0
 
     def maybe_collect(self):

Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/minimark.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/gc/minimark.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/gc/minimark.py	Fri Oct  1 22:53:02 2010
@@ -4,7 +4,6 @@
 from pypy.rpython.lltypesystem.llmemory import raw_malloc_usage
 from pypy.rpython.memory.gc.base import GCBase, MovingGCBase
 from pypy.rpython.memory.gc import minimarkpage, base, generation
-from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE
 from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask, r_uint
 from pypy.rlib.rarithmetic import LONG_BIT_SHIFT
 from pypy.rlib.debug import ll_assert, debug_print, debug_start, debug_stop
@@ -140,7 +139,7 @@
         "large_object_gcptrs": 8250*WORD,
         }
 
-    def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE,
+    def __init__(self, config,
                  read_from_env=False,
                  nursery_size=32*WORD,
                  page_size=16*WORD,
@@ -150,8 +149,9 @@
                  card_page_indices=0,
                  large_object=8*WORD,
                  large_object_gcptrs=10*WORD,
-                 ArenaCollectionClass=None):
-        MovingGCBase.__init__(self, config, chunk_size)
+                 ArenaCollectionClass=None,
+                 **kwds):
+        MovingGCBase.__init__(self, config, **kwds)
         assert small_request_threshold % WORD == 0
         self.read_from_env = read_from_env
         self.nursery_size = nursery_size
@@ -636,6 +636,14 @@
                   "odd-valued (i.e. tagged) pointer unexpected here")
         return self.nursery <= addr < self.nursery_top
 
+    def appears_to_be_in_nursery(self, addr):
+        # same as is_in_nursery(), but may return True accidentally if
+        # 'addr' is a tagged pointer with just the wrong value.
+        if not self.translated_to_c:
+            if not self.is_valid_gc_object(addr):
+                return False
+        return self.nursery <= addr < self.nursery_top
+
     def is_forwarded(self, obj):
         """Returns True if the nursery obj is marked as forwarded.
         Implemented a bit obscurely by checking an unrelated flag
@@ -726,16 +734,16 @@
     def JIT_max_size_of_young_obj(cls):
         return cls.TRANSLATION_PARAMS['large_object']
 
-    def write_barrier(self, addr_struct):
+    def write_barrier(self, newvalue, addr_struct):
         if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS:
-            self.remember_young_pointer(addr_struct)
+            self.remember_young_pointer(addr_struct, newvalue)
 
-    def write_barrier_from_array(self, addr_array, index):
+    def write_barrier_from_array(self, newvalue, addr_array, index):
         if self.header(addr_array).tid & GCFLAG_NO_YOUNG_PTRS:
             if self.card_page_indices > 0:     # <- constant-folded
                 self.remember_young_pointer_from_array(addr_array, index)
             else:
-                self.remember_young_pointer(addr_array)
+                self.remember_young_pointer(addr_array, newvalue)
 
     def _init_writebarrier_logic(self):
         DEBUG = self.DEBUG
@@ -746,27 +754,28 @@
         # For x86, there is also an extra requirement: when the JIT calls
         # remember_young_pointer(), it assumes that it will not touch the SSE
         # registers, so it does not save and restore them (that's a *hack*!).
-        def remember_young_pointer(addr_struct):
+        def remember_young_pointer(addr_struct, newvalue):
             # 'addr_struct' is the address of the object in which we write.
+            # 'newvalue' is the address that we are going to write in there.
             if DEBUG:
                 ll_assert(not self.is_in_nursery(addr_struct),
                           "nursery object with GCFLAG_NO_YOUNG_PTRS")
             #
-            # We assume that what we are writing is a pointer to the nursery
-            # (and don't care for the fact that this new pointer may not
-            # actually point to the nursery, which seems ok).  What we need is
+            # If it seems that what we are writing is a pointer to the nursery
+            # (as checked with appears_to_be_in_nursery()), then we need
             # to remove the flag GCFLAG_NO_YOUNG_PTRS and add the old object
             # to the list 'old_objects_pointing_to_young'.  We know that
             # 'addr_struct' cannot be in the nursery, because nursery objects
             # never have the flag GCFLAG_NO_YOUNG_PTRS to start with.
-            self.old_objects_pointing_to_young.append(addr_struct)
             objhdr = self.header(addr_struct)
-            objhdr.tid &= ~GCFLAG_NO_YOUNG_PTRS
+            if self.appears_to_be_in_nursery(newvalue):
+                self.old_objects_pointing_to_young.append(addr_struct)
+                objhdr.tid &= ~GCFLAG_NO_YOUNG_PTRS
             #
             # Second part: if 'addr_struct' is actually a prebuilt GC
             # object and it's the first time we see a write to it, we
             # add it to the list 'prebuilt_root_objects'.  Note that we
-            # do it even in the (rare?) case of 'addr' being another
+            # do it even in the (rare?) case of 'addr' being NULL or another
             # prebuilt object, to simplify code.
             if objhdr.tid & GCFLAG_NO_HEAP_PTRS:
                 objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS
@@ -780,16 +789,24 @@
 
 
     def _init_writebarrier_with_card_marker(self):
+        DEBUG = self.DEBUG
         def remember_young_pointer_from_array(addr_array, index):
             # 'addr_array' is the address of the object in which we write,
             # which must have an array part;  'index' is the index of the
             # item that is (or contains) the pointer that we write.
+            if DEBUG:
+                ll_assert(not self.is_in_nursery(addr_array),
+                          "nursery array with GCFLAG_NO_YOUNG_PTRS")
             objhdr = self.header(addr_array)
             if objhdr.tid & GCFLAG_HAS_CARDS == 0:
                 #
-                # no cards, use default logic.  The 'nocard_logic()' is just
-                # 'remember_young_pointer()', but forced to be inlined here.
-                nocard_logic(addr_array)
+                # no cards, use default logic.  Mostly copied from above.
+                self.old_objects_pointing_to_young.append(addr_array)
+                objhdr = self.header(addr_array)
+                objhdr.tid &= ~GCFLAG_NO_YOUNG_PTRS
+                if objhdr.tid & GCFLAG_NO_HEAP_PTRS:
+                    objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS
+                    self.prebuilt_root_objects.append(addr_array)
                 return
             #
             # 'addr_array' is a raw_malloc'ed array with card markers
@@ -807,17 +824,15 @@
                 return
             #
             # We set the flag (even if the newly written address does not
-            # actually point to the nursery -- like remember_young_pointer()).
+            # actually point to the nursery, which seems to be ok -- actually
+            # it seems more important that remember_young_pointer_from_array()
+            # does not take 3 arguments).
             addr_byte.char[0] = chr(byte | bitmask)
             #
             if objhdr.tid & GCFLAG_CARDS_SET == 0:
                 self.old_objects_with_cards_set.append(addr_array)
                 objhdr.tid |= GCFLAG_CARDS_SET
 
-        nocard_logic = func_with_new_name(self.remember_young_pointer,
-                                          'remember_young_pointer_nocard')
-        del nocard_logic._dont_inline_
-        nocard_logic._always_inline_ = True
         remember_young_pointer_from_array._dont_inline_ = True
         self.remember_young_pointer_from_array = (
             remember_young_pointer_from_array)

Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/minimarkpage.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/gc/minimarkpage.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/gc/minimarkpage.py	Fri Oct  1 22:53:02 2010
@@ -4,15 +4,45 @@
 from pypy.rlib.debug import ll_assert
 
 WORD = LONG_BIT // 8
-WORD_POWER_2 = {32: 2, 64: 3}[LONG_BIT]
 NULL = llmemory.NULL
+WORD_POWER_2 = {32: 2, 64: 3}[LONG_BIT]
+assert 1 << WORD_POWER_2 == WORD
 
 
-# Terminology: the memory is subdivided into "pages".
+# Terminology: the memory is subdivided into "arenas" containing "pages".
 # A page contains a number of allocated objects, called "blocks".
 
-# The actual allocation occurs in whole arenas, which are subdivided
-# into pages.  We don't keep track of the arenas.  A page can be:
+# The actual allocation occurs in whole arenas, which are then subdivided
+# into pages.  For each arena we allocate one of the following structures:
+
+ARENA_PTR = lltype.Ptr(lltype.ForwardReference())
+ARENA = lltype.Struct('ArenaReference',
+    # -- The address of the arena, as returned by malloc()
+    ('base', llmemory.Address),
+    # -- The number of free and the total number of pages in the arena
+    ('nfreepages', lltype.Signed),
+    ('totalpages', lltype.Signed),
+    # -- A chained list of free pages in the arena.  Ends with NULL.
+    ('freepages', llmemory.Address),
+    # -- A linked list of arenas.  See below.
+    ('nextarena', ARENA_PTR),
+    )
+ARENA_PTR.TO.become(ARENA)
+ARENA_NULL = lltype.nullptr(ARENA)
+
+# The idea is that when we need a free page, we take it from the arena
+# which currently has the *lowest* number of free pages.  This allows
+# arenas with a lot of free pages to eventually become entirely free, at
+# which point they are returned to the OS.  If an arena has a total of
+# 64 pages, then we have 64 global lists, arenas_lists[0] to
+# arenas_lists[63], such that arenas_lists[i] contains exactly those
+# arenas that have 'nfreepages == i'.  We allocate pages out of the
+# arena in 'current_arena'; when it is exhausted we pick another arena
+# with the smallest value for nfreepages (but > 0).
+
+# ____________________________________________________________
+#
+# Each page in an arena can be:
 #
 # - uninitialized: never touched so far.
 #
@@ -21,10 +51,11 @@
 #   room for objects of that size, unless it is completely full.
 #
 # - free: used to be partially full, and is now free again.  The page is
-#   on the chained list of free pages.
+#   on the chained list of free pages 'freepages' from its arena.
 
-# Similarily, each allocated page contains blocks of a given size, which can
-# be either uninitialized, allocated or free.
+# Each allocated page contains blocks of a given size, which can again be in
+# one of three states: allocated, free, or uninitialized.  The uninitialized
+# blocks (initially all of them) are at the tail of the page.
 
 PAGE_PTR = lltype.Ptr(lltype.ForwardReference())
 PAGE_HEADER = lltype.Struct('PageHeader',
@@ -32,13 +63,16 @@
     #    pages, it is a chained list of pages having the same size class,
     #    rooted in 'page_for_size[size_class]'.  For full pages, it is a
     #    different chained list rooted in 'full_page_for_size[size_class]'.
+    #    For free pages, it is the list 'freepages' in the arena header.
     ('nextpage', PAGE_PTR),
-    # -- The number of free blocks, and the number of uninitialized blocks.
-    #    The number of allocated blocks is the rest.
-    ('nuninitialized', lltype.Signed),
+    # -- The arena this page is part of.
+    ('arena', ARENA_PTR),
+    # -- The number of free blocks.  The numbers of uninitialized and
+    #    allocated blocks can be deduced from the context if needed.
     ('nfree', lltype.Signed),
-    # -- The chained list of free blocks.  If there are none, points to the
-    #    first uninitialized block.
+    # -- The chained list of free blocks.  It ends as a pointer to the
+    #    first uninitialized block (pointing to data that is uninitialized,
+    #    or to the end of the page).
     ('freeblock', llmemory.Address),
     # -- The structure above is 4 words, which is a good value:
     #    '(1024-4) % N' is zero or very small for various small N's,
@@ -72,13 +106,35 @@
         self.nblocks_for_size = lltype.malloc(rffi.CArray(lltype.Signed),
                                               length, flavor='raw')
         self.hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER))
+        assert page_size > self.hdrsize
         self.nblocks_for_size[0] = 0    # unused
         for i in range(1, length):
             self.nblocks_for_size[i] = (page_size - self.hdrsize) // (WORD * i)
         #
-        self.uninitialized_pages = NULL
+        self.max_pages_per_arena = arena_size // page_size
+        self.arenas_lists = lltype.malloc(rffi.CArray(ARENA_PTR),
+                                          self.max_pages_per_arena,
+                                          flavor='raw', zero=True)
+        # this is used in mass_free() only
+        self.old_arenas_lists = lltype.malloc(rffi.CArray(ARENA_PTR),
+                                              self.max_pages_per_arena,
+                                              flavor='raw', zero=True)
+        #
+        # the arena currently consumed; it must have at least one page
+        # available, or be NULL.  The arena object that we point to is
+        # not in any 'arenas_lists'.  We will consume all its pages before
+        # we choose a next arena, even if there is a major collection
+        # in-between.
+        self.current_arena = ARENA_NULL
+        #
+        # guarantee that 'arenas_lists[1:min_empty_nfreepages]' are all empty
+        self.min_empty_nfreepages = self.max_pages_per_arena
+        #
+        # part of current_arena might still contain uninitialized pages
         self.num_uninitialized_pages = 0
-        self.free_pages = NULL
+        #
+        # the total memory used, counting every block in use, without
+        # the additional bookkeeping stuff.
         self.total_memory_used = r_uint(0)
 
 
@@ -109,16 +165,12 @@
             #
         else:
             # The 'result' is part of the uninitialized blocks.
-            ll_assert(page.nuninitialized > 0,
-                      "fully allocated page found in the page_for_size list")
-            page.nuninitialized -= 1
-            if page.nuninitialized > 0:
-                freeblock = result + nsize
-            else:
-                freeblock = NULL
+            freeblock = result + nsize
         #
         page.freeblock = freeblock
-        if freeblock == NULL:
+        #
+        pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
+        if freeblock - pageaddr > self.page_size - nsize:
             # This was the last free block, so unlink the page from the
             # chained list and put it in the 'full_page_for_size' list.
             self.page_for_size[size_class] = page.nextpage
@@ -132,37 +184,88 @@
     def allocate_new_page(self, size_class):
         """Allocate and return a new page for the given size_class."""
         #
-        if self.free_pages != NULL:
+        # Allocate a new arena if needed.
+        if self.current_arena == ARENA_NULL:
+            self.allocate_new_arena()
+        #
+        # The result is simply 'current_arena.freepages'.
+        arena = self.current_arena
+        result = arena.freepages
+        if arena.nfreepages > 0:
+            #
+            # The 'result' was part of the chained list; read the next.
+            arena.nfreepages -= 1
+            freepages = result.address[0]
+            llarena.arena_reset(result,
+                                llmemory.sizeof(llmemory.Address),
+                                0)
             #
-            # Get the page from the chained list 'free_pages'.
-            page = self.free_pages
-            self.free_pages = page.address[0]
-            llarena.arena_reset(page, llmemory.sizeof(llmemory.Address), 0)
         else:
-            # Get the next free page from the uninitialized pages.
-            if self.num_uninitialized_pages == 0:
-                self.allocate_new_arena()   # Out of memory.  Get a new arena.
-            page = self.uninitialized_pages
-            self.uninitialized_pages += self.page_size
+            # The 'result' is part of the uninitialized pages.
+            ll_assert(self.num_uninitialized_pages > 0,
+                      "fully allocated arena found in self.current_arena")
             self.num_uninitialized_pages -= 1
+            if self.num_uninitialized_pages > 0:
+                freepages = result + self.page_size
+            else:
+                freepages = NULL
         #
-        # Initialize the fields of the resulting page
-        llarena.arena_reserve(page, llmemory.sizeof(PAGE_HEADER))
-        result = llmemory.cast_adr_to_ptr(page, PAGE_PTR)
+        arena.freepages = freepages
+        if freepages == NULL:
+            # This was the last page, so put the arena away into
+            # arenas_lists[0].
+            ll_assert(arena.nfreepages == 0,
+                      "freepages == NULL but nfreepages > 0")
+            arena.nextarena = self.arenas_lists[0]
+            self.arenas_lists[0] = arena
+            self.current_arena = ARENA_NULL
         #
-        result.nuninitialized = self.nblocks_for_size[size_class]
-        result.nfree = 0
-        result.freeblock = page + self.hdrsize
-        result.nextpage = PAGE_NULL
+        # Initialize the fields of the resulting page
+        llarena.arena_reserve(result, llmemory.sizeof(PAGE_HEADER))
+        page = llmemory.cast_adr_to_ptr(result, PAGE_PTR)
+        page.arena = arena
+        page.nfree = 0
+        page.freeblock = result + self.hdrsize
+        page.nextpage = PAGE_NULL
         ll_assert(self.page_for_size[size_class] == PAGE_NULL,
                   "allocate_new_page() called but a page is already waiting")
-        self.page_for_size[size_class] = result
-        return result
+        self.page_for_size[size_class] = page
+        return page
+
+
+    def _all_arenas(self):
+        """For testing.  Enumerates all arenas."""
+        if self.current_arena:
+            yield self.current_arena
+        for arena in self.arenas_lists:
+            while arena:
+                yield arena
+                arena = arena.nextarena
 
 
     def allocate_new_arena(self):
-        ll_assert(self.num_uninitialized_pages == 0,
-                  "some uninitialized pages are already waiting")
+        """Loads in self.current_arena the arena to allocate from next."""
+        #
+        # Pick an arena from 'arenas_lists[i]', with i as small as possible
+        # but > 0.  Use caching with 'min_empty_nfreepages', which guarantees
+        # that 'arenas_lists[1:min_empty_nfreepages]' are all empty.
+        i = self.min_empty_nfreepages
+        while i < self.max_pages_per_arena:
+            #
+            if self.arenas_lists[i] != ARENA_NULL:
+                #
+                # Found it.
+                self.current_arena = self.arenas_lists[i]
+                self.arenas_lists[i] = self.current_arena.nextarena
+                return
+            #
+            i += 1
+            self.min_empty_nfreepages = i
+        #
+        # No more arena with any free page.  We must allocate a new arena.
+        if not we_are_translated():
+            for a in self._all_arenas():
+                assert a.nfreepages == 0
         #
         # 'arena_base' points to the start of malloced memory; it might not
         # be a page-aligned address
@@ -177,13 +280,15 @@
         # 'npages' is the number of full pages just allocated
         npages = (arena_end - firstpage) // self.page_size
         #
-        # add these pages to the list
-        self.uninitialized_pages = firstpage
+        # Allocate an ARENA object and initialize it
+        arena = lltype.malloc(ARENA, flavor='raw')
+        arena.base = arena_base
+        arena.nfreepages = 0        # they are all uninitialized pages
+        arena.totalpages = npages
+        arena.freepages = firstpage
         self.num_uninitialized_pages = npages
+        self.current_arena = arena
         #
-        # increase a bit arena_size for the next time
-        self.arena_size = (self.arena_size // 4 * 5) + (self.page_size - 1)
-        self.arena_size = (self.arena_size // self.page_size) * self.page_size
     allocate_new_arena._dont_inline_ = True
 
 
@@ -199,16 +304,51 @@
             #
             # Walk the pages in 'page_for_size[size_class]' and
             # 'full_page_for_size[size_class]' and free some objects.
-            # Pages completely freed are added to 'self.free_pages', and
-            # become available for reuse by any size class.  Pages not
-            # completely freed are re-chained either in
+            # Pages completely freed are added to 'page.arena.freepages',
+            # and become available for reuse by any size class.  Pages
+            # not completely freed are re-chained either in
             # 'full_page_for_size[]' or 'page_for_size[]'.
-            self.mass_free_in_page(size_class, ok_to_free_func)
+            self.mass_free_in_pages(size_class, ok_to_free_func)
             #
             size_class -= 1
+        #
+        # Rehash arenas into the correct arenas_lists[i].  If
+        # 'self.current_arena' contains an arena too, it remains there.
+        (self.old_arenas_lists, self.arenas_lists) = (
+            self.arenas_lists, self.old_arenas_lists)
+        #
+        i = 0
+        while i < self.max_pages_per_arena:
+            self.arenas_lists[i] = ARENA_NULL
+            i += 1
+        #
+        i = 0
+        while i < self.max_pages_per_arena:
+            arena = self.old_arenas_lists[i]
+            while arena != ARENA_NULL:
+                nextarena = arena.nextarena
+                #
+                if arena.nfreepages == arena.totalpages:
+                    #
+                    # The whole arena is empty.  Free it.
+                    llarena.arena_free(arena.base)
+                    lltype.free(arena, flavor='raw')
+                    #
+                else:
+                    # Insert 'arena' in the correct arenas_lists[n]
+                    n = arena.nfreepages
+                    ll_assert(n < self.max_pages_per_arena,
+                             "totalpages != nfreepages >= max_pages_per_arena")
+                    arena.nextarena = self.arenas_lists[n]
+                    self.arenas_lists[n] = arena
+                #
+                arena = nextarena
+            i += 1
+        #
+        self.min_empty_nfreepages = 1
 
 
-    def mass_free_in_page(self, size_class, ok_to_free_func):
+    def mass_free_in_pages(self, size_class, ok_to_free_func):
         nblocks = self.nblocks_for_size[size_class]
         block_size = size_class * WORD
         remaining_partial_pages = PAGE_NULL
@@ -224,8 +364,7 @@
             while page != PAGE_NULL:
                 #
                 # Collect the page.
-                surviving = self.walk_page(page, block_size,
-                                           nblocks, ok_to_free_func)
+                surviving = self.walk_page(page, block_size, ok_to_free_func)
                 nextpage = page.nextpage
                 #
                 if surviving == nblocks:
@@ -259,19 +398,23 @@
     def free_page(self, page):
         """Free a whole page."""
         #
-        # Done by inserting it in the 'free_pages' list.
+        # Insert the freed page in the arena's 'freepages' list.
+        # If nfreepages == totalpages, then it will be freed at the
+        # end of mass_free().
+        arena = page.arena
+        arena.nfreepages += 1
         pageaddr = llmemory.cast_ptr_to_adr(page)
         pageaddr = llarena.getfakearenaaddress(pageaddr)
         llarena.arena_reset(pageaddr, self.page_size, 0)
         llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address))
-        pageaddr.address[0] = self.free_pages
-        self.free_pages = pageaddr
+        pageaddr.address[0] = arena.freepages
+        arena.freepages = pageaddr
 
 
-    def walk_page(self, page, block_size, nblocks, ok_to_free_func):
+    def walk_page(self, page, block_size, ok_to_free_func):
         """Walk over all objects in a page, and ask ok_to_free_func()."""
         #
-        # 'freeblock' is the next free block, or NULL if there isn't any more.
+        # 'freeblock' is the next free block
         freeblock = page.freeblock
         #
         # 'prevfreeblockat' is the address of where 'freeblock' was read from.
@@ -281,22 +424,28 @@
         obj = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
         obj += self.hdrsize
         surviving = 0    # initially
+        skip_free_blocks = page.nfree
         #
-        nblocks -= page.nuninitialized
-        index = nblocks
-        while index > 0:
+        while True:
             #
             if obj == freeblock:
                 #
+                if skip_free_blocks == 0:
+                    #
+                    # 'obj' points to the first uninitialized block,
+                    # or to the end of the page if there are none.
+                    break
+                #
                 # 'obj' points to a free block.  It means that
                 # 'prevfreeblockat.address[0]' does not need to be updated.
                 # Just read the next free block from 'obj.address[0]'.
+                skip_free_blocks -= 1
                 prevfreeblockat = obj
                 freeblock = obj.address[0]
                 #
             else:
                 # 'obj' points to a valid object.
-                ll_assert(not freeblock or freeblock > obj,
+                ll_assert(freeblock > obj,
                           "freeblocks are linked out of order")
                 #
                 if ok_to_free_func(obj):
@@ -310,15 +459,14 @@
                     prevfreeblockat = obj
                     obj.address[0] = freeblock
                     #
+                    # Update the number of free objects in the page.
+                    page.nfree += 1
+                    #
                 else:
                     # The object survives.
                     surviving += 1
             #
             obj += block_size
-            index -= 1
-        #
-        # Update the number of free objects in the page.
-        page.nfree = nblocks - surviving
         #
         # Update the global total size of objects.
         self.total_memory_used += surviving * block_size
@@ -327,6 +475,20 @@
         return surviving
 
 
+    def _nuninitialized(self, page, size_class):
+        # Helper for debugging: count the number of uninitialized blocks
+        freeblock = page.freeblock
+        for i in range(page.nfree):
+            freeblock = freeblock.address[0]
+        assert freeblock != NULL
+        pageaddr = llarena.getfakearenaaddress(llmemory.cast_ptr_to_adr(page))
+        num_initialized_blocks, rem = divmod(
+            freeblock - pageaddr - self.hdrsize, size_class * WORD)
+        assert rem == 0, "page size_class misspecified?"
+        nblocks = self.nblocks_for_size[size_class]
+        return nblocks - num_initialized_blocks
+
+
 # ____________________________________________________________
 # Helpers to go from a pointer to the start of its page
 

Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/semispace.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/gc/semispace.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/gc/semispace.py	Fri Oct  1 22:53:02 2010
@@ -1,7 +1,6 @@
 from pypy.rpython.lltypesystem.llmemory import raw_malloc, raw_free
 from pypy.rpython.lltypesystem.llmemory import raw_memcopy, raw_memclear
 from pypy.rpython.lltypesystem.llmemory import NULL, raw_malloc_usage
-from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE
 from pypy.rpython.memory.support import get_address_stack, get_address_deque
 from pypy.rpython.memory.support import AddressDict
 from pypy.rpython.lltypesystem import lltype, llmemory, llarena, rffi, llgroup
@@ -59,11 +58,11 @@
     # translating to a real backend.
     TRANSLATION_PARAMS = {'space_size': 8*1024*1024} # XXX adjust
 
-    def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE, space_size=4096,
-                 max_space_size=sys.maxint//2+1):
+    def __init__(self, config, space_size=4096, max_space_size=sys.maxint//2+1,
+                 **kwds):
         self.param_space_size = space_size
         self.param_max_space_size = max_space_size
-        MovingGCBase.__init__(self, config, chunk_size)
+        MovingGCBase.__init__(self, config, **kwds)
 
     def setup(self):
         #self.total_collection_time = 0.0

Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_direct.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_direct.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_direct.py	Fri Oct  1 22:53:02 2010
@@ -70,6 +70,7 @@
         GC_PARAMS = self.GC_PARAMS.copy()
         if hasattr(meth, 'GC_PARAMS'):
             GC_PARAMS.update(meth.GC_PARAMS)
+        GC_PARAMS['translated_to_c'] = False
         self.gc = self.GCClass(config, **GC_PARAMS)
         self.gc.DEBUG = True
         self.rootwalker = DirectRootWalker(self)
@@ -86,17 +87,19 @@
 
     def write(self, p, fieldname, newvalue):
         if self.gc.needs_write_barrier:
+            newaddr = llmemory.cast_ptr_to_adr(newvalue)
             addr_struct = llmemory.cast_ptr_to_adr(p)
-            self.gc.write_barrier(addr_struct)
+            self.gc.write_barrier(newaddr, addr_struct)
         setattr(p, fieldname, newvalue)
 
     def writearray(self, p, index, newvalue):
         if self.gc.needs_write_barrier:
+            newaddr = llmemory.cast_ptr_to_adr(newvalue)
             addr_struct = llmemory.cast_ptr_to_adr(p)
             if hasattr(self.gc, 'write_barrier_from_array'):
-                self.gc.write_barrier_from_array(addr_struct, index)
+                self.gc.write_barrier_from_array(newaddr, addr_struct, index)
             else:
-                self.gc.write_barrier(addr_struct)
+                self.gc.write_barrier(newaddr, addr_struct)
         p[index] = newvalue
 
     def malloc(self, TYPE, n=None):

Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimarkpage.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimarkpage.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimarkpage.py	Fri Oct  1 22:53:02 2010
@@ -12,17 +12,19 @@
 
 
 def test_allocate_arena():
-    ac = ArenaCollection(SHIFT + 16*20, 16, 1)
+    ac = ArenaCollection(SHIFT + 64*20, 64, 1)
     ac.allocate_new_arena()
     assert ac.num_uninitialized_pages == 20
-    ac.uninitialized_pages + 16*20   # does not raise
-    py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 16*20 + 1")
+    upages = ac.current_arena.freepages
+    upages + 64*20   # does not raise
+    py.test.raises(llarena.ArenaError, "upages + 64*20 + 1")
     #
-    ac = ArenaCollection(SHIFT + 16*20 + 7, 16, 1)
+    ac = ArenaCollection(SHIFT + 64*20 + 7, 64, 1)
     ac.allocate_new_arena()
     assert ac.num_uninitialized_pages == 20
-    ac.uninitialized_pages + 16*20 + 7   # does not raise
-    py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 16*20 + 16")
+    upages = ac.current_arena.freepages
+    upages + 64*20 + 7   # does not raise
+    py.test.raises(llarena.ArenaError, "upages + 64*20 + 64")
 
 
 def test_allocate_new_page():
@@ -31,7 +33,8 @@
     #
     def checknewpage(page, size_class):
         size = WORD * size_class
-        assert page.nuninitialized == (pagesize - hdrsize) // size
+        assert (ac._nuninitialized(page, size_class) ==
+                    (pagesize - hdrsize) // size)
         assert page.nfree == 0
         page1 = page.freeblock - hdrsize
         assert llmemory.cast_ptr_to_adr(page) == page1
@@ -44,13 +47,13 @@
     page = ac.allocate_new_page(5)
     checknewpage(page, 5)
     assert ac.num_uninitialized_pages == 2
-    assert ac.uninitialized_pages - pagesize == cast_ptr_to_adr(page)
+    assert ac.current_arena.freepages - pagesize == cast_ptr_to_adr(page)
     assert ac.page_for_size[5] == page
     #
     page = ac.allocate_new_page(3)
     checknewpage(page, 3)
     assert ac.num_uninitialized_pages == 1
-    assert ac.uninitialized_pages - pagesize == cast_ptr_to_adr(page)
+    assert ac.current_arena.freepages - pagesize == cast_ptr_to_adr(page)
     assert ac.page_for_size[3] == page
     #
     page = ac.allocate_new_page(4)
@@ -71,17 +74,17 @@
         page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
         if step == 1:
             page.nfree = 0
-            page.nuninitialized = nblocks - nusedblocks
+            nuninitialized = nblocks - nusedblocks
         else:
             page.nfree = nusedblocks
-            page.nuninitialized = nblocks - 2*nusedblocks
+            nuninitialized = nblocks - 2*nusedblocks
+        page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
         if nusedblocks < nblocks:
-            page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
             chainedlists = ac.page_for_size
         else:
-            page.freeblock = NULL
             chainedlists = ac.full_page_for_size
         page.nextpage = chainedlists[size_class]
+        page.arena = ac.current_arena
         chainedlists[size_class] = page
         if fill_with_objects:
             for i in range(0, nusedblocks*step, step):
@@ -98,11 +101,15 @@
                     prev = 'prevhole.address[0]'
                 endaddr = pageaddr + hdrsize + 2*nusedblocks * size_block
                 exec '%s = endaddr' % prev in globals(), locals()
+        assert ac._nuninitialized(page, size_class) == nuninitialized
     #
     ac.allocate_new_arena()
     num_initialized_pages = len(pagelayout.rstrip(" "))
-    ac._startpageaddr = ac.uninitialized_pages
-    ac.uninitialized_pages += pagesize * num_initialized_pages
+    ac._startpageaddr = ac.current_arena.freepages
+    if pagelayout.endswith(" "):
+        ac.current_arena.freepages += pagesize * num_initialized_pages
+    else:
+        ac.current_arena.freepages = NULL
     ac.num_uninitialized_pages -= num_initialized_pages
     #
     for i in reversed(range(num_initialized_pages)):
@@ -115,8 +122,9 @@
             link(pageaddr, size_class, size_block, nblocks, nblocks-1)
         elif c == '.':    # a free, but initialized, page
             llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address))
-            pageaddr.address[0] = ac.free_pages
-            ac.free_pages = pageaddr
+            pageaddr.address[0] = ac.current_arena.freepages
+            ac.current_arena.freepages = pageaddr
+            ac.current_arena.nfreepages += 1
         elif c == '#':    # a random full page, in the list 'full_pages'
             size_class = fill_with_objects or 1
             size_block = WORD * size_class
@@ -142,26 +150,29 @@
 def checkpage(ac, page, expected_position):
     assert llmemory.cast_ptr_to_adr(page) == pagenum(ac, expected_position)
 
+def freepages(ac):
+    return ac.current_arena.freepages
+
 
 def test_simple_arena_collection():
     pagesize = hdrsize + 16
     ac = arena_collection_for_test(pagesize, "##....#   ")
     #
-    assert ac.free_pages == pagenum(ac, 2)
+    assert freepages(ac) == pagenum(ac, 2)
     page = ac.allocate_new_page(1); checkpage(ac, page, 2)
-    assert ac.free_pages == pagenum(ac, 3)
+    assert freepages(ac) == pagenum(ac, 3)
     page = ac.allocate_new_page(2); checkpage(ac, page, 3)
-    assert ac.free_pages == pagenum(ac, 4)
+    assert freepages(ac) == pagenum(ac, 4)
     page = ac.allocate_new_page(3); checkpage(ac, page, 4)
-    assert ac.free_pages == pagenum(ac, 5)
+    assert freepages(ac) == pagenum(ac, 5)
     page = ac.allocate_new_page(4); checkpage(ac, page, 5)
-    assert ac.free_pages == NULL and ac.num_uninitialized_pages == 3
+    assert freepages(ac) == pagenum(ac, 7) and ac.num_uninitialized_pages == 3
     page = ac.allocate_new_page(5); checkpage(ac, page, 7)
-    assert ac.free_pages == NULL and ac.num_uninitialized_pages == 2
+    assert freepages(ac) == pagenum(ac, 8) and ac.num_uninitialized_pages == 2
     page = ac.allocate_new_page(6); checkpage(ac, page, 8)
-    assert ac.free_pages == NULL and ac.num_uninitialized_pages == 1
+    assert freepages(ac) == pagenum(ac, 9) and ac.num_uninitialized_pages == 1
     page = ac.allocate_new_page(7); checkpage(ac, page, 9)
-    assert ac.free_pages == NULL and ac.num_uninitialized_pages == 0
+    assert not ac.current_arena and ac.num_uninitialized_pages == 0
 
 
 def chkob(ac, num_page, pos_obj, obj):
@@ -205,47 +216,47 @@
     ac = arena_collection_for_test(pagesize, "/.", fill_with_objects=2)
     page = getpage(ac, 0)
     assert page.nfree == 3
-    assert page.nuninitialized == 3
+    assert ac._nuninitialized(page, 2) == 3
     chkob(ac, 0, 2*WORD, page.freeblock)
     #
     obj = ac.malloc(2*WORD); chkob(ac, 0,  2*WORD, obj)
     obj = ac.malloc(2*WORD); chkob(ac, 0,  6*WORD, obj)
     assert page.nfree == 1
-    assert page.nuninitialized == 3
+    assert ac._nuninitialized(page, 2) == 3
     chkob(ac, 0, 10*WORD, page.freeblock)
     #
     obj = ac.malloc(2*WORD); chkob(ac, 0, 10*WORD, obj)
     assert page.nfree == 0
-    assert page.nuninitialized == 3
+    assert ac._nuninitialized(page, 2) == 3
     chkob(ac, 0, 12*WORD, page.freeblock)
     #
     obj = ac.malloc(2*WORD); chkob(ac, 0, 12*WORD, obj)
-    assert page.nuninitialized == 2
+    assert ac._nuninitialized(page, 2) == 2
     obj = ac.malloc(2*WORD); chkob(ac, 0, 14*WORD, obj)
     obj = ac.malloc(2*WORD); chkob(ac, 0, 16*WORD, obj)
     assert page.nfree == 0
-    assert page.nuninitialized == 0
+    assert ac._nuninitialized(page, 2) == 0
     obj = ac.malloc(2*WORD); chkob(ac, 1,  0*WORD, obj)
 
 
 def test_malloc_new_arena():
     pagesize = hdrsize + 7*WORD
     ac = arena_collection_for_test(pagesize, "### ")
+    arena_size = ac.arena_size
     obj = ac.malloc(2*WORD); chkob(ac, 3, 0*WORD, obj)  # 3rd page -> size 2
     #
     del ac.allocate_new_arena    # restore the one from the class
-    arena_size = ac.arena_size
     obj = ac.malloc(3*WORD)                             # need a new arena
     assert ac.num_uninitialized_pages == (arena_size // ac.page_size
-                                          - 1    # for start_of_page()
                                           - 1    # the just-allocated page
                                           )
 
 class OkToFree(object):
-    def __init__(self, ac, answer):
+    def __init__(self, ac, answer, multiarenas=False):
         assert callable(answer) or 0.0 <= answer <= 1.0
         self.ac = ac
         self.answer = answer
+        self.multiarenas = multiarenas
         self.lastnum = 0.0
         self.seen = {}
 
@@ -257,7 +268,10 @@
             ok_to_free = self.lastnum >= 1.0
             if ok_to_free:
                 self.lastnum -= 1.0
-        key = addr - self.ac._startpageaddr
+        if self.multiarenas:
+            key = (addr.arena, addr.offset)
+        else:
+            key = addr - self.ac._startpageaddr
         assert key not in self.seen
         self.seen[key] = ok_to_free
         return ok_to_free
@@ -272,10 +286,10 @@
     page = getpage(ac, 0)
     assert page == ac.page_for_size[2]
     assert page.nextpage == PAGE_NULL
-    assert page.nuninitialized == 1
+    assert ac._nuninitialized(page, 2) == 1
     assert page.nfree == 0
     chkob(ac, 0, 4*WORD, page.freeblock)
-    assert ac.free_pages == NULL
+    assert freepages(ac) == NULL
 
 def test_mass_free_emptied_page():
     pagesize = hdrsize + 7*WORD
@@ -285,7 +299,7 @@
     assert ok_to_free.seen == {hdrsize + 0*WORD: True,
                                hdrsize + 2*WORD: True}
     pageaddr = pagenum(ac, 0)
-    assert pageaddr == ac.free_pages
+    assert pageaddr == freepages(ac)
     assert pageaddr.address[0] == NULL
     assert ac.page_for_size[2] == PAGE_NULL
 
@@ -300,10 +314,9 @@
     page = getpage(ac, 0)
     assert page == ac.full_page_for_size[2]
     assert page.nextpage == PAGE_NULL
-    assert page.nuninitialized == 0
+    assert ac._nuninitialized(page, 2) == 0
     assert page.nfree == 0
-    assert page.freeblock == NULL
-    assert ac.free_pages == NULL
+    assert freepages(ac) == NULL
     assert ac.page_for_size[2] == PAGE_NULL
 
 def test_mass_free_full_is_partially_emptied():
@@ -319,19 +332,19 @@
     pageaddr = pagenum(ac, 0)
     assert page == ac.page_for_size[2]
     assert page.nextpage == PAGE_NULL
-    assert page.nuninitialized == 0
+    assert ac._nuninitialized(page, 2) == 0
     assert page.nfree == 2
     assert page.freeblock == pageaddr + hdrsize + 2*WORD
     assert page.freeblock.address[0] == pageaddr + hdrsize + 6*WORD
-    assert page.freeblock.address[0].address[0] == NULL
-    assert ac.free_pages == NULL
+    assert page.freeblock.address[0].address[0] == pageaddr + hdrsize + 8*WORD
+    assert freepages(ac) == NULL
     assert ac.full_page_for_size[2] == PAGE_NULL
 
 def test_mass_free_half_page_remains():
     pagesize = hdrsize + 24*WORD
     ac = arena_collection_for_test(pagesize, "/", fill_with_objects=2)
     page = getpage(ac, 0)
-    assert page.nuninitialized == 4
+    assert ac._nuninitialized(page, 2) == 4
     assert page.nfree == 4
     #
     ok_to_free = OkToFree(ac, False)
@@ -344,7 +357,7 @@
     pageaddr = pagenum(ac, 0)
     assert page == ac.page_for_size[2]
     assert page.nextpage == PAGE_NULL
-    assert page.nuninitialized == 4
+    assert ac._nuninitialized(page, 2) == 4
     assert page.nfree == 4
     assert page.freeblock == pageaddr + hdrsize + 2*WORD
     assert page.freeblock.address[0] == pageaddr + hdrsize + 6*WORD
@@ -352,14 +365,14 @@
                                         pageaddr + hdrsize + 10*WORD
     assert page.freeblock.address[0].address[0].address[0] == \
                                         pageaddr + hdrsize + 14*WORD
-    assert ac.free_pages == NULL
+    assert freepages(ac) == NULL
     assert ac.full_page_for_size[2] == PAGE_NULL
 
 def test_mass_free_half_page_becomes_more_free():
     pagesize = hdrsize + 24*WORD
     ac = arena_collection_for_test(pagesize, "/", fill_with_objects=2)
     page = getpage(ac, 0)
-    assert page.nuninitialized == 4
+    assert ac._nuninitialized(page, 2) == 4
     assert page.nfree == 4
     #
     ok_to_free = OkToFree(ac, 0.5)
@@ -372,7 +385,7 @@
     pageaddr = pagenum(ac, 0)
     assert page == ac.page_for_size[2]
     assert page.nextpage == PAGE_NULL
-    assert page.nuninitialized == 4
+    assert ac._nuninitialized(page, 2) == 4
     assert page.nfree == 6
     fb = page.freeblock
     assert fb == pageaddr + hdrsize + 2*WORD
@@ -384,7 +397,7 @@
                                        pageaddr + hdrsize + 12*WORD
     assert fb.address[0].address[0].address[0].address[0].address[0] == \
                                        pageaddr + hdrsize + 14*WORD
-    assert ac.free_pages == NULL
+    assert freepages(ac) == NULL
     assert ac.full_page_for_size[2] == PAGE_NULL
 
 # ____________________________________________________________
@@ -392,17 +405,29 @@
 def test_random():
     import random
     pagesize = hdrsize + 24*WORD
-    num_pages = 28
+    num_pages = 3
     ac = arena_collection_for_test(pagesize, " " * num_pages)
     live_objects = {}
     #
-    # Run the test until ac.allocate_new_arena() is called.
+    # Run the test until three arenas are freed.  This is a quick test
+    # that the arenas are really freed by the logic.
     class DoneTesting(Exception):
-        pass
-    def done_testing():
-        raise DoneTesting
-    ac.allocate_new_arena = done_testing
-    #
+        counter = 0
+    def my_allocate_new_arena():
+        # the following output looks cool on a 112-character-wide terminal.
+        lst = sorted(ac._all_arenas(), key=lambda a: a.base.arena._arena_index)
+        for a in lst:
+            print a.base.arena, a.base.arena.usagemap
+        print '-' * 80
+        ac.__class__.allocate_new_arena(ac)
+        a = ac.current_arena.base.arena
+        def my_mark_freed():
+            a.freed = True
+            DoneTesting.counter += 1
+            if DoneTesting.counter > 3:
+                raise DoneTesting
+        a.mark_freed = my_mark_freed
+    ac.allocate_new_arena = my_allocate_new_arena
     try:
         while True:
             #
@@ -410,12 +435,13 @@
             for i in range(random.randrange(50, 100)):
                 size_class = random.randrange(1, 7)
                 obj = ac.malloc(size_class * WORD)
-                at = obj - ac._startpageaddr
+                at = (obj.arena, obj.offset)
                 assert at not in live_objects
                 live_objects[at] = size_class * WORD
             #
             # Free half the objects, randomly
-            ok_to_free = OkToFree(ac, lambda obj: random.random() < 0.5)
+            ok_to_free = OkToFree(ac, lambda obj: random.random() < 0.5,
+                                  multiarenas=True)
             ac.mass_free(ok_to_free)
             #
             # Check that we have seen all objects
@@ -428,5 +454,4 @@
                     surviving_total_size += live_objects[at]
             assert ac.total_memory_used == surviving_total_size
     except DoneTesting:
-        # the following output looks cool on a 112-character-wide terminal.
-        print ac._startpageaddr.arena.usagemap
+        pass

Modified: pypy/branch/fast-forward/pypy/rpython/memory/gctransform/framework.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/gctransform/framework.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/gctransform/framework.py	Fri Oct  1 22:53:02 2010
@@ -426,6 +426,7 @@
         if GCClass.needs_write_barrier:
             self.write_barrier_ptr = getfn(GCClass.write_barrier.im_func,
                                            [s_gc,
+                                            annmodel.SomeAddress(),
                                             annmodel.SomeAddress()],
                                            annmodel.s_None,
                                            inline=True)
@@ -434,13 +435,15 @@
                 # func should not be a bound method, but a real function
                 assert isinstance(func, types.FunctionType)
                 self.write_barrier_failing_case_ptr = getfn(func,
-                                               [annmodel.SomeAddress()],
+                                               [annmodel.SomeAddress(),
+                                                annmodel.SomeAddress()],
                                                annmodel.s_None)
             func = getattr(GCClass, 'write_barrier_from_array', None)
             if func is not None:
                 self.write_barrier_from_array_ptr = getfn(func.im_func,
                                            [s_gc,
                                             annmodel.SomeAddress(),
+                                            annmodel.SomeAddress(),
                                             annmodel.SomeInteger()],
                                            annmodel.s_None,
                                            inline=True)
@@ -1021,6 +1024,8 @@
             and not isinstance(v_newvalue, Constant)
             and v_struct.concretetype.TO._gckind == "gc"
             and hop.spaceop not in self.clean_sets):
+            v_newvalue = hop.genop("cast_ptr_to_adr", [v_newvalue],
+                                   resulttype = llmemory.Address)
             v_structaddr = hop.genop("cast_ptr_to_adr", [v_struct],
                                      resulttype = llmemory.Address)
             if (self.write_barrier_from_array_ptr is not None and
@@ -1030,12 +1035,14 @@
                 assert v_index.concretetype == lltype.Signed
                 hop.genop("direct_call", [self.write_barrier_from_array_ptr,
                                           self.c_const_gc,
+                                          v_newvalue,
                                           v_structaddr,
                                           v_index])
             else:
                 self.write_barrier_calls += 1
                 hop.genop("direct_call", [self.write_barrier_ptr,
                                           self.c_const_gc,
+                                          v_newvalue,
                                           v_structaddr])
         hop.rename('bare_' + opname)
 

Modified: pypy/branch/fast-forward/pypy/rpython/memory/gcwrapper.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/gcwrapper.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/gcwrapper.py	Fri Oct  1 22:53:02 2010
@@ -9,7 +9,10 @@
     def __init__(self, llinterp, flowgraphs, gc_class, GC_PARAMS={}):
         translator = llinterp.typer.annotator.translator
         config = translator.config.translation
-        self.gc = gc_class(config, chunk_size = 10, **GC_PARAMS)
+        self.gc = gc_class(config,
+                           chunk_size      = 10,
+                           translated_to_c = False,
+                           **GC_PARAMS)
         self.gc.set_root_walker(LLInterpRootWalker(self))
         self.gc.DEBUG = True
         self.llinterp = llinterp
@@ -94,6 +97,7 @@
                         assert (type(index) is int    # <- fast path
                                 or lltype.typeOf(index) == lltype.Signed)
                         self.gc.write_barrier_from_array(
+                            llmemory.cast_ptr_to_adr(newvalue),
                             llmemory.cast_ptr_to_adr(toplevelcontainer),
                             index)
                         wb = False
@@ -101,6 +105,7 @@
             #
             if wb:
                 self.gc.write_barrier(
+                    llmemory.cast_ptr_to_adr(newvalue),
                     llmemory.cast_ptr_to_adr(toplevelcontainer))
         llheap.setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue)
 

Modified: pypy/branch/fast-forward/pypy/rpython/memory/test/test_transformed_gc.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/test/test_transformed_gc.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/test/test_transformed_gc.py	Fri Oct  1 22:53:02 2010
@@ -906,7 +906,8 @@
     gcname = "marksweep"
     class gcpolicy(gc.FrameworkGcPolicy):
         class transformerclass(framework.FrameworkGCTransformer):
-            GC_PARAMS = {'start_heap_size': 1024*WORD }
+            GC_PARAMS = {'start_heap_size': 1024*WORD,
+                         'translated_to_c': False}
             root_stack_depth = 200
 
 
@@ -1144,7 +1145,8 @@
     class gcpolicy(gc.FrameworkGcPolicy):
         class transformerclass(framework.FrameworkGCTransformer):
             from pypy.rpython.memory.gc.marksweep import PrintingMarkSweepGC as GCClass
-            GC_PARAMS = {'start_heap_size': 1024*WORD }
+            GC_PARAMS = {'start_heap_size': 1024*WORD,
+                         'translated_to_c': False}
             root_stack_depth = 200
 
 class TestSemiSpaceGC(GenericMovingGCTests):
@@ -1154,7 +1156,8 @@
     class gcpolicy(gc.FrameworkGcPolicy):
         class transformerclass(framework.FrameworkGCTransformer):
             from pypy.rpython.memory.gc.semispace import SemiSpaceGC as GCClass
-            GC_PARAMS = {'space_size': 512*WORD}
+            GC_PARAMS = {'space_size': 512*WORD,
+                         'translated_to_c': False}
             root_stack_depth = 200
 
 class TestMarkCompactGC(GenericMovingGCTests):
@@ -1163,7 +1166,8 @@
     class gcpolicy(gc.FrameworkGcPolicy):
         class transformerclass(framework.FrameworkGCTransformer):
             from pypy.rpython.memory.gc.markcompact import MarkCompactGC as GCClass
-            GC_PARAMS = {'space_size': 4096*WORD}
+            GC_PARAMS = {'space_size': 4096*WORD,
+                         'translated_to_c': False}
             root_stack_depth = 200
 
 class TestGenerationGC(GenericMovingGCTests):
@@ -1175,7 +1179,8 @@
             from pypy.rpython.memory.gc.generation import GenerationGC as \
                                                           GCClass
             GC_PARAMS = {'space_size': 512*WORD,
-                         'nursery_size': 32*WORD}
+                         'nursery_size': 32*WORD,
+                         'translated_to_c': False}
             root_stack_depth = 200
 
     def define_weakref_across_minor_collection(cls):
@@ -1372,7 +1377,8 @@
                 GenerationGC._teardown(self)
                 
             GC_PARAMS = {'space_size': 512*WORD,
-                         'nursery_size': 128*WORD}
+                         'nursery_size': 128*WORD,
+                         'translated_to_c': False}
             root_stack_depth = 200
 
     def define_working_nursery(cls):
@@ -1404,7 +1410,8 @@
             from pypy.rpython.memory.gc.hybrid import HybridGC as GCClass
             GC_PARAMS = {'space_size': 512*WORD,
                          'nursery_size': 32*WORD,
-                         'large_object': 8*WORD}
+                         'large_object': 8*WORD,
+                         'translated_to_c': False}
             root_stack_depth = 200
 
     def define_ref_from_rawmalloced_to_regular(cls):
@@ -1477,6 +1484,7 @@
                          'large_object': 8*WORD,
                          'large_object_gcptrs': 10*WORD,
                          'card_page_indices': 4,
+                         'translated_to_c': False,
                          }
             root_stack_depth = 200
 
@@ -1585,7 +1593,8 @@
     gcname = "marksweep"
     class gcpolicy(gc.FrameworkGcPolicy):
         class transformerclass(framework.FrameworkGCTransformer):
-            GC_PARAMS = {'start_heap_size': 1024*WORD }
+            GC_PARAMS = {'start_heap_size': 1024*WORD,
+                         'translated_to_c': False}
             root_stack_depth = 200
 
 class TestHybridTaggedPointerGC(TaggedPointerGCTests):
@@ -1596,7 +1605,8 @@
             from pypy.rpython.memory.gc.generation import GenerationGC as \
                                                           GCClass
             GC_PARAMS = {'space_size': 512*WORD,
-                         'nursery_size': 32*WORD}
+                         'nursery_size': 32*WORD,
+                         'translated_to_c': False}
             root_stack_depth = 200
 
 class TestMarkCompactTaggedpointerGC(TaggedPointerGCTests):
@@ -1605,5 +1615,6 @@
     class gcpolicy(gc.FrameworkGcPolicy):
         class transformerclass(framework.FrameworkGCTransformer):
             from pypy.rpython.memory.gc.markcompact import MarkCompactGC as GCClass
-            GC_PARAMS = {'space_size': 4096*WORD}
+            GC_PARAMS = {'space_size': 4096*WORD,
+                         'translated_to_c': False}
             root_stack_depth = 200

Modified: pypy/branch/fast-forward/pypy/translator/driver.py
==============================================================================
--- pypy/branch/fast-forward/pypy/translator/driver.py	(original)
+++ pypy/branch/fast-forward/pypy/translator/driver.py	Fri Oct  1 22:53:02 2010
@@ -426,6 +426,22 @@
                                   [OOTYPE],
                                   "JIT compiler generation")
 
+    def task_jittest_lltype(self):
+        """ Run with the JIT on top of the llgraph backend
+        """
+        # parent process loop: spawn a child, wait for the child to finish,
+        # print a message, and restart
+        from pypy.translator.goal import unixcheckpoint
+        unixcheckpoint.restartable_point(auto='run')
+        # load the module pypy/jit/tl/jittest.py, which you can hack at
+        # and restart without needing to restart the whole translation process
+        from pypy.jit.tl import jittest
+        jittest.jittest(self)
+    #
+    task_jittest_lltype = taskdef(task_jittest_lltype,
+                                  [RTYPE],
+                                  "test of the JIT on the llgraph backend")
+
     def task_backendopt_lltype(self):
         """ Run all backend optimizations - lltype version
         """
@@ -433,7 +449,8 @@
         backend_optimizations(self.translator)
     #
     task_backendopt_lltype = taskdef(task_backendopt_lltype,
-                                     [RTYPE, '??pyjitpl_lltype'],
+                                     [RTYPE, '??pyjitpl_lltype',
+                                             '??jittest_lltype'],
                                      "lltype back-end optimisations")
     BACKENDOPT = 'backendopt_lltype'
 

Modified: pypy/branch/fast-forward/pypy/translator/goal/translate.py
==============================================================================
--- pypy/branch/fast-forward/pypy/translator/goal/translate.py	(original)
+++ pypy/branch/fast-forward/pypy/translator/goal/translate.py	Fri Oct  1 22:53:02 2010
@@ -27,6 +27,7 @@
         ("annotate", "do type inference", "-a --annotate", ""),
         ("rtype", "do rtyping", "-t --rtype", ""),
         ("pyjitpl", "JIT generation step", "--pyjitpl", ""),
+        ("jittest", "JIT test with llgraph backend", "--jittest", ""),
         ("backendopt", "do backend optimizations", "--backendopt", ""),
         ("source", "create source", "-s --source", ""),
         ("compile", "compile", "-c --compile", " (default goal)"),



More information about the Pypy-commit mailing list