[pypy-commit] pypy jitframe-on-heap: merge

fijal noreply at buildbot.pypy.org
Sun Jan 27 20:47:26 CET 2013


Author: Maciej Fijalkowski <fijall at gmail.com>
Branch: jitframe-on-heap
Changeset: r60540:73a4b46a1381
Date: 2013-01-27 21:46 +0200
http://bitbucket.org/pypy/pypy/changeset/73a4b46a1381/

Log:	merge

diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py
--- a/rpython/jit/backend/llgraph/runner.py
+++ b/rpython/jit/backend/llgraph/runner.py
@@ -582,7 +582,7 @@
 
 
 class LLFrame(object):
-    _TYPE = lltype.Signed
+    _TYPE = llmemory.GCREF
 
     forced_deadframe = None
     overflow_flag = False
@@ -595,6 +595,22 @@
         for box, arg in zip(argboxes, args):
             self.setenv(box, arg)
 
+    def __eq__(self, other):
+        # this is here to avoid crashes in 'token == TOKEN_TRACING_RESCALL'
+        from rpython.jit.metainterp.virtualizable import TOKEN_NONE
+        from rpython.jit.metainterp.virtualizable import TOKEN_TRACING_RESCALL
+        if isinstance(other, LLFrame):
+            return self is other
+        if other == TOKEN_NONE or other == TOKEN_TRACING_RESCALL:
+            return False
+        assert 0
+
+    def __ne__(self, other):
+        return not (self == other)
+
+    def _identityhash(self):
+        return hash(self)
+
     def setenv(self, box, arg):
         if box.type == INT:
             # typecheck the result
@@ -863,7 +879,8 @@
         def reset_vable(jd, vable):
             if jd.index_of_virtualizable != -1:
                 fielddescr = jd.vable_token_descr
-                self.cpu.bh_setfield_gc(vable, 0, fielddescr)
+                NULL = lltype.nullptr(llmemory.GCREF.TO)
+                self.cpu.bh_setfield_gc(vable, NULL, fielddescr)
         faildescr = self.cpu.get_latest_descr(pframe)
         if faildescr == self.cpu.done_with_this_frame_descr_int:
             reset_vable(jd, vable)
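
With _TYPE switched to llmemory.GCREF, the llgraph LLFrame object itself now plays the role of the force token, so virtualizable code can end up comparing it directly against the GCREF sentinels introduced later in this changeset (see virtualizable.py below). The __eq__/__ne__ pair added above makes 'token == TOKEN_TRACING_RESCALL' answer False instead of crashing. A minimal plain-Python sketch of that behaviour follows; the sentinel objects and SketchFrame class are stand-ins for illustration only, not code from the commit.

    # Sketch only: models the comparison contract of LLFrame.__eq__/__ne__.
    TOKEN_NONE = object()             # stands in for the NULL GCREF
    TOKEN_TRACING_RESCALL = object()  # stands in for the dummy GC object

    class SketchFrame(object):
        def __eq__(self, other):
            if isinstance(other, SketchFrame):
                return self is other          # real frames compare by identity
            # the only other values a token is ever compared against
            assert other is TOKEN_NONE or other is TOKEN_TRACING_RESCALL
            return False
        def __ne__(self, other):
            return not (self == other)

    token = SketchFrame()
    assert token != TOKEN_TRACING_RESCALL     # no crash, simply "not that marker"
    assert token == token
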
diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py
--- a/rpython/jit/backend/test/runner_test.py
+++ b/rpython/jit/backend/test/runner_test.py
@@ -2220,7 +2220,7 @@
                 values.append(self.cpu.get_int_value(deadframe, 1))
                 self.cpu.set_savedata_ref(deadframe, random_gcref)
 
-        FUNC = self.FuncType([lltype.Signed, lltype.Signed], lltype.Void)
+        FUNC = self.FuncType([llmemory.GCREF, lltype.Signed], lltype.Void)
         func_ptr = llhelper(lltype.Ptr(FUNC), maybe_force)
         funcbox = self.get_funcbox(self.cpu, func_ptr).constbox()
         calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
@@ -2228,7 +2228,7 @@
         cpu = self.cpu
         i0 = BoxInt()
         i1 = BoxInt()
-        tok = BoxInt()
+        tok = BoxPtr()
         faildescr = BasicFailDescr(1)
         ops = [
         ResOperation(rop.FORCE_TOKEN, [], tok),
@@ -2265,7 +2265,7 @@
                 self.cpu.set_savedata_ref(deadframe, random_gcref)
             return 42
 
-        FUNC = self.FuncType([lltype.Signed, lltype.Signed], lltype.Signed)
+        FUNC = self.FuncType([llmemory.GCREF, lltype.Signed], lltype.Signed)
         func_ptr = llhelper(lltype.Ptr(FUNC), maybe_force)
         funcbox = self.get_funcbox(self.cpu, func_ptr).constbox()
         calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
@@ -2274,7 +2274,7 @@
         i0 = BoxInt()
         i1 = BoxInt()
         i2 = BoxInt()
-        tok = BoxInt()
+        tok = BoxPtr()
         faildescr = BasicFailDescr(1)
         ops = [
         ResOperation(rop.FORCE_TOKEN, [], tok),
@@ -2313,7 +2313,7 @@
                 self.cpu.set_savedata_ref(deadframe, random_gcref)
             return 42.5
 
-        FUNC = self.FuncType([lltype.Signed, lltype.Signed], lltype.Float)
+        FUNC = self.FuncType([llmemory.GCREF, lltype.Signed], lltype.Float)
         func_ptr = llhelper(lltype.Ptr(FUNC), maybe_force)
         funcbox = self.get_funcbox(self.cpu, func_ptr).constbox()
         calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
@@ -2322,7 +2322,7 @@
         i0 = BoxInt()
         i1 = BoxInt()
         f2 = BoxFloat()
-        tok = BoxInt()
+        tok = BoxPtr()
         faildescr = BasicFailDescr(1)
         ops = [
         ResOperation(rop.FORCE_TOKEN, [], tok),
@@ -3697,7 +3697,7 @@
             values.append(self.cpu.get_int_value(deadframe, 0))
             return 42
 
-        FUNC = self.FuncType([lltype.Signed, lltype.Signed], lltype.Signed)
+        FUNC = self.FuncType([llmemory.GCREF, lltype.Signed], lltype.Signed)
         func_ptr = llhelper(lltype.Ptr(FUNC), maybe_force)
         funcbox = self.get_funcbox(self.cpu, func_ptr).constbox()
         calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
@@ -3705,7 +3705,7 @@
         i0 = BoxInt()
         i1 = BoxInt()
         i2 = BoxInt()
-        tok = BoxInt()
+        tok = BoxPtr()
         faildescr = BasicFailDescr(23)
         ops = [
         ResOperation(rop.FORCE_TOKEN, [], tok),
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py
@@ -2954,7 +2954,7 @@
         """
         expected = """
         [p1]
-        i0 = force_token()
+        p0 = force_token()
         jump(p1)
         """
         self.optimize_loop(ops, expected)
@@ -2969,12 +2969,12 @@
         """
         expected = """
         [p1]
-        i0 = force_token()
+        p0 = force_token()
         p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable))
-        setfield_gc(p2, i0, descr=virtualtokendescr)
+        setfield_gc(p2, p0, descr=virtualtokendescr)
         escape(p2)
         setfield_gc(p2, p1, descr=virtualforceddescr)
-        setfield_gc(p2, -3, descr=virtualtokendescr)
+        setfield_gc(p2, NULL, descr=virtualtokendescr)
         jump(p1)
         """
         # XXX we should optimize a bit more the case of a nonvirtual.
@@ -3000,10 +3000,10 @@
         """
         expected = """
         [p0, i1]
-        i3 = force_token()
+        p3 = force_token()
         #
         p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable))
-        setfield_gc(p2, i3, descr=virtualtokendescr)
+        setfield_gc(p2, p3, descr=virtualtokendescr)
         setfield_gc(p0, p2, descr=nextdescr)
         #
         call_may_force(i1, descr=mayforcevirtdescr)
@@ -3015,7 +3015,7 @@
         setfield_gc(p1b, 252, descr=valuedescr)
         setfield_gc(p1, p1b, descr=nextdescr)
         setfield_gc(p2, p1, descr=virtualforceddescr)
-        setfield_gc(p2, -3, descr=virtualtokendescr)
+        setfield_gc(p2, NULL, descr=virtualtokendescr)
         jump(p0, i1)
         """
         self.optimize_loop(ops, expected)
@@ -3039,10 +3039,10 @@
         """
         expected = """
         [p0, i1]
-        i3 = force_token()
+        p3 = force_token()
         #
         p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable))
-        setfield_gc(p2, i3, descr=virtualtokendescr)
+        setfield_gc(p2, p3, descr=virtualtokendescr)
         setfield_gc(p0, p2, descr=nextdescr)
         #
         call_may_force(i1, descr=mayforcevirtdescr)
@@ -3054,7 +3054,7 @@
         setfield_gc(p1b, i1, descr=valuedescr)
         setfield_gc(p1, p1b, descr=nextdescr)
         setfield_gc(p2, p1, descr=virtualforceddescr)
-        setfield_gc(p2, -3, descr=virtualtokendescr)
+        setfield_gc(p2, NULL, descr=virtualtokendescr)
         jump(p0, i1)
         """
         # the point of this test is that 'i1' should show up in the fail_args
@@ -3084,21 +3084,21 @@
         """
         expected = """
         [p0, i1]
-        i3 = force_token()
+        p3 = force_token()
         call(i1, descr=nonwritedescr)
-        guard_no_exception(descr=fdescr) [i3, i1, p0]
+        guard_no_exception(descr=fdescr) [p3, i1, p0]
         setfield_gc(p0, NULL, descr=refdescr)
         jump(p0, i1)
         """
         self.optimize_loop(ops, expected)
-        # the fail_args contain [i3, i1, p0]:
-        #  - i3 is from the virtual expansion of p2
+        # the fail_args contain [p3, i1, p0]:
+        #  - p3 is from the virtual expansion of p2
         #  - i1 is from the virtual expansion of p1
         #  - p0 is from the extra pendingfields
         self.loop.inputargs[0].value = self.nodeobjvalue
         self.check_expanded_fail_descr('''p2, p1
             p0.refdescr = p2
-            where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3
+            where p2 is a jit_virtual_ref_vtable, virtualtokendescr=p3
             where p1 is a node_vtable, nextdescr=p1b
             where p1b is a node_vtable, valuedescr=i1
             ''', rop.GUARD_NO_EXCEPTION)
@@ -3116,13 +3116,13 @@
         """
         expected = """
         [i1]
-        i3 = force_token()
+        p3 = force_token()
         p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable))
-        setfield_gc(p2, i3, descr=virtualtokendescr)
+        setfield_gc(p2, p3, descr=virtualtokendescr)
         escape(p2)
         p1 = new_with_vtable(ConstClass(node_vtable))
         setfield_gc(p2, p1, descr=virtualforceddescr)
-        setfield_gc(p2, -3, descr=virtualtokendescr)
+        setfield_gc(p2, NULL, descr=virtualtokendescr)
         call_may_force(i1, descr=mayforcevirtdescr)
         guard_not_forced() []
         jump(i1)
@@ -3141,12 +3141,12 @@
         """
         expected = """
         [i1, p1]
-        i3 = force_token()
+        p3 = force_token()
         p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable))
-        setfield_gc(p2, i3, descr=virtualtokendescr)
+        setfield_gc(p2, p3, descr=virtualtokendescr)
         escape(p2)
         setfield_gc(p2, p1, descr=virtualforceddescr)
-        setfield_gc(p2, -3, descr=virtualtokendescr)
+        setfield_gc(p2, NULL, descr=virtualtokendescr)
         call_may_force(i1, descr=mayforcevirtdescr)
         guard_not_forced() [i1]
         jump(i1, p1)
@@ -3671,7 +3671,7 @@
         i5 = int_gt(i4, i22)
         guard_false(i5) []
         i6 = int_add(i4, 1)
-        i331 = force_token()
+        p331 = force_token()
         i7 = int_sub(i6, 1)
         setfield_gc(p0, i7, descr=valuedescr)
         jump(p0, i22)
@@ -3682,7 +3682,7 @@
         i2 = int_gt(i1, i22)
         guard_false(i2) []
         i3 = int_add(i1, 1)
-        i331 = force_token()
+        p331 = force_token()
         jump(p0, i22)
         """
         self.optimize_loop(ops, expected)
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
@@ -3424,7 +3424,7 @@
         """
         expected = """
         [p1]
-        i0 = force_token()
+        p0 = force_token()
         jump(p1)
         """
         self.optimize_loop(ops, expected, expected)
@@ -3439,12 +3439,12 @@
         """
         expected = """
         [p1]
-        i0 = force_token()
+        p0 = force_token()
         p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable))
-        setfield_gc(p2, i0, descr=virtualtokendescr)
+        setfield_gc(p2, p0, descr=virtualtokendescr)
         escape(p2)
         setfield_gc(p2, p1, descr=virtualforceddescr)
-        setfield_gc(p2, -3, descr=virtualtokendescr)
+        setfield_gc(p2, NULL, descr=virtualtokendescr)
         jump(p1)
         """
         # XXX we should optimize a bit more the case of a nonvirtual.
@@ -3470,10 +3470,10 @@
         """
         expected = """
         [p0, i1]
-        i3 = force_token()
+        p3 = force_token()
         #
         p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable))
-        setfield_gc(p2, i3, descr=virtualtokendescr)
+        setfield_gc(p2, p3, descr=virtualtokendescr)
         setfield_gc(p0, p2, descr=nextdescr)
         #
         call_may_force(i1, descr=mayforcevirtdescr)
@@ -3485,7 +3485,7 @@
         setfield_gc(p1b, 252, descr=valuedescr)
         setfield_gc(p1, p1b, descr=nextdescr)
         setfield_gc(p2, p1, descr=virtualforceddescr)
-        setfield_gc(p2, -3, descr=virtualtokendescr)
+        setfield_gc(p2, NULL, descr=virtualtokendescr)
         jump(p0, i1)
         """
         self.optimize_loop(ops, expected, expected)
@@ -3509,10 +3509,10 @@
         """
         expected = """
         [p0, i1]
-        i3 = force_token()
+        p3 = force_token()
         #
         p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable))
-        setfield_gc(p2, i3, descr=virtualtokendescr)
+        setfield_gc(p2, p3, descr=virtualtokendescr)
         setfield_gc(p0, p2, descr=nextdescr)
         #
         call_may_force(i1, descr=mayforcevirtdescr)
@@ -3524,7 +3524,7 @@
         setfield_gc(p1b, i1, descr=valuedescr)
         setfield_gc(p1, p1b, descr=nextdescr)
         setfield_gc(p2, p1, descr=virtualforceddescr)
-        setfield_gc(p2, -3, descr=virtualtokendescr)
+        setfield_gc(p2, NULL, descr=virtualtokendescr)
         jump(p0, i1)
         """
         # the point of this test is that 'i1' should show up in the fail_args
@@ -3555,31 +3555,31 @@
         """
         preamble = """
         [p0, i1]
-        i3 = force_token()
+        p3 = force_token()
         call(i1, descr=nonwritedescr)
-        guard_no_exception(descr=fdescr) [i3, i1, p0]
+        guard_no_exception(descr=fdescr) [p3, i1, p0]
         setfield_gc(p0, NULL, descr=refdescr)
         escape()
         jump(p0, i1)
         """
         expected = """
         [p0, i1]
-        i3 = force_token()
+        p3 = force_token()
         call(i1, descr=nonwritedescr)
-        guard_no_exception(descr=fdescr2) [i3, i1, p0]
+        guard_no_exception(descr=fdescr2) [p3, i1, p0]
         setfield_gc(p0, NULL, descr=refdescr)
         escape()
         jump(p0, i1)
         """
         self.optimize_loop(ops, expected, preamble)
-        # the fail_args contain [i3, i1, p0]:
-        #  - i3 is from the virtual expansion of p2
+        # the fail_args contain [p3, i1, p0]:
+        #  - p3 is from the virtual expansion of p2
         #  - i1 is from the virtual expansion of p1
         #  - p0 is from the extra pendingfields
         #self.loop.inputargs[0].value = self.nodeobjvalue
         #self.check_expanded_fail_descr('''p2, p1
         #    p0.refdescr = p2
-        #    where p2 is a jit_virtual_ref_vtable, virtualtokendescr=i3
+        #    where p2 is a jit_virtual_ref_vtable, virtualtokendescr=p3
         #    where p1 is a node_vtable, nextdescr=p1b
         #    where p1b is a node_vtable, valuedescr=i1
         #    ''', rop.GUARD_NO_EXCEPTION)
@@ -3597,13 +3597,13 @@
         """
         expected = """
         [i1]
-        i3 = force_token()
+        p3 = force_token()
         p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable))
-        setfield_gc(p2, i3, descr=virtualtokendescr)
+        setfield_gc(p2, p3, descr=virtualtokendescr)
         escape(p2)
         p1 = new_with_vtable(ConstClass(node_vtable))
         setfield_gc(p2, p1, descr=virtualforceddescr)
-        setfield_gc(p2, -3, descr=virtualtokendescr)
+        setfield_gc(p2, NULL, descr=virtualtokendescr)
         call_may_force(i1, descr=mayforcevirtdescr)
         guard_not_forced() []
         jump(i1)
@@ -3622,12 +3622,12 @@
         """
         expected = """
         [i1, p1]
-        i3 = force_token()
+        p3 = force_token()
         p2 = new_with_vtable(ConstClass(jit_virtual_ref_vtable))
-        setfield_gc(p2, i3, descr=virtualtokendescr)
+        setfield_gc(p2, p3, descr=virtualtokendescr)
         escape(p2)
         setfield_gc(p2, p1, descr=virtualforceddescr)
-        setfield_gc(p2, -3, descr=virtualtokendescr)
+        setfield_gc(p2, NULL, descr=virtualtokendescr)
         call_may_force(i1, descr=mayforcevirtdescr)
         guard_not_forced() [i1]
         jump(i1, p1)
@@ -4312,14 +4312,14 @@
         i5 = int_gt(i4, i22)
         guard_false(i5) []
         i6 = int_add(i4, 1)
-        i331 = force_token()
+        p331 = force_token()
         i7 = int_sub(i6, 1)
         setfield_gc(p0, i7, descr=valuedescr)
         jump(p0, i22)
         """
         expected = """
         [p0, i22]
-        i331 = force_token()
+        p331 = force_token()
         jump(p0, i22)
         """
         self.optimize_loop(ops, expected)
diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py
--- a/rpython/jit/metainterp/optimizeopt/virtualize.py
+++ b/rpython/jit/metainterp/optimizeopt/virtualize.py
@@ -2,6 +2,7 @@
 from rpython.jit.metainterp.executor import execute
 from rpython.jit.codewriter.heaptracker import vtable2descr
 from rpython.jit.metainterp.history import Const, ConstInt, BoxInt
+from rpython.jit.metainterp.history import CONST_NULL, BoxPtr
 from rpython.jit.metainterp.optimizeopt import optimizer
 from rpython.jit.metainterp.optimizeopt.optimizer import OptValue, REMOVED
 from rpython.jit.metainterp.optimizeopt.util import (make_dispatcher_method,
@@ -418,7 +419,7 @@
         # but the point is that doing so does not force the original structure.
         op = ResOperation(rop.NEW_WITH_VTABLE, [c_cls], op.result)
         vrefvalue = self.make_virtual(c_cls, op.result, op)
-        tokenbox = BoxInt()
+        tokenbox = BoxPtr()
         self.emit_operation(ResOperation(rop.FORCE_TOKEN, [], tokenbox))
         vrefvalue.setfield(descr_virtual_token, self.getvalue(tokenbox))
 
@@ -441,12 +442,12 @@
 
         # - set 'forced' to point to the real object
         objbox = op.getarg(1)
-        if not self.optimizer.cpu.ts.CONST_NULL.same_constant(objbox):
+        if not CONST_NULL.same_constant(objbox):
             seo(ResOperation(rop.SETFIELD_GC, op.getarglist(), None,
                              descr = vrefinfo.descr_forced))
 
-        # - set 'virtual_token' to TOKEN_NONE
-        args = [op.getarg(0), ConstInt(vrefinfo.TOKEN_NONE)]
+        # - set 'virtual_token' to TOKEN_NONE (== NULL)
+        args = [op.getarg(0), CONST_NULL]
         seo(ResOperation(rop.SETFIELD_GC, args, None,
                          descr=vrefinfo.descr_virtual_token))
         # Note that in some cases the virtual in op.getarg(1) has been forced
@@ -462,7 +463,7 @@
         if vref.is_virtual():
             tokenvalue = vref.getfield(vrefinfo.descr_virtual_token, None)
             if (tokenvalue is not None and tokenvalue.is_constant() and
-                tokenvalue.box.getint() == vrefinfo.TOKEN_NONE):
+                    not tokenvalue.box.nonnull()):
                 forcedvalue = vref.getfield(vrefinfo.descr_forced, None)
                 if forcedvalue is not None and not forcedvalue.is_null():
                     self.make_equal_to(op.result, forcedvalue)
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -2283,7 +2283,7 @@
             virtualizable = vinfo.unwrap_virtualizable_box(virtualizable_box)
             vinfo.tracing_before_residual_call(virtualizable)
             #
-            force_token_box = history.BoxInt()
+            force_token_box = history.BoxPtr()
             self.history.record(rop.FORCE_TOKEN, [], force_token_box)
             self.history.record(rop.SETFIELD_GC, [virtualizable_box,
                                                   force_token_box],
@@ -2376,7 +2376,7 @@
             self.virtualizable_boxes = virtualizable_boxes
             # just jumped away from assembler (case 4 in the comment in
             # virtualizable.py) into tracing (case 2); check that vable_token
-            # is and stays 0.  Note the call to reset_vable_token() in
+            # is and stays NULL.  Note the call to reset_vable_token() in
             # warmstate.py.
             virtualizable_box = self.virtualizable_boxes[-1]
             virtualizable = vinfo.unwrap_virtualizable_box(virtualizable_box)
diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py
--- a/rpython/jit/metainterp/resume.py
+++ b/rpython/jit/metainterp/resume.py
@@ -1110,7 +1110,7 @@
         else:
             # just jumped away from assembler (case 4 in the comment in
             # virtualizable.py) into tracing (case 2); check that vable_token
-            # is and stays 0.  Note the call to reset_vable_token() in
+            # is and stays NULL.  Note the call to reset_vable_token() in
             # warmstate.py.
             assert not vinfo.is_token_nonnull_gcref(virtualizable)
         return vinfo.write_from_resume_data_partial(virtualizable, self, numb)
diff --git a/rpython/jit/metainterp/test/test_recursive.py b/rpython/jit/metainterp/test/test_recursive.py
--- a/rpython/jit/metainterp/test/test_recursive.py
+++ b/rpython/jit/metainterp/test/test_recursive.py
@@ -823,7 +823,8 @@
             # at the level 2 is set to a non-zero value when doing the
             # call to the level 3 only.  This used to fail when the test
             # is run via rpython.jit.backend.x86.test.test_recursive.
-            assert ll_subframe.vable_token == 0
+            from rpython.jit.metainterp.virtualizable import TOKEN_NONE
+            assert ll_subframe.vable_token == TOKEN_NONE
 
         def main(codeno):
             frame = Frame()
diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py
--- a/rpython/jit/metainterp/test/test_virtualizable.py
+++ b/rpython/jit/metainterp/test/test_virtualizable.py
@@ -41,7 +41,7 @@
     XY = lltype.GcStruct(
         'XY',
         ('parent', rclass.OBJECT),
-        ('vable_token', lltype.Signed),
+        ('vable_token', llmemory.GCREF),
         ('inst_x', lltype.Signed),
         ('inst_node', lltype.Ptr(LLtypeMixin.NODE)),
         hints = {'virtualizable2_accessor': FieldListAccessor()})
@@ -56,7 +56,7 @@
 
     def setup(self):
         xy = lltype.malloc(self.XY)
-        xy.vable_token = 0
+        xy.vable_token = lltype.nullptr(llmemory.GCREF.TO)
         xy.parent.typeptr = self.xy_vtable
         return xy
 
@@ -206,7 +206,7 @@
     XY2 = lltype.GcStruct(
         'XY2',
         ('parent', rclass.OBJECT),
-        ('vable_token', lltype.Signed),
+        ('vable_token', llmemory.GCREF),
         ('inst_x', lltype.Signed),
         ('inst_l1', lltype.Ptr(lltype.GcArray(lltype.Signed))),
         ('inst_l2', lltype.Ptr(lltype.GcArray(lltype.Signed))),
@@ -220,7 +220,7 @@
 
     def setup2(self):
         xy2 = lltype.malloc(self.XY2)
-        xy2.vable_token = 0
+        xy2.vable_token = lltype.nullptr(llmemory.GCREF.TO)
         xy2.parent.typeptr = self.xy2_vtable
         return xy2
 
@@ -393,7 +393,7 @@
 
     def setup2sub(self):
         xy2 = lltype.malloc(self.XY2SUB)
-        xy2.parent.vable_token = 0
+        xy2.parent.vable_token = lltype.nullptr(llmemory.GCREF.TO)
         xy2.parent.parent.typeptr = self.xy2_vtable
         return xy2
 
diff --git a/rpython/jit/metainterp/test/test_virtualref.py b/rpython/jit/metainterp/test/test_virtualref.py
--- a/rpython/jit/metainterp/test/test_virtualref.py
+++ b/rpython/jit/metainterp/test/test_virtualref.py
@@ -1,6 +1,6 @@
 import py
 
-from rpython.rtyper.lltypesystem import lltype, lloperation
+from rpython.rtyper.lltypesystem import lltype, llmemory, lloperation
 from rpython.rtyper.exceptiondata import UnknownException
 from rpython.rlib.jit import JitDriver, dont_look_inside, vref_None
 from rpython.rlib.jit import virtual_ref, virtual_ref_finish, InvalidVirtualRef
@@ -110,7 +110,10 @@
                   if str(box._getrepr_()).endswith('JitVirtualRef')]
         assert len(bxs2) == 1
         JIT_VIRTUAL_REF = self.vrefinfo.JIT_VIRTUAL_REF
-        bxs2[0].getref(lltype.Ptr(JIT_VIRTUAL_REF)).virtual_token = 1234567
+        FOO = lltype.GcStruct('FOO')
+        foo = lltype.malloc(FOO)
+        tok = lltype.cast_opaque_ptr(llmemory.GCREF, foo)
+        bxs2[0].getref(lltype.Ptr(JIT_VIRTUAL_REF)).virtual_token = tok
         #
         # try reloading from blackhole.py's point of view
         from rpython.jit.metainterp.resume import ResumeDataDirectReader
diff --git a/rpython/jit/metainterp/virtualizable.py b/rpython/jit/metainterp/virtualizable.py
--- a/rpython/jit/metainterp/virtualizable.py
+++ b/rpython/jit/metainterp/virtualizable.py
@@ -10,9 +10,8 @@
 from rpython.jit.metainterp.warmstate import wrap, unwrap
 from rpython.rlib.objectmodel import specialize
 
+
 class VirtualizableInfo(object):
-    TOKEN_NONE            = 0      # must be 0 -- see also x86.call_assembler
-    TOKEN_TRACING_RESCALL = -1
 
     def __init__(self, warmrunnerdesc, VTYPEPTR):
         self.warmrunnerdesc = warmrunnerdesc
@@ -217,7 +216,7 @@
         self.cast_gcref_to_vtype = cast_gcref_to_vtype
 
         def reset_vable_token(virtualizable):
-            virtualizable.vable_token = VirtualizableInfo.TOKEN_NONE
+            virtualizable.vable_token = TOKEN_NONE
         self.reset_vable_token = reset_vable_token
 
         def clear_vable_token(virtualizable):
@@ -230,7 +229,7 @@
         def tracing_before_residual_call(virtualizable):
             virtualizable = cast_gcref_to_vtype(virtualizable)
             assert not virtualizable.vable_token
-            virtualizable.vable_token = VirtualizableInfo.TOKEN_TRACING_RESCALL
+            virtualizable.vable_token = TOKEN_TRACING_RESCALL
         self.tracing_before_residual_call = tracing_before_residual_call
 
         def tracing_after_residual_call(virtualizable):
@@ -238,8 +237,8 @@
             if virtualizable.vable_token:
                 # not modified by the residual call; assert that it is still
                 # set to TOKEN_TRACING_RESCALL and clear it.
-                assert virtualizable.vable_token == VirtualizableInfo.TOKEN_TRACING_RESCALL
-                virtualizable.vable_token = VirtualizableInfo.TOKEN_NONE
+                assert virtualizable.vable_token == TOKEN_TRACING_RESCALL
+                virtualizable.vable_token = TOKEN_NONE
                 return False
             else:
                 # marker "modified during residual call" set.
@@ -248,16 +247,16 @@
 
         def force_now(virtualizable):
             token = virtualizable.vable_token
-            if token == VirtualizableInfo.TOKEN_TRACING_RESCALL:
+            if token == TOKEN_TRACING_RESCALL:
                 # The values in the virtualizable are always correct during
                 # tracing.  We only need to reset vable_token to TOKEN_NONE
                 # as a marker for the tracing, to tell it that this
                 # virtualizable escapes.
-                virtualizable.vable_token = VirtualizableInfo.TOKEN_NONE
+                virtualizable.vable_token = TOKEN_NONE
             else:
                 from rpython.jit.metainterp.compile import ResumeGuardForcedDescr
                 ResumeGuardForcedDescr.force_now(cpu, token)
-                assert virtualizable.vable_token == VirtualizableInfo.TOKEN_NONE
+                assert virtualizable.vable_token == TOKEN_NONE
         force_now._dont_inline_ = True
         self.force_now = force_now
 
@@ -268,7 +267,7 @@
 
         def reset_token_gcref(virtualizable):
             virtualizable = cast_gcref_to_vtype(virtualizable)
-            virtualizable.vable_token = VirtualizableInfo.TOKEN_NONE
+            virtualizable.vable_token = TOKEN_NONE
         self.reset_token_gcref = reset_token_gcref
 
     def _freeze_(self):
@@ -297,16 +296,23 @@
 
 # ____________________________________________________________
 #
-# The 'vable_token' field of a virtualizable is either 0, -1, or points
-# into the CPU stack to a particular field in the current frame.  It is:
+# The 'vable_token' field of a virtualizable is either NULL, points
+# to the JITFRAME object for the current assembler frame, or is
+# the special value TOKEN_TRACING_RESCALL.  It is:
 #
-#   1. 0 (TOKEN_NONE) if not in the JIT at all, except as described below.
+#   1. NULL (TOKEN_NONE) if not in the JIT at all, except as described below.
 #
-#   2. equal to 0 when tracing is in progress; except:
+#   2. NULL when tracing is in progress; except:
 #
-#   3. equal to -1 (TOKEN_TRACING_RESCALL) during tracing when we do a
+#   3. equal to TOKEN_TRACING_RESCALL during tracing when we do a
 #      residual call, calling random unknown other parts of the interpreter;
-#      it is reset to 0 as soon as something occurs to the virtualizable.
+#      it is reset to NULL as soon as something occurs to the virtualizable.
 #
 #   4. when running the machine code with a virtualizable, it is set
-#      to the address in the CPU stack by the FORCE_TOKEN operation.
+#      to the JITFRAME, as obtained with the FORCE_TOKEN operation.
+
+_DUMMY = lltype.GcStruct('JITFRAME_DUMMY')
+_dummy = lltype.malloc(_DUMMY)
+
+TOKEN_NONE            = lltype.nullptr(llmemory.GCREF.TO)
+TOKEN_TRACING_RESCALL = lltype.cast_opaque_ptr(llmemory.GCREF, _dummy)
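
The comment block above replaces the old integer encoding (0, -1, or a CPU stack address) with GC references: a NULL GCREF, a dummy GC object serving as TOKEN_TRACING_RESCALL, or the JITFRAME itself. As an illustration only (not part of the commit), a plain-Python sketch of how a vable_token is classified under the new scheme; classify_token and the sentinel stand-ins are hypothetical names.

    # Sketch: the four cases described in the comment above, in order.
    TOKEN_NONE = None                 # models the NULL GCREF
    TOKEN_TRACING_RESCALL = object()  # models the JITFRAME_DUMMY object

    def classify_token(token):
        if not token:                           # cases 1 and 2: NULL
            return "not in the JIT, or plain tracing"
        if token is TOKEN_TRACING_RESCALL:      # case 3
            return "tracing, inside a residual call"
        return "running assembler: token is the JITFRAME"  # case 4

    assert classify_token(TOKEN_NONE).startswith("not in the JIT")
    assert classify_token(TOKEN_TRACING_RESCALL).startswith("tracing")
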
diff --git a/rpython/jit/metainterp/virtualref.py b/rpython/jit/metainterp/virtualref.py
--- a/rpython/jit/metainterp/virtualref.py
+++ b/rpython/jit/metainterp/virtualref.py
@@ -1,6 +1,8 @@
 from rpython.rtyper.rmodel import inputconst, log
 from rpython.rtyper.lltypesystem import lltype, llmemory, rclass
 from rpython.jit.metainterp import history
+from rpython.jit.metainterp.virtualizable import TOKEN_NONE
+from rpython.jit.metainterp.virtualizable import TOKEN_TRACING_RESCALL
 from rpython.jit.codewriter import heaptracker
 from rpython.rlib.jit import InvalidVirtualRef
 
@@ -12,7 +14,7 @@
         # we make the low-level type of an RPython class directly
         self.JIT_VIRTUAL_REF = lltype.GcStruct('JitVirtualRef',
             ('super', rclass.OBJECT),
-            ('virtual_token', lltype.Signed),
+            ('virtual_token', llmemory.GCREF),
             ('forced', rclass.OBJECTPTR))
         self.jit_virtual_ref_vtable = lltype.malloc(rclass.OBJECT_VTABLE,
                                                     zero=True, flavor='raw',
@@ -69,19 +71,17 @@
 
     # The 'virtual_token' field has the same meaning as the 'vable_token' field
     # of a virtualizable.  It is equal to:
-    #  * -3 (TOKEN_NONE) when tracing, except as described below;
-    #  * -1 (TOKEN_TRACING_RESCALL) during tracing when we do a residual call;
-    #  * addr in the CPU stack (set by FORCE_TOKEN) when running the assembler;
-    #  * -3 (TOKEN_NONE) after the virtual is forced, if it is forced at all.
-    TOKEN_NONE            = -3
-    TOKEN_TRACING_RESCALL = -1
+    #  * TOKEN_NONE when tracing, except as described below;
+    #  * TOKEN_TRACING_RESCALL during tracing when we do a residual call;
+    #  * the JITFRAME (set by FORCE_TOKEN) when running the assembler;
+    #  * TOKEN_NONE after the virtual is forced, if it is forced at all.
 
     def virtual_ref_during_tracing(self, real_object):
         assert real_object
         vref = lltype.malloc(self.JIT_VIRTUAL_REF)
         p = lltype.cast_pointer(rclass.OBJECTPTR, vref)
         p.typeptr = self.jit_virtual_ref_vtable
-        vref.virtual_token = self.TOKEN_NONE
+        vref.virtual_token = TOKEN_NONE
         vref.forced = lltype.cast_opaque_ptr(rclass.OBJECTPTR, real_object)
         return lltype.cast_opaque_ptr(llmemory.GCREF, vref)
 
@@ -95,19 +95,19 @@
         if not self.is_virtual_ref(gcref):
             return
         vref = lltype.cast_opaque_ptr(lltype.Ptr(self.JIT_VIRTUAL_REF), gcref)
-        assert vref.virtual_token == self.TOKEN_NONE
-        vref.virtual_token = self.TOKEN_TRACING_RESCALL
+        assert vref.virtual_token == TOKEN_NONE
+        vref.virtual_token = TOKEN_TRACING_RESCALL
 
     def tracing_after_residual_call(self, gcref):
         if not self.is_virtual_ref(gcref):
             return False
         vref = lltype.cast_opaque_ptr(lltype.Ptr(self.JIT_VIRTUAL_REF), gcref)
         assert vref.forced
-        if vref.virtual_token != self.TOKEN_NONE:
+        if vref.virtual_token != TOKEN_NONE:
             # not modified by the residual call; assert that it is still
             # set to TOKEN_TRACING_RESCALL and clear it.
-            assert vref.virtual_token == self.TOKEN_TRACING_RESCALL
-            vref.virtual_token = self.TOKEN_NONE
+            assert vref.virtual_token == TOKEN_TRACING_RESCALL
+            vref.virtual_token = TOKEN_NONE
             return False
         else:
             # marker "modified during residual call" set.
@@ -118,8 +118,8 @@
             return
         assert real_object
         vref = lltype.cast_opaque_ptr(lltype.Ptr(self.JIT_VIRTUAL_REF), gcref)
-        assert vref.virtual_token != self.TOKEN_TRACING_RESCALL
-        vref.virtual_token = self.TOKEN_NONE
+        assert vref.virtual_token != TOKEN_TRACING_RESCALL
+        vref.virtual_token = TOKEN_NONE
         vref.forced = lltype.cast_opaque_ptr(rclass.OBJECTPTR, real_object)
 
     # ____________________________________________________________
@@ -151,19 +151,19 @@
     def force_virtual(self, inst):
         vref = lltype.cast_pointer(lltype.Ptr(self.JIT_VIRTUAL_REF), inst)
         token = vref.virtual_token
-        if token != self.TOKEN_NONE:
-            if token == self.TOKEN_TRACING_RESCALL:
+        if token != TOKEN_NONE:
+            if token == TOKEN_TRACING_RESCALL:
                 # The "virtual" is not a virtual at all during tracing.
                 # We only need to reset virtual_token to TOKEN_NONE
                 # as a marker for the tracing, to tell it that this
                 # "virtual" escapes.
                 assert vref.forced
-                vref.virtual_token = self.TOKEN_NONE
+                vref.virtual_token = TOKEN_NONE
             else:
                 assert not vref.forced
                 from rpython.jit.metainterp.compile import ResumeGuardForcedDescr
                 ResumeGuardForcedDescr.force_now(self.cpu, token)
-                assert vref.virtual_token == self.TOKEN_NONE
+                assert vref.virtual_token == TOKEN_NONE
                 assert vref.forced
         elif not vref.forced:
             # token == TOKEN_NONE and the vref was not forced: it's invalid
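
To summarize the virtualref.py changes above: virtual_token now cycles through the same GCREF sentinels as vable_token. Below is a hedged, plain-Python sketch of those transitions, with TOKEN_NONE modelled as None and TOKEN_TRACING_RESCALL as a unique object; SketchVRef is an illustrative stand-in, not the real JIT_VIRTUAL_REF structure.

    # Sketch of the token lifecycle enforced by virtualref.py after this change.
    TOKEN_NONE = None
    TOKEN_TRACING_RESCALL = object()

    class SketchVRef(object):
        def __init__(self, real_object):
            self.virtual_token = TOKEN_NONE        # set during tracing
            self.forced = real_object

        def before_residual_call(self):
            assert self.virtual_token is TOKEN_NONE
            self.virtual_token = TOKEN_TRACING_RESCALL

        def after_residual_call(self):
            if self.virtual_token is not TOKEN_NONE:
                # untouched by the call: still the marker, so clear it
                assert self.virtual_token is TOKEN_TRACING_RESCALL
                self.virtual_token = TOKEN_NONE
                return False
            return True                            # forced during the call

    vref = SketchVRef(real_object="some instance")
    vref.before_residual_call()
    assert vref.after_residual_call() is False
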
diff --git a/rpython/rtyper/lltypesystem/rvirtualizable2.py b/rpython/rtyper/lltypesystem/rvirtualizable2.py
--- a/rpython/rtyper/lltypesystem/rvirtualizable2.py
+++ b/rpython/rtyper/lltypesystem/rvirtualizable2.py
@@ -9,15 +9,19 @@
     def _setup_repr_llfields(self):
         llfields = []
         if self.top_of_virtualizable_hierarchy:
-            llfields.append(('vable_token', lltype.Signed))
+            llfields.append(('vable_token', llmemory.GCREF))
         return llfields
 
-    def set_vable(self, llops, vinst, force_cast=False):
-        if self.top_of_virtualizable_hierarchy:
-            if force_cast:
-                vinst = llops.genop('cast_pointer', [vinst], resulttype=self)
-            cname = inputconst(lltype.Void, 'vable_token')
-            cvalue = inputconst(lltype.Signed, 0)
-            llops.genop('setfield', [vinst, cname, cvalue])
-        else:
-            self.rbase.set_vable(llops, vinst, force_cast=True)
+##  The code below is commented out because vtable_token is always
+##  initialized to NULL anyway.
+##
+##    def set_vable(self, llops, vinst, force_cast=False):
+##        if self.top_of_virtualizable_hierarchy:
+##            if force_cast:
+##                vinst = llops.genop('cast_pointer', [vinst], resulttype=self)
+##            cname = inputconst(lltype.Void, 'vable_token')
+##            cvalue = inputconst(llmemory.GCREF,
+##                                lltype.nullptr(llmemory.GCREF.TO))
+##            llops.genop('setfield', [vinst, cname, cvalue])
+##        else:
+##            self.rbase.set_vable(llops, vinst, force_cast=True)
diff --git a/rpython/rtyper/rvirtualizable2.py b/rpython/rtyper/rvirtualizable2.py
--- a/rpython/rtyper/rvirtualizable2.py
+++ b/rpython/rtyper/rvirtualizable2.py
@@ -23,8 +23,8 @@
     def _setup_repr_llfields(self):
         raise NotImplementedError
 
-    def set_vable(self, llops, vinst, force_cast=False):
-        raise NotImplementedError
+##    def set_vable(self, llops, vinst, force_cast=False):
+##        raise NotImplementedError
 
     def _setup_repr(self):
         if self.top_of_virtualizable_hierarchy:
@@ -43,10 +43,10 @@
             # not need it, but it doesn't hurt to have it anyway
             self.my_redirected_fields = self.rbase.my_redirected_fields
 
-    def new_instance(self, llops, classcallhop=None):
-        vptr = self._super().new_instance(llops, classcallhop)
-        self.set_vable(llops, vptr)
-        return vptr
+##    def new_instance(self, llops, classcallhop=None):
+##        vptr = self._super().new_instance(llops, classcallhop)
+##        self.set_vable(llops, vptr)
+##        return vptr
 
     def hook_access_field(self, vinst, cname, llops, flags):
         #if not flags.get('access_directly'):
diff --git a/rpython/rtyper/test/test_rvirtualizable2.py b/rpython/rtyper/test/test_rvirtualizable2.py
--- a/rpython/rtyper/test/test_rvirtualizable2.py
+++ b/rpython/rtyper/test/test_rvirtualizable2.py
@@ -1,5 +1,5 @@
 import py
-from rpython.rtyper.lltypesystem import lltype
+from rpython.rtyper.lltypesystem import lltype, llmemory
 from rpython.rtyper.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin
 from rpython.rtyper.rvirtualizable2 import replace_force_virtualizable_with_call
 from rpython.rlib.jit import hint
@@ -373,7 +373,7 @@
         assert res.item1 == 42
         res = lltype.normalizeptr(res.item0)
         assert res.inst_v == 42
-        assert res.vable_token == 0
+        assert res.vable_token == lltype.nullptr(llmemory.GCREF.TO)
 
 class TestOOtype(OORtypeMixin, BaseTest):
     prefix = 'o'

